Mirror of https://github.com/torvalds/linux.git
b67bfe0d42 ("hlist: drop the node parameter from iterators")
I'm not sure why, but while the list for-each-entry iterators were conceived as

        list_for_each_entry(pos, head, member)

the hlist ones were greedy and wanted an extra parameter:

        hlist_for_each_entry(tpos, pos, head, member)

Why did they need an extra pos parameter? I'm not quite sure. Not only do
they not really need it, it also prevents the iterator from looking exactly
like the list iterator, which is unfortunate.

Besides the semantic patch, there was some manual work required:

 - Fix up the actual hlist iterators in linux/list.h.
 - Fix up the declarations of other iterators based on the hlist ones.
 - A very small number of places were using the 'node' parameter; these
   were modified to use 'obj->member' instead.
 - Coccinelle didn't handle the hlist_for_each_entry_safe iterator
   properly, so those had to be fixed up manually.

The semantic patch, which is mostly the work of Peter Senna Tschudin, is
here:

 @@
 iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;

 type T;
 expression a,c,d,e;
 identifier b;
 statement S;
 @@

 -T b;
     <+... when != b
 (
 hlist_for_each_entry(a,
 - b,
 c, d) S
 |
 hlist_for_each_entry_continue(a,
 - b,
 c) S
 |
 hlist_for_each_entry_from(a,
 - b,
 c) S
 |
 hlist_for_each_entry_rcu(a,
 - b,
 c, d) S
 |
 hlist_for_each_entry_rcu_bh(a,
 - b,
 c, d) S
 |
 hlist_for_each_entry_continue_rcu_bh(a,
 - b,
 c) S
 |
 for_each_busy_worker(a, c,
 - b,
 d) S
 |
 ax25_uid_for_each(a,
 - b,
 c) S
 |
 ax25_for_each(a,
 - b,
 c) S
 |
 inet_bind_bucket_for_each(a,
 - b,
 c) S
 |
 sctp_for_each_hentry(a,
 - b,
 c) S
 |
 sk_for_each(a,
 - b,
 c) S
 |
 sk_for_each_rcu(a,
 - b,
 c) S
 |
 sk_for_each_from
 -(a, b)
 +(a)
 S
 + sk_for_each_from(a) S
 |
 sk_for_each_safe(a,
 - b,
 c, d) S
 |
 sk_for_each_bound(a,
 - b,
 c) S
 |
 hlist_for_each_entry_safe(a,
 - b,
 c, d, e) S
 |
 hlist_for_each_entry_continue_rcu(a,
 - b,
 c) S
 |
 nr_neigh_for_each(a,
 - b,
 c) S
 |
 nr_neigh_for_each_safe(a,
 - b,
 c, d) S
 |
 nr_node_for_each(a,
 - b,
 c) S
 |
 nr_node_for_each_safe(a,
 - b,
 c, d) S
 |
 - for_each_gfn_sp(a, c, d, b) S
 + for_each_gfn_sp(a, c, d) S
 |
 - for_each_gfn_indirect_valid_sp(a, c, d, b) S
 + for_each_gfn_indirect_valid_sp(a, c, d) S
 |
 for_each_host(a,
 - b,
 c) S
 |
 for_each_host_safe(a,
 - b,
 c, d) S
 |
 for_each_mesh_entry(a,
 - b,
 c, d) S
 )
 ...+>

[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
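To make the signature change concrete, here is a minimal sketch of a call site after the conversion. Everything in it apart from the iterator itself is hypothetical: struct my_item, its value and link fields, and sum_bucket() are invented for illustration and do not appear in the patch.

#include <linux/list.h>

struct my_item {
	int value;
	struct hlist_node link;		/* hangs the item off an hlist bucket */
};

static int sum_bucket(struct hlist_head *head)
{
	struct my_item *pos;
	int sum = 0;

	/*
	 * Before this commit the iterator also demanded a scratch cursor,
	 * struct hlist_node *n, as its second argument:
	 *
	 *	hlist_for_each_entry(pos, n, head, link)
	 *
	 * After it, the call mirrors list_for_each_entry():
	 */
	hlist_for_each_entry(pos, head, link)
		sum += pos->value;

	return sum;
}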
45 lines
1.3 KiB
C
#include <linux/user-return-notifier.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/export.h>

static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);

/*
 * Request a notification when the current cpu returns to userspace. Must be
 * called in atomic context. The notifier will also be called in atomic
 * context.
 */
void user_return_notifier_register(struct user_return_notifier *urn)
{
	set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
	hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
}
EXPORT_SYMBOL_GPL(user_return_notifier_register);

/*
 * Removes a registered user return notifier. Must be called from atomic
 * context, and from the same cpu registration occurred in.
 */
void user_return_notifier_unregister(struct user_return_notifier *urn)
{
	hlist_del(&urn->link);
	if (hlist_empty(&__get_cpu_var(return_notifier_list)))
		clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
}
EXPORT_SYMBOL_GPL(user_return_notifier_unregister);

/* Calls registered user return notifiers */
void fire_user_return_notifiers(void)
{
	struct user_return_notifier *urn;
	struct hlist_node *tmp2;
	struct hlist_head *head;

	head = &get_cpu_var(return_notifier_list);
	hlist_for_each_entry_safe(urn, tmp2, head, link)
		urn->on_user_return(urn);
	put_cpu_var(return_notifier_list);
}
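Since the file above only provides the mechanism, a rough usage sketch may help; this is not code from the tree. struct demo_state, demo_on_user_return() and demo_arm() are invented names, and only struct user_return_notifier, its on_user_return callback, and the register/unregister calls come from the API shown above.

#include <linux/kernel.h>	/* container_of() */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/user-return-notifier.h>

/* Hypothetical per-cpu state that must be cleaned up before returning to userspace. */
struct demo_state {
	struct user_return_notifier urn;
	bool registered;
};

static DEFINE_PER_CPU(struct demo_state, demo_state);

/* Called in atomic context just before this cpu returns to userspace. */
static void demo_on_user_return(struct user_return_notifier *urn)
{
	struct demo_state *st = container_of(urn, struct demo_state, urn);

	/*
	 * Unregistering from inside the callback is fine: the firing loop
	 * walks the list with hlist_for_each_entry_safe().
	 */
	user_return_notifier_unregister(urn);
	st->registered = false;
	/* ... restore whatever per-cpu state was deferred ... */
}

/* Arm the notifier for the current cpu; registration requires atomic context. */
static void demo_arm(void)
{
	struct demo_state *st;

	preempt_disable();
	st = this_cpu_ptr(&demo_state);
	if (!st->registered) {
		st->urn.on_user_return = demo_on_user_return;
		user_return_notifier_register(&st->urn);
		st->registered = true;
	}
	preempt_enable();
}

The _safe walk in fire_user_return_notifiers() is what makes this self-unregistering pattern legal: tmp2 caches the next node before each callback runs, so a callback may delete its own entry from the list.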