mirror of
https://github.com/torvalds/linux.git
synced 2024-10-30 00:32:38 +00:00
fsnotify: destroy marks with call_srcu instead of dedicated thread
At the time that this code was originally written, call_srcu didn't exist, so this thread was required to ensure that we waited for that SRCU grace period to settle before finally freeing the object. It does exist now, however, and we can much more efficiently use call_srcu to handle this. That also allows us to potentially use srcu_barrier to ensure that all of the callbacks have run before proceeding. In order to conserve space, we union the rcu_head with the g_list. This will be necessary for nfsd, which will allocate marks from a dedicated slabcache: we have to be able to ensure that all of the objects are destroyed before destroying the cache, and that's fairly straightforward to do once mark destruction goes through call_srcu and can be flushed with srcu_barrier. Signed-off-by: Jeff Layton <jeff.layton@primarydata.com> Cc: Eric Paris <eparis@parisplace.org> Reviewed-by: Jan Kara <jack@suse.com> Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
1deaf9d197
commit
c510eff6be
@ -92,9 +92,6 @@
|
|||||||
#include "fsnotify.h"
|
#include "fsnotify.h"
|
||||||
|
|
||||||
struct srcu_struct fsnotify_mark_srcu;
|
struct srcu_struct fsnotify_mark_srcu;
|
||||||
static DEFINE_SPINLOCK(destroy_lock);
|
|
||||||
static LIST_HEAD(destroy_list);
|
|
||||||
static DECLARE_WAIT_QUEUE_HEAD(destroy_waitq);
|
|
||||||
|
|
||||||
void fsnotify_get_mark(struct fsnotify_mark *mark)
|
void fsnotify_get_mark(struct fsnotify_mark *mark)
|
||||||
{
|
{
|
||||||
@ -168,10 +165,19 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
|
|||||||
atomic_dec(&group->num_marks);
|
atomic_dec(&group->num_marks);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
fsnotify_mark_free_rcu(struct rcu_head *rcu)
|
||||||
|
{
|
||||||
|
struct fsnotify_mark *mark;
|
||||||
|
|
||||||
|
mark = container_of(rcu, struct fsnotify_mark, g_rcu);
|
||||||
|
fsnotify_put_mark(mark);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Free fsnotify mark. The freeing is actually happening from a kthread which
|
* Free fsnotify mark. The freeing is actually happening from a call_srcu
|
||||||
* first waits for srcu period end. Caller must have a reference to the mark
|
* callback. Caller must have a reference to the mark or be protected by
|
||||||
* or be protected by fsnotify_mark_srcu.
|
* fsnotify_mark_srcu.
|
||||||
*/
|
*/
|
||||||
void fsnotify_free_mark(struct fsnotify_mark *mark)
|
void fsnotify_free_mark(struct fsnotify_mark *mark)
|
||||||
{
|
{
|
||||||
@ -186,10 +192,7 @@ void fsnotify_free_mark(struct fsnotify_mark *mark)
|
|||||||
mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
|
mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
|
||||||
spin_unlock(&mark->lock);
|
spin_unlock(&mark->lock);
|
||||||
|
|
||||||
spin_lock(&destroy_lock);
|
call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
|
||||||
list_add(&mark->g_list, &destroy_list);
|
|
||||||
spin_unlock(&destroy_lock);
|
|
||||||
wake_up(&destroy_waitq);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Some groups like to know that marks are being freed. This is a
|
* Some groups like to know that marks are being freed. This is a
|
||||||
@ -385,11 +388,7 @@ err:
|
|||||||
|
|
||||||
spin_unlock(&mark->lock);
|
spin_unlock(&mark->lock);
|
||||||
|
|
||||||
spin_lock(&destroy_lock);
|
call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
|
||||||
list_add(&mark->g_list, &destroy_list);
|
|
||||||
spin_unlock(&destroy_lock);
|
|
||||||
wake_up(&destroy_waitq);
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -492,40 +491,3 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
|
|||||||
atomic_set(&mark->refcnt, 1);
|
atomic_set(&mark->refcnt, 1);
|
||||||
mark->free_mark = free_mark;
|
mark->free_mark = free_mark;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int fsnotify_mark_destroy(void *ignored)
|
|
||||||
{
|
|
||||||
struct fsnotify_mark *mark, *next;
|
|
||||||
struct list_head private_destroy_list;
|
|
||||||
|
|
||||||
for (;;) {
|
|
||||||
spin_lock(&destroy_lock);
|
|
||||||
/* exchange the list head */
|
|
||||||
list_replace_init(&destroy_list, &private_destroy_list);
|
|
||||||
spin_unlock(&destroy_lock);
|
|
||||||
|
|
||||||
synchronize_srcu(&fsnotify_mark_srcu);
|
|
||||||
|
|
||||||
list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
|
|
||||||
list_del_init(&mark->g_list);
|
|
||||||
fsnotify_put_mark(mark);
|
|
||||||
}
|
|
||||||
|
|
||||||
wait_event_interruptible(destroy_waitq, !list_empty(&destroy_list));
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int __init fsnotify_mark_init(void)
|
|
||||||
{
|
|
||||||
struct task_struct *thread;
|
|
||||||
|
|
||||||
thread = kthread_run(fsnotify_mark_destroy, NULL,
|
|
||||||
"fsnotify_mark");
|
|
||||||
if (IS_ERR(thread))
|
|
||||||
panic("unable to start fsnotify mark destruction thread.");
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
device_initcall(fsnotify_mark_init);
|
|
||||||
|
@ -220,7 +220,10 @@ struct fsnotify_mark {
|
|||||||
/* List of marks by group->i_fsnotify_marks. Also reused for queueing
|
/* List of marks by group->i_fsnotify_marks. Also reused for queueing
|
||||||
* mark into destroy_list when it's waiting for the end of SRCU period
|
* mark into destroy_list when it's waiting for the end of SRCU period
|
||||||
* before it can be freed. [group->mark_mutex] */
|
* before it can be freed. [group->mark_mutex] */
|
||||||
struct list_head g_list;
|
union {
|
||||||
|
struct list_head g_list;
|
||||||
|
struct rcu_head g_rcu;
|
||||||
|
};
|
||||||
/* Protects inode / mnt pointers, flags, masks */
|
/* Protects inode / mnt pointers, flags, masks */
|
||||||
spinlock_t lock;
|
spinlock_t lock;
|
||||||
/* List of marks for inode / vfsmount [obj_lock] */
|
/* List of marks for inode / vfsmount [obj_lock] */
|
||||||
|
Loading…
Reference in New Issue
Block a user