forked from Minki/linux
locking/mutex, sched/wait: Add mutex_lock_io()
We sometimes end up propagating IO blocking through mutexes; however, because there currently is no way of annotating mutex sleeps as iowait, there are cases where iowait and /proc/stat:procs_blocked report misleading numbers obscuring the actual state of the system. This patch adds mutex_lock_io() so that mutex sleeps can be marked as iowait in those cases. Signed-off-by: Tejun Heo <tj@kernel.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Jens Axboe <axboe@kernel.dk> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: adilger.kernel@dilger.ca Cc: jack@suse.com Cc: kernel-team@fb.com Cc: mingbo@fb.com Cc: tytso@mit.edu Link: http://lkml.kernel.org/r/1477673892-28940-4-git-send-email-tj@kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent commit: 10ab56434f
this commit: 1460cb65a1
@ -156,10 +156,12 @@ extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
|
||||
unsigned int subclass);
|
||||
extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
|
||||
unsigned int subclass);
|
||||
/* iowait-annotated acquire with an explicit lockdep subclass; the plain
 * mutex_lock_io() below maps onto this with subclass 0. */
extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
|
||||
|
||||
/* With CONFIG_DEBUG_LOCK_ALLOC the plain lock calls expand to the
 * _nested variants at subclass 0, so every acquisition carries a
 * lockdep annotation. */
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
|
||||
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
|
||||
#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
|
||||
#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
|
||||
|
||||
#define mutex_lock_nest_lock(lock, nest_lock) \
|
||||
do { \
|
||||
@ -171,11 +173,13 @@ do { \
|
||||
extern void mutex_lock(struct mutex *lock);
|
||||
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
|
||||
extern int __must_check mutex_lock_killable(struct mutex *lock);
|
||||
extern void mutex_lock_io(struct mutex *lock);
|
||||
|
||||
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
|
||||
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
|
||||
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
|
||||
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
|
||||
# define mutex_lock_nest_io(lock, nest_lock) mutex_io(lock)
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -783,6 +783,20 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
|
||||
|
||||
/**
 * mutex_lock_io_nested - acquire the mutex, accounting the sleep as iowait
 * @lock: the mutex to be acquired
 * @subclass: lockdep subclass for nesting annotation
 *
 * Acquires @lock in TASK_UNINTERRUPTIBLE state, with any time spent
 * blocked reported as iowait so that iowait and procs_blocked statistics
 * reflect tasks stuck behind IO-holding mutexes rather than hiding them.
 */
void __sched
|
||||
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
|
||||
{
|
||||
int token;
|
||||
|
||||
/* Mutexes may sleep: flag atomic-context callers early. */
might_sleep();
|
||||
|
||||
/* Mark this task as in iowait for the duration of the lock wait. */
token = io_schedule_prepare();
|
||||
/* Common acquisition path, uninterruptible sleep.
 * NOTE(review): trailing NULL, _RET_IP_, NULL, 0 presumably mean no
 * nest_lock and no ww_ctx — confirm against __mutex_lock_common()'s
 * prototype, which is outside this view. */
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
|
||||
subclass, NULL, _RET_IP_, NULL, 0);
|
||||
/* Restore the pre-iowait scheduling state. */
io_schedule_finish(token);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
|
||||
|
||||
static inline int
|
||||
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
|
||||
{
|
||||
@ -950,6 +964,16 @@ int __sched mutex_lock_killable(struct mutex *lock)
|
||||
}
|
||||
EXPORT_SYMBOL(mutex_lock_killable);
|
||||
|
||||
/**
 * mutex_lock_io - acquire the mutex, accounting the sleep as iowait
 * @lock: the mutex to be acquired
 *
 * Same as mutex_lock(), but any sleep while waiting for @lock is
 * reported as iowait: io_schedule_prepare()/io_schedule_finish()
 * bracket the acquisition, so the statement order here is essential.
 * (No might_sleep() needed — mutex_lock() performs that check.)
 */
void __sched mutex_lock_io(struct mutex *lock)
|
||||
{
|
||||
int token;
|
||||
|
||||
/* Enter iowait accounting before we can possibly block on @lock. */
token = io_schedule_prepare();
|
||||
mutex_lock(lock);
|
||||
/* Lock acquired — leave iowait state. */
io_schedule_finish(token);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mutex_lock_io);
|
||||
|
||||
static noinline void __sched
|
||||
__mutex_lock_slowpath(struct mutex *lock)
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user