sched: Add cond_resched_rwlock
Safely rescheduling while holding a spin lock is essential for keeping long-running kernel operations running smoothly. Add the same facility for rwlocks: cond_resched_rwlock_read() and cond_resched_rwlock_write(). CC: Ingo Molnar <mingo@redhat.com> CC: Will Deacon <will@kernel.org> Acked-by: Peter Zijlstra <peterz@infradead.org> Acked-by: Davidlohr Bueso <dbueso@suse.de> Acked-by: Waiman Long <longman@redhat.com> Acked-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Ben Gardon <bgardon@google.com> Message-Id: <20210202185734.1680553-9-bgardon@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
a09a689a53
commit
f3d4b4b1dc
@ -1883,12 +1883,24 @@ static inline int _cond_resched(void) { return 0; }
|
||||
})
|
||||
|
||||
extern int __cond_resched_lock(spinlock_t *lock);
|
||||
extern int __cond_resched_rwlock_read(rwlock_t *lock);
|
||||
extern int __cond_resched_rwlock_write(rwlock_t *lock);
|
||||
|
||||
#define cond_resched_lock(lock) ({ \
|
||||
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
|
||||
__cond_resched_lock(lock); \
|
||||
})
|
||||
|
||||
#define cond_resched_rwlock_read(lock) ({ \
|
||||
__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
|
||||
__cond_resched_rwlock_read(lock); \
|
||||
})
|
||||
|
||||
#define cond_resched_rwlock_write(lock) ({ \
|
||||
__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
|
||||
__cond_resched_rwlock_write(lock); \
|
||||
})
|
||||
|
||||
static inline void cond_resched_rcu(void)
|
||||
{
|
||||
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
|
||||
|
@ -6695,6 +6695,46 @@ int __cond_resched_lock(spinlock_t *lock)
|
||||
}
|
||||
EXPORT_SYMBOL(__cond_resched_lock);
|
||||
|
||||
/*
 * Reschedule (or at least breathe) while holding a read-held rwlock:
 * if a reschedule is pending or another CPU is spinning on the lock,
 * drop it, yield the CPU, and re-take it.
 *
 * Returns 1 if the lock was dropped and re-acquired, 0 if nothing needed
 * doing. Caller must hold @lock for read on entry; it is held again on
 * return.
 */
int __cond_resched_rwlock_read(rwlock_t *lock)
{
	int need = should_resched(PREEMPT_LOCK_OFFSET);

	lockdep_assert_held_read(lock);

	/* Fast path: no pending reschedule and nobody waiting on the lock. */
	if (!rwlock_needbreak(lock) && !need)
		return 0;

	read_unlock(lock);
	if (need)
		preempt_schedule_common();
	else
		cpu_relax();
	read_lock(lock);

	return 1;
}
EXPORT_SYMBOL(__cond_resched_rwlock_read);
|
||||
|
||||
/*
 * Write-lock counterpart of __cond_resched_rwlock_read(): if a
 * reschedule is pending or another CPU is spinning on the lock, drop
 * the write lock, yield the CPU, and re-take it.
 *
 * Returns 1 if the lock was dropped and re-acquired, 0 if nothing needed
 * doing. Caller must hold @lock for write on entry; it is held again on
 * return.
 */
int __cond_resched_rwlock_write(rwlock_t *lock)
{
	int need = should_resched(PREEMPT_LOCK_OFFSET);

	lockdep_assert_held_write(lock);

	/* Fast path: no pending reschedule and nobody waiting on the lock. */
	if (!rwlock_needbreak(lock) && !need)
		return 0;

	write_unlock(lock);
	if (need)
		preempt_schedule_common();
	else
		cpu_relax();
	write_lock(lock);

	return 1;
}
EXPORT_SYMBOL(__cond_resched_rwlock_write);
|
||||
|
||||
/**
|
||||
* yield - yield the current processor to other threads.
|
||||
*
|
||||
|
Loading…
Reference in New Issue
Block a user