locking: Make owner_on_cpu() into <linux/sched.h>
Move owner_on_cpu() from kernel/locking/rwsem.c into include/linux/sched.h
under CONFIG_SMP, then use it in mutex/rwsem/rtmutex to simplify the code.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211203075935.136808-2-wangkefeng.wang@huawei.com
commit c0bed69daf
parent 9a75bd0c52
committed by Peter Zijlstra
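For context, the helper being moved is a one-line wrapper around the open-coded
owner->on_cpu / vcpu_is_preempted() check that the hunks below delete from the
mutex spinning code. A minimal sketch of how it would read in
include/linux/sched.h, assuming it lands inside the existing CONFIG_SMP block as
the commit message states:

/* Sketch only: the body mirrors the open-coded check removed below. */
static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * As lock holder preemption issue, we both skip spinning if
	 * task is not on cpu or its cpu is preempted
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

With such a helper in place, callers like mutex_can_spin_on_owner() below can
collapse their two-part check into a single owner_on_cpu(owner) call.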
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -367,8 +367,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 		/*
 		 * Use vcpu_is_preempted to detect lock holder preemption issue.
 		 */
-		if (!owner->on_cpu || need_resched() ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		if (!owner_on_cpu(owner) || need_resched()) {
 			ret = false;
 			break;
 		}
@@ -403,14 +402,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 	 * structure won't go away during the spinning period.
 	 */
 	owner = __mutex_owner(lock);
-
-	/*
-	 * As lock holder preemption issue, we both skip spinning if task is not
-	 * on cpu or its cpu is preempted
-	 */
-
 	if (owner)
-		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+		retval = owner_on_cpu(owner);
 
 	/*
 	 * If lock->owner is not set, the mutex has been released. Return true