diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b31f6cbe3a30..3d0b29cb5e63 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1106,7 +1106,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
 }
 
 /**
- * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+ * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
  * @lock:		 the rt_mutex to take
  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
  *			 or TASK_UNINTERRUPTIBLE)
@@ -1115,10 +1115,10 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
  *
  * Must be called with lock->wait_lock held and interrupts disabled
  */
-static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
-				       unsigned int state,
-				       struct hrtimer_sleeper *timeout,
-				       struct rt_mutex_waiter *waiter)
+static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+					   unsigned int state,
+					   struct hrtimer_sleeper *timeout,
+					   struct rt_mutex_waiter *waiter)
 {
 	int ret = 0;
 
@@ -1168,20 +1168,72 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
 	}
 }
 
-/*
- * Slow path lock function:
+/**
+ * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
+ * @lock:	The rtmutex to block lock
+ * @state:	The task state for sleeping
+ * @chwalk:	Indicator whether full or partial chainwalk is requested
+ * @waiter:	Initializer waiter for blocking
  */
-static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
-				     unsigned int state,
-				     struct hrtimer_sleeper *timeout,
-				     enum rtmutex_chainwalk chwalk)
+static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+				       unsigned int state,
+				       enum rtmutex_chainwalk chwalk,
+				       struct rt_mutex_waiter *waiter)
+{
+	int ret;
+
+	lockdep_assert_held(&lock->wait_lock);
+
+	/* Try to acquire the lock again: */
+	if (try_to_take_rt_mutex(lock, current, NULL))
+		return 0;
+
+	set_current_state(state);
+
+	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
+
+	if (likely(!ret))
+		ret = rt_mutex_slowlock_block(lock, state, NULL, waiter);
+
+	if (unlikely(ret)) {
+		__set_current_state(TASK_RUNNING);
+		remove_waiter(lock, waiter);
+		rt_mutex_handle_deadlock(ret, chwalk, waiter);
+	}
+
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit
+	 * unconditionally. We might have to fix that up.
+	 */
+	fixup_rt_mutex_waiters(lock);
+	return ret;
+}
+
+static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
+					     unsigned int state)
 {
 	struct rt_mutex_waiter waiter;
-	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	rt_mutex_init_waiter(&waiter);
 
+	ret = __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter);
+
+	debug_rt_mutex_free_waiter(&waiter);
+	return ret;
+}
+
+/*
+ * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
+ * @lock:	The rtmutex to block lock
+ * @state:	The task state for sleeping
+ */
+static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+				     unsigned int state)
+{
+	unsigned long flags;
+	int ret;
+
 	/*
 	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
 	 * be called in early boot if the cmpxchg() fast path is disabled
@@ -1191,45 +1243,9 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * irqsave/restore variants.
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
-	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock, current, NULL)) {
-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-		return 0;
-	}
-
-	set_current_state(state);
-
-	/* Setup the timer, when timeout != NULL */
-	if (unlikely(timeout))
-		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
-	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
-
-	if (likely(!ret))
-		/* sleep on the mutex */
-		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
-
-	if (unlikely(ret)) {
-		__set_current_state(TASK_RUNNING);
-		remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
-	}
-
-	/*
-	 * try_to_take_rt_mutex() sets the waiter bit
-	 * unconditionally. We might have to fix that up.
-	 */
-	fixup_rt_mutex_waiters(lock);
-
+	ret = __rt_mutex_slowlock_locked(lock, state);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
-	/* Remove pending timer: */
-	if (unlikely(timeout))
-		hrtimer_cancel(&timeout->timer);
-
-	debug_rt_mutex_free_waiter(&waiter);
-
 	return ret;
 }
 
@@ -1239,7 +1255,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
 
-	return rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+	return rt_mutex_slowlock(lock, state);
 }
 
 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index 38de4b137b9e..c5136f4998bb 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -342,7 +342,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = rt_mutex_slowlock_block(lock, TASK_INTERRUPTIBLE, to, waiter);
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
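For orientation, the patch splits the old all-in-one rt_mutex_slowlock() into layers: __rt_mutex_lock() keeps the cmpxchg fast path, rt_mutex_slowlock() now only takes and drops lock->wait_lock, __rt_mutex_slowlock_locked() owns the on-stack waiter and requests RT_MUTEX_MIN_CHAINWALK, __rt_mutex_slowlock() enqueues the waiter and handles the failure cleanup, and the renamed rt_mutex_slowlock_block() performs the wait-wake-try-to-take loop. The sketch below is a minimal userspace model of that layering only, not kernel code: the model_* names, the atomic owner word, the elided wait_lock and the spin that stands in for schedule() are all illustrative assumptions.

/* layering_sketch.c - compile with: cc -std=c11 layering_sketch.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rtm_model {
	_Atomic(void *) owner;		/* NULL means unlocked */
};

struct waiter_model {
	void *task;			/* the blocked "task" */
};

enum chainwalk_model { MODEL_MIN_CHAINWALK, MODEL_FULL_CHAINWALK };

/* Stand-in for try_to_take_rt_mutex(): a single CAS on the owner word. */
static bool model_try_take(struct rtm_model *lock, void *task)
{
	void *expected = NULL;

	return atomic_compare_exchange_strong(&lock->owner, &expected, task);
}

/* Innermost layer, i.e. the renamed rt_mutex_slowlock_block(): the
 * wait-wake-try-to-take loop. The kernel sleeps via schedule(); the
 * model simply retries. */
static int model_slowlock_block(struct rtm_model *lock, void *task)
{
	while (!model_try_take(lock, task))
		;
	return 0;
}

/* Mirrors the new __rt_mutex_slowlock(): runs "with wait_lock held",
 * retries the acquisition, records the waiter, then delegates the
 * actual waiting to the block loop. */
static int model_slowlock_inner(struct rtm_model *lock, void *task,
				enum chainwalk_model chwalk,
				struct waiter_model *waiter)
{
	(void)chwalk;
	if (model_try_take(lock, task))
		return 0;
	waiter->task = task;		/* models task_blocks_on_rt_mutex() */
	return model_slowlock_block(lock, task);
}

/* Mirrors __rt_mutex_slowlock_locked(): owns the on-stack waiter and
 * always asks for the minimal chainwalk; no timeout is involved. */
static int model_slowlock_locked(struct rtm_model *lock, void *task)
{
	struct waiter_model waiter = { .task = NULL };

	return model_slowlock_inner(lock, task, MODEL_MIN_CHAINWALK, &waiter);
}

/* Mirrors the trimmed rt_mutex_slowlock(): in the kernel it only brackets
 * the locked helper with wait_lock acquire/release; elided here. */
static int model_slowlock(struct rtm_model *lock, void *task)
{
	return model_slowlock_locked(lock, task);
}

/* Mirrors __rt_mutex_lock(): fast-path CAS, slow path on failure. */
static int model_lock(struct rtm_model *lock, void *task)
{
	if (model_try_take(lock, task))
		return 0;
	return model_slowlock(lock, task);
}

int main(void)
{
	struct rtm_model lock = { .owner = NULL };
	int me;

	printf("model_lock() -> %d\n", model_lock(&lock, &me));
	return 0;
}

Note that after the split the timeout plumbing survives only in rt_mutex_slowlock_block(): rt_mutex_wait_proxy_lock() keeps passing its hrtimer_sleeper to it, while the ordinary lock path now passes NULL and no longer carries a timeout argument through rt_mutex_slowlock().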