SUNRPC: Refactor rpc_sleep_on()
rpc_sleep_on() does not need to set task->tk_callback under the queue lock, so move that out. Also refactor the check for whether the task is active.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit 87150aaed9
parent 8ba6a92d01
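The locking change is the interesting part: task->tk_callback belongs to the task being put to sleep, not to the shared wait queue, so assigning it does not have to happen inside the queue's spinlock. As a rough illustration of that pattern only (a userspace analogue using pthreads, not SUNRPC code; every name in it is invented for the sketch), per-task setup is done first and only the shared queue manipulation stays in the critical section:

/* Hypothetical userspace sketch of the pattern this patch applies.
 * None of these names are SUNRPC APIs. */
#include <pthread.h>
#include <stddef.h>

struct task {
	void (*callback)(struct task *);
	struct task *next;
};

struct wait_queue {
	pthread_mutex_t lock;
	struct task *head;
};

static void queue_sleep_on(struct wait_queue *q, struct task *t,
			   void (*action)(struct task *))
{
	/* Per-task setup: only the enqueuing thread touches t->callback here,
	 * so it can be written before the lock is taken. */
	t->callback = action;

	/* Only the shared queue state needs the lock. */
	pthread_mutex_lock(&q->lock);
	t->next = q->head;
	q->head = t;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct wait_queue q = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct task t = { NULL, NULL };

	queue_sleep_on(&q, &t, NULL);
	return 0;
}

The diff below makes the same split: rpc_set_tk_callback() runs before spin_lock_bh(&q->lock), and __rpc_sleep_on_priority() no longer takes the action argument at all.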
@@ -362,7 +362,6 @@ static void rpc_make_runnable(struct workqueue_struct *wq,
  */
 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 		struct rpc_task *task,
-		rpc_action action,
 		unsigned char queue_priority)
 {
 	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
@@ -372,27 +371,39 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 
 	__rpc_add_wait_queue(q, task, queue_priority);
 
-	WARN_ON_ONCE(task->tk_callback != NULL);
-	task->tk_callback = action;
 	__rpc_add_timer(q, task);
 }
 
+static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
+{
+	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
+		task->tk_callback = action;
+}
+
+static bool rpc_sleep_check_activated(struct rpc_task *task)
+{
+	/* We shouldn't ever put an inactive task to sleep */
+	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
+		task->tk_status = -EIO;
+		rpc_put_task_async(task);
+		return false;
+	}
+	return true;
+}
+
 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 		rpc_action action)
 {
-	/* We shouldn't ever put an inactive task to sleep */
-	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
-	if (!RPC_IS_ACTIVATED(task)) {
-		task->tk_status = -EIO;
-		rpc_put_task_async(task);
+	if (!rpc_sleep_check_activated(task))
 		return;
-	}
+
+	rpc_set_tk_callback(task, action);
 
 	/*
 	 * Protect the queue operations.
 	 */
 	spin_lock_bh(&q->lock);
-	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
+	__rpc_sleep_on_priority(q, task, task->tk_priority);
 	spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on);
@@ -400,19 +411,16 @@ EXPORT_SYMBOL_GPL(rpc_sleep_on);
 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 		rpc_action action, int priority)
 {
-	/* We shouldn't ever put an inactive task to sleep */
-	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
-	if (!RPC_IS_ACTIVATED(task)) {
-		task->tk_status = -EIO;
-		rpc_put_task_async(task);
+	if (!rpc_sleep_check_activated(task))
 		return;
-	}
+
+	rpc_set_tk_callback(task, action);
 
 	/*
 	 * Protect the queue operations.
 	 */
 	spin_lock_bh(&q->lock);
-	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
+	__rpc_sleep_on_priority(q, task, priority - RPC_PRIORITY_LOW);
 	spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
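Read straight through, the post-patch rpc_sleep_on() assembled from the added and context lines above looks like this; rpc_sleep_on_priority() has the same shape, but passes priority - RPC_PRIORITY_LOW instead of task->tk_priority:

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action)
{
	/* Bail out (and drop the task) if it was never activated. */
	if (!rpc_sleep_check_activated(task))
		return;

	/* Per-task callback assignment, done outside the queue lock. */
	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock_bh(&q->lock);
}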