nbd: Add locking for tasks

The timeout handling introduced in
	7e2893a16d (nbd: Fix timeout detection)
introduces a race condition which may lead to killing of tasks that are
not in nbd context anymore. This has not been observed or reproduced
yet.

This patch adds locking around the critical uses of task_recv and
task_send to avoid killing tasks that have already left the NBD thread
functions. The lock is only acquired when a timeout occurs or when the
nbd device starts/stops.

Reported-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Markus Pargmann <mpa@pengutronix.de>
Reviewed-by: Ben Hutchings <ben@decadent.org.uk>
Fixes: 7e2893a16d ("nbd: Fix timeout detection")
Signed-off-by: Jens Axboe <axboe@fb.com>
Commit: dcc909d90c (parent c3984cc994)
Author: Markus Pargmann, 2015-10-06 20:03:54 +02:00
Committer: Jens Axboe
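
For readers following along, here is a minimal user-space sketch of the race
the commit message describes and of how a single lock closes it. This is not
part of the patch: it is plain C with pthreads, and names such as
worker_registered and timeout_handler are hypothetical. The point it
illustrates is that the "timeout" side must look up and signal the worker
under the same lock the worker takes when it registers and deregisters
itself, mirroring what tasks_lock does for task_recv/task_send in the diff
below.

/*
 * Illustration only -- not part of the kernel patch.  A "timeout" path
 * signals a worker it finds through a shared registration, while the
 * worker may already be on its way out.  Both sides take the same lock.
 */
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t tasks_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t worker_tid;            /* stands in for nbd->task_recv */
static bool worker_registered;

static void *worker(void *arg)
{
	/* Register ourselves, as nbd_thread_recv() does with task_recv. */
	pthread_mutex_lock(&tasks_lock);
	worker_tid = pthread_self();
	worker_registered = true;
	pthread_mutex_unlock(&tasks_lock);

	sleep(1);                       /* pretend to service requests */

	/* Deregister under the same lock before leaving the function. */
	pthread_mutex_lock(&tasks_lock);
	worker_registered = false;
	pthread_mutex_unlock(&tasks_lock);
	return NULL;
}

/* Analogue of nbd_xmit_timeout(): signal only a still-registered worker. */
static void timeout_handler(void)
{
	pthread_mutex_lock(&tasks_lock);
	if (worker_registered)
		pthread_kill(worker_tid, SIGUSR1);
	pthread_mutex_unlock(&tasks_lock);
}

int main(void)
{
	pthread_t t;

	signal(SIGUSR1, SIG_IGN);       /* keep the demo alive when signaled */
	pthread_create(&t, NULL, worker, NULL);
	timeout_handler();              /* the racing "timeout" */
	pthread_join(t, NULL);
	printf("done\n");
	return 0;
}

In the kernel patch the same role is played by nbd->tasks_lock:
nbd_xmit_timeout() only calls force_sig() while holding it, and the recv/send
threads set and clear task_recv/task_send under it, so a signal can no longer
be delivered to a task that has already left the NBD thread functions.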


@@ -60,6 +60,7 @@ struct nbd_device {
 	bool disconnect; /* a disconnect has been requested by user */
 
 	struct timer_list timeout_timer;
+	spinlock_t tasks_lock;
 	struct task_struct *task_recv;
 	struct task_struct *task_send;
 
@@ -140,21 +141,23 @@ static void sock_shutdown(struct nbd_device *nbd)
 static void nbd_xmit_timeout(unsigned long arg)
 {
 	struct nbd_device *nbd = (struct nbd_device *)arg;
-	struct task_struct *task;
+	unsigned long flags;
 
 	if (list_empty(&nbd->queue_head))
 		return;
 
 	nbd->disconnect = true;
 
-	task = READ_ONCE(nbd->task_recv);
-	if (task)
-		force_sig(SIGKILL, task);
+	spin_lock_irqsave(&nbd->tasks_lock, flags);
+
+	if (nbd->task_recv)
+		force_sig(SIGKILL, nbd->task_recv);
 
-	task = READ_ONCE(nbd->task_send);
-	if (task)
+	if (nbd->task_send)
 		force_sig(SIGKILL, nbd->task_send);
 
+	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
 	dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
 }
 
@@ -403,17 +406,24 @@ static int nbd_thread_recv(struct nbd_device *nbd)
 {
 	struct request *req;
 	int ret;
+	unsigned long flags;
 
 	BUG_ON(nbd->magic != NBD_MAGIC);
 
 	sk_set_memalloc(nbd->sock->sk);
 
+	spin_lock_irqsave(&nbd->tasks_lock, flags);
 	nbd->task_recv = current;
+	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
 	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
 	if (ret) {
 		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+
+		spin_lock_irqsave(&nbd->tasks_lock, flags);
 		nbd->task_recv = NULL;
+		spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
 		return ret;
 	}
 
@@ -429,7 +439,9 @@ static int nbd_thread_recv(struct nbd_device *nbd)
 
 	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
 
+	spin_lock_irqsave(&nbd->tasks_lock, flags);
 	nbd->task_recv = NULL;
+	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
 	if (signal_pending(current)) {
 		siginfo_t info;
@@ -534,8 +546,11 @@ static int nbd_thread_send(void *data)
 {
 	struct nbd_device *nbd = data;
 	struct request *req;
+	unsigned long flags;
 
+	spin_lock_irqsave(&nbd->tasks_lock, flags);
 	nbd->task_send = current;
+	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
 	set_user_nice(current, MIN_NICE);
 	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
@@ -572,7 +587,15 @@ static int nbd_thread_send(void *data)
 		nbd_handle_req(nbd, req);
 	}
 
+	spin_lock_irqsave(&nbd->tasks_lock, flags);
 	nbd->task_send = NULL;
+	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
+	/* Clear maybe pending signals */
+	if (signal_pending(current)) {
+		siginfo_t info;
+		dequeue_signal_lock(current, &current->blocked, &info);
+	}
 
 	return 0;
 }
@@ -1052,6 +1075,7 @@ static int __init nbd_init(void)
 		nbd_dev[i].magic = NBD_MAGIC;
 		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
 		spin_lock_init(&nbd_dev[i].queue_lock);
+		spin_lock_init(&nbd_dev[i].tasks_lock);
 		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
 		mutex_init(&nbd_dev[i].tx_lock);
 		init_timer(&nbd_dev[i].timeout_timer);
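
Design notes (inferred from the diff above, not text from the patch):
nbd_xmit_timeout() runs as a timer callback, i.e. in atomic context, so a
sleeping lock is not an option there; using spin_lock_irqsave()/
spin_unlock_irqrestore() lets the same tasks_lock be taken safely from both
the timer and the process-context thread functions. The signal-draining block
added at the end of nbd_thread_send() mirrors the one already present in
nbd_thread_recv(), so a SIGKILL delivered by the timeout handler does not
remain pending on the sender kthread after it has cleared task_send.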