xen: privcmd: Switch from mutex to spinlock for irqfds
irqfd_wakeup() gets EPOLLHUP when it is called by eventfd_release(), by way of wake_up_poll(&ctx->wqh, EPOLLHUP), which is called under spin_lock_irqsave(). We can't use a mutex here, as that would lead to a deadlock. Fix it by switching over to a spin lock.

Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/a66d7a7a9001424d432f52a9fc3931a1f345464f.1718703669.git.viresh.kumar@linaro.org
Signed-off-by: Juergen Gross <jgross@suse.com>
commit 1c68259309
parent 7cd23c1817
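The deadlock described in the commit message comes from the calling context: eventfd_release() invokes wake_up_poll(&ctx->wqh, EPOLLHUP) while already holding the waitqueue's internal lock, taken with spin_lock_irqsave(), so the registered wakeup callback runs in atomic context and must not sleep, which rules out taking a mutex there. The fragment below is only an illustrative sketch of that constraint; example_lock, example_list and example_wakeup are placeholder names, not the privcmd code.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/poll.h>

static DEFINE_SPINLOCK(example_lock);   /* placeholder lock protecting example_list */
static LIST_HEAD(example_list);

/*
 * Illustrative waitqueue callback: it may be invoked via
 * wake_up_poll(&wqh, EPOLLHUP) while the caller already holds wqh.lock
 * (a spinlock taken with IRQs saved), so it must never sleep.
 */
static int example_wakeup(wait_queue_entry_t *wait, unsigned int mode,
                          int sync, void *key)
{
        __poll_t events = key_to_poll(key);

        if (events & EPOLLHUP) {
                unsigned long flags;

                /* A spinlock is fine in atomic context; a mutex is not. */
                spin_lock_irqsave(&example_lock, flags);
                /* ... unlink this entry from example_list ... */
                spin_unlock_irqrestore(&example_lock, flags);
        }

        return 0;
}

spin_lock_irqsave() disables local interrupts and records their previous state in flags, so it is safe both in this atomic path and in any other context the callback might be reached from.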
@@ -846,7 +846,7 @@ out:
 #ifdef CONFIG_XEN_PRIVCMD_EVENTFD
 /* Irqfd support */
 static struct workqueue_struct *irqfd_cleanup_wq;
-static DEFINE_MUTEX(irqfds_lock);
+static DEFINE_SPINLOCK(irqfds_lock);
 static LIST_HEAD(irqfds_list);
 
 struct privcmd_kernel_irqfd {
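Side note on the hunk above: DEFINE_SPINLOCK() both declares and statically initializes the lock, so the one-line swap from DEFINE_MUTEX() needs no matching initialization call elsewhere. For a lock embedded in a dynamically allocated object the runtime form would be used instead; a minimal sketch with a hypothetical structure, not from this driver:

#include <linux/spinlock.h>

struct some_ctx {
        spinlock_t lock;                /* hypothetical per-object lock */
};

static void some_ctx_init(struct some_ctx *ctx)
{
        spin_lock_init(&ctx->lock);     /* runtime equivalent of DEFINE_SPINLOCK() */
}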
@@ -910,9 +910,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
 		irqfd_inject(kirqfd);
 
 	if (flags & EPOLLHUP) {
-		mutex_lock(&irqfds_lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&irqfds_lock, flags);
 		irqfd_deactivate(kirqfd);
-		mutex_unlock(&irqfds_lock);
+		spin_unlock_irqrestore(&irqfds_lock, flags);
 	}
 
 	return 0;
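A readability wrinkle in the hunk above, noted as an editorial observation rather than as part of the patch: irqfd_wakeup() already has a __poll_t variable named flags holding the poll mask (the one tested in "if (flags & EPOLLHUP)"), so the new unsigned long flags declared inside the block shadows it. A hypothetical variant that avoids the shadowing would simply use a different name for the saved IRQ state:

	if (flags & EPOLLHUP) {
		unsigned long irq_flags;	/* hypothetical rename to avoid shadowing the poll mask */

		spin_lock_irqsave(&irqfds_lock, irq_flags);
		irqfd_deactivate(kirqfd);
		spin_unlock_irqrestore(&irqfds_lock, irq_flags);
	}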
@@ -930,6 +932,7 @@ irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
 static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
 {
 	struct privcmd_kernel_irqfd *kirqfd, *tmp;
+	unsigned long flags;
 	__poll_t events;
 	struct fd f;
 	void *dm_op;
@@ -969,18 +972,18 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
 	init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
 	init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry(tmp, &irqfds_list, list) {
 		if (kirqfd->eventfd == tmp->eventfd) {
 			ret = -EBUSY;
-			mutex_unlock(&irqfds_lock);
+			spin_unlock_irqrestore(&irqfds_lock, flags);
 			goto error_eventfd;
 		}
 	}
 
 	list_add_tail(&kirqfd->list, &irqfds_list);
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	/*
 	 * Check if there was an event already pending on the eventfd before we
@@ -1012,12 +1015,13 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
 {
 	struct privcmd_kernel_irqfd *kirqfd;
 	struct eventfd_ctx *eventfd;
+	unsigned long flags;
 
 	eventfd = eventfd_ctx_fdget(irqfd->fd);
 	if (IS_ERR(eventfd))
 		return PTR_ERR(eventfd);
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry(kirqfd, &irqfds_list, list) {
 		if (kirqfd->eventfd == eventfd) {
@@ -1026,7 +1030,7 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
 		}
 	}
 
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	eventfd_ctx_put(eventfd);
 
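For readers less familiar with the eventfd API used in the two hunks above: eventfd_ctx_fdget() resolves a user-supplied file descriptor to a struct eventfd_ctx and takes a reference on it, and that reference must later be dropped with eventfd_ctx_put(), which is what privcmd_irqfd_deassign() does once the lookup is finished. A minimal, self-contained sketch of that pairing; use_eventfd() and its fd parameter are hypothetical:

#include <linux/eventfd.h>
#include <linux/err.h>

/* Hypothetical helper: resolve a user-supplied fd to an eventfd context,
 * use it briefly, and drop the reference again. */
static int use_eventfd(int fd)
{
        struct eventfd_ctx *ctx;

        ctx = eventfd_ctx_fdget(fd);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        /* ... look up or signal something keyed by ctx ... */

        eventfd_ctx_put(ctx);   /* balances the reference taken by eventfd_ctx_fdget() */
        return 0;
}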
@@ -1074,13 +1078,14 @@ static int privcmd_irqfd_init(void)
 static void privcmd_irqfd_exit(void)
 {
 	struct privcmd_kernel_irqfd *kirqfd, *tmp;
+	unsigned long flags;
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
 		irqfd_deactivate(kirqfd);
 
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	destroy_workqueue(irqfd_cleanup_wq);
 }
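Finally, a note on why it is fine to call irqfd_deactivate() with the spinlock held: in this driver (in code outside the hunks shown) deactivation only unlinks the entry and queues its shutdown work on irqfd_cleanup_wq, so any teardown that can sleep, such as dropping the eventfd reference and freeing the entry, happens later in process context. The sketch below illustrates that split under those assumptions; entry, entry_remove, entry_deactivate, entry_shutdown_work and cleanup_wq are placeholder names, not the privcmd identifiers.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(entry_lock);             /* protects entry_list */
static LIST_HEAD(entry_list);
static struct workqueue_struct *cleanup_wq;     /* assumed to be created during init */

struct entry {
        struct list_head list;
        struct work_struct shutdown;
};

/* Never sleeps, so it may run with entry_lock held in atomic context. */
static void entry_deactivate(struct entry *e)
{
        list_del_init(&e->list);
        queue_work(cleanup_wq, &e->shutdown);
}

/* Example caller, mirroring the locking pattern used throughout the patch. */
static void entry_remove(struct entry *e)
{
        unsigned long flags;

        spin_lock_irqsave(&entry_lock, flags);
        entry_deactivate(e);
        spin_unlock_irqrestore(&entry_lock, flags);
}

/* Runs later from the workqueue, in process context, where sleeping is allowed. */
static void entry_shutdown_work(struct work_struct *work)
{
        struct entry *e = container_of(work, struct entry, shutdown);

        /* ... drop references that may sleep, then free the entry ... */
        kfree(e);
}

In a full implementation the shutdown work would be bound with INIT_WORK() when the entry is created and cleanup_wq would be set up with alloc_workqueue() during module init.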