epoll: pull all code between fetch_events and send_event into the loop

This is a no-op change which simplifies the follow-up patches.
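
For illustration, a condensed sketch of the resulting control flow in
ep_poll() after this patch (simplified from the diff below; the wait-entry
setup and the actual sleep are elided):

	do {
		eavail = ep_events_available(ep);
		if (!eavail)
			eavail = ep_busy_loop(ep, timed_out);
		if (eavail)
			goto send_events;

		if (signal_pending(current))
			return -EINTR;

		/* ... add 'wait' to ep->wq and sleep (elided) ... */

		/* Woken up (or timed out): assume events may be available. */
		eavail = 1;

		if (!list_empty_careful(&wait.entry)) {
			write_lock_irq(&ep->lock);
			/* A wakeup that raced with the timeout still needs harvesting. */
			if (timed_out)
				eavail = list_empty(&wait.entry);
			__remove_wait_queue(&ep->wq, &wait);
			write_unlock_irq(&ep->lock);
		}
	} while (0);

send_events:
	...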

Link: https://lkml.kernel.org/r/20201106231635.3528496-7-soheil.kdev@gmail.com
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Khazhismel Kumykov <khazhy@google.com>
Cc: Guantao Liu <guantaol@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Commit e8c85328b1 (parent 1493c47fb1)
Authored by Soheil Hassas Yeganeh on 2020-12-18 14:02:00 -08:00; committed by Linus Torvalds


@@ -1774,14 +1774,14 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	}
 
 fetch_events:
-	eavail = ep_events_available(ep);
-	if (!eavail)
-		eavail = ep_busy_loop(ep, timed_out);
+	do {
+		eavail = ep_events_available(ep);
+		if (!eavail)
+			eavail = ep_busy_loop(ep, timed_out);
 
-	if (eavail)
-		goto send_events;
+		if (eavail)
+			goto send_events;
 
-	do {
 		if (signal_pending(current))
 			return -EINTR;
 
@@ -1830,21 +1830,22 @@ fetch_events:
 		 * carefully under lock, below.
 		 */
 		eavail = 1;
-	} while (0);
 
-	if (!list_empty_careful(&wait.entry)) {
-		write_lock_irq(&ep->lock);
-		/*
-		 * If the thread timed out and is not on the wait queue, it
-		 * means that the thread was woken up after its timeout expired
-		 * before it could reacquire the lock. Thus, when wait.entry is
-		 * empty, it needs to harvest events.
-		 */
-		if (timed_out)
-			eavail = list_empty(&wait.entry);
-		__remove_wait_queue(&ep->wq, &wait);
-		write_unlock_irq(&ep->lock);
-	}
+		if (!list_empty_careful(&wait.entry)) {
+			write_lock_irq(&ep->lock);
+			/*
+			 * If the thread timed out and is not on the wait queue,
+			 * it means that the thread was woken up after its
+			 * timeout expired before it could reacquire the lock.
+			 * Thus, when wait.entry is empty, it needs to harvest
+			 * events.
+			 */
+			if (timed_out)
+				eavail = list_empty(&wait.entry);
+			__remove_wait_queue(&ep->wq, &wait);
+			write_unlock_irq(&ep->lock);
+		}
+	} while (0);
 
 send_events:
 	/*