fs/epoll: loosen irq safety in ep_insert() and ep_remove()

Both functions are called in a context similar to that of ep_modify(),
via epoll_ctl(2).  Just like ep_modify(), saving and restoring interrupts
is overkill in these calls as they will never be invoked with irqs disabled.
While ep_remove() can be called directly from EPOLL_CTL_DEL, it can also
be called when releasing the file; that path, too, runs with irqs enabled,
so the same reasoning applies.

Link: http://lkml.kernel.org/r/20180720172956.2883-3-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Jason Baron <jbaron@akamai.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Davidlohr Bueso 2018-08-21 21:56:41 -07:00 committed by Linus Torvalds
parent 002b343669
commit 304b18b8d6
1 changed file with 6 additions and 8 deletions

View File

@ -762,7 +762,6 @@ static void epi_rcu_free(struct rcu_head *head)
*/ */
static int ep_remove(struct eventpoll *ep, struct epitem *epi) static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{ {
unsigned long flags;
struct file *file = epi->ffd.file; struct file *file = epi->ffd.file;
/* /*
@ -777,10 +776,10 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
rb_erase_cached(&epi->rbn, &ep->rbr); rb_erase_cached(&epi->rbn, &ep->rbr);
spin_lock_irqsave(&ep->wq.lock, flags); spin_lock_irq(&ep->wq.lock);
if (ep_is_linked(&epi->rdllink)) if (ep_is_linked(&epi->rdllink))
list_del_init(&epi->rdllink); list_del_init(&epi->rdllink);
spin_unlock_irqrestore(&ep->wq.lock, flags); spin_unlock_irq(&ep->wq.lock);
wakeup_source_unregister(ep_wakeup_source(epi)); wakeup_source_unregister(ep_wakeup_source(epi));
/* /*
@ -1409,7 +1408,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
{ {
int error, pwake = 0; int error, pwake = 0;
__poll_t revents; __poll_t revents;
unsigned long flags;
long user_watches; long user_watches;
struct epitem *epi; struct epitem *epi;
struct ep_pqueue epq; struct ep_pqueue epq;
@ -1476,7 +1474,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
goto error_remove_epi; goto error_remove_epi;
/* We have to drop the new item inside our item list to keep track of it */ /* We have to drop the new item inside our item list to keep track of it */
spin_lock_irqsave(&ep->wq.lock, flags); spin_lock_irq(&ep->wq.lock);
/* record NAPI ID of new item if present */ /* record NAPI ID of new item if present */
ep_set_busy_poll_napi_id(epi); ep_set_busy_poll_napi_id(epi);
@ -1493,7 +1491,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
pwake++; pwake++;
} }
spin_unlock_irqrestore(&ep->wq.lock, flags); spin_unlock_irq(&ep->wq.lock);
atomic_long_inc(&ep->user->epoll_watches); atomic_long_inc(&ep->user->epoll_watches);
@ -1519,10 +1517,10 @@ error_unregister:
* list, since that is used/cleaned only inside a section bound by "mtx". * list, since that is used/cleaned only inside a section bound by "mtx".
* And ep_insert() is called with "mtx" held. * And ep_insert() is called with "mtx" held.
*/ */
spin_lock_irqsave(&ep->wq.lock, flags); spin_lock_irq(&ep->wq.lock);
if (ep_is_linked(&epi->rdllink)) if (ep_is_linked(&epi->rdllink))
list_del_init(&epi->rdllink); list_del_init(&epi->rdllink);
spin_unlock_irqrestore(&ep->wq.lock, flags); spin_unlock_irq(&ep->wq.lock);
wakeup_source_unregister(ep_wakeup_source(epi)); wakeup_source_unregister(ep_wakeup_source(epi));