Merge branch 'socket-poll-related-cleanups-v2'

Christoph Hellwig says:

====================
socket poll related cleanups v2

A couple of cleanups I stumbled upon when studying the networking
poll code.

Changes since v1:
 - drop a disputed patch from this series (to be sent separately)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit b0a0381987
Committed by David S. Miller on 2018-07-30 09:10:25 -07:00
15 changed files with 30 additions and 36 deletions
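
The change at the heart of the series: sock_poll_wait() drops its wait_queue_head_t argument and instead finds the wait queue through the socket behind file->private_data, and the sock_poll_busy_loop()/sock_poll_busy_flag() helpers are folded directly into sock_poll(). Below is a minimal, illustrative sketch of a protocol ->poll handler written against the new calling convention; it is not taken from any of the files touched by this series, and the function name example_poll and its readiness checks are hypothetical.

#include <linux/net.h>
#include <linux/poll.h>
#include <net/sock.h>

/* Hypothetical ->poll handler: shows only the new two-argument
 * sock_poll_wait() call; real handlers (tcp_poll, datagram_poll, ...)
 * track far more state.
 */
static __poll_t example_poll(struct file *file, struct socket *sock,
                             poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask = 0;

        /* Old form: sock_poll_wait(file, sk_sleep(sk), wait);
         * New form: the wait queue is derived from file->private_data
         * inside sock_poll_wait() itself.
         */
        sock_poll_wait(file, wait);

        if (sk->sk_err)
                mask |= EPOLLERR;
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}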


@@ -1071,7 +1071,7 @@ __poll_t af_alg_poll(struct file *file, struct socket *sock,
         struct af_alg_ctx *ctx = ask->private;
         __poll_t mask;
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
         mask = 0;
 
         if (!ctx->more || ctx->used)


@@ -121,21 +121,6 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
 #endif
 }
 
-static inline void sock_poll_busy_loop(struct socket *sock, __poll_t events)
-{
-        if (sk_can_busy_loop(sock->sk) &&
-            events && (events & POLL_BUSY_LOOP)) {
-                /* once, only if requested by syscall */
-                sk_busy_loop(sock->sk, 1);
-        }
-}
-
-/* if this socket can poll_ll, tell the system call */
-static inline __poll_t sock_poll_busy_flag(struct socket *sock)
-{
-        return sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0;
-}
-
 /* used in the NIC receive handler to mark the skb */
 static inline void skb_mark_napi_id(struct sk_buff *skb,
                                     struct napi_struct *napi)


@@ -2057,16 +2057,16 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
 /**
  * sock_poll_wait - place memory barrier behind the poll_wait call.
  * @filp: file
- * @wait_address: socket wait queue
  * @p: poll_table
  *
  * See the comments in the wq_has_sleeper function.
  */
-static inline void sock_poll_wait(struct file *filp,
-                wait_queue_head_t *wait_address, poll_table *p)
+static inline void sock_poll_wait(struct file *filp, poll_table *p)
 {
-        if (!poll_does_not_wait(p) && wait_address) {
-                poll_wait(filp, wait_address, p);
+        struct socket *sock = filp->private_data;
+
+        if (!poll_does_not_wait(p)) {
+                poll_wait(filp, &sock->wq->wait, p);
                 /* We need to be sure we are in sync with the
                  * socket flags modification.
                  *


@@ -653,7 +653,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
         struct atm_vcc *vcc;
         __poll_t mask;
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
         mask = 0;
 
         vcc = ATM_SD(sock);


@@ -941,7 +941,7 @@ static __poll_t caif_poll(struct file *file,
         __poll_t mask;
         struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
         mask = 0;
 
         /* exceptional events? */


@@ -837,7 +837,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
         struct sock *sk = sock->sk;
         __poll_t mask;
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
         mask = 0;
 
         /* exceptional events? */


@@ -325,7 +325,7 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
         __poll_t mask;
         struct sock *sk = sock->sk;
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
         if (sk->sk_state == DCCP_LISTEN)
                 return inet_csk_listen_poll(sk);


@@ -507,7 +507,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
         const struct tcp_sock *tp = tcp_sk(sk);
         int state;
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
 
         state = inet_sk_state_load(sk);
         if (state == TCP_LISTEN)


@@ -1494,7 +1494,7 @@ __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
         struct sock *sk = sock->sk;
         __poll_t mask = 0;
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
 
         if (sk->sk_state == IUCV_LISTEN)
                 return iucv_accept_poll(sk);


@@ -556,7 +556,7 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
         pr_debug("%p\n", sk);
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
 
         if (sk->sk_state == LLCP_LISTEN)
                 return llcp_accept_poll(sk);


@@ -741,7 +741,7 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
         struct rxrpc_sock *rx = rxrpc_sk(sk);
         __poll_t mask;
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
         mask = 0;
 
         /* the socket is readable if there are any messages waiting on the Rx


@@ -1535,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
                         mask |= EPOLLERR;
         } else {
                 if (sk->sk_state != SMC_CLOSED)
-                        sock_poll_wait(file, sk_sleep(sk), wait);
+                        sock_poll_wait(file, wait);
                 if (sk->sk_err)
                         mask |= EPOLLERR;
                 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||


@@ -1130,12 +1130,21 @@ EXPORT_SYMBOL(sock_create_lite);
 static __poll_t sock_poll(struct file *file, poll_table *wait)
 {
         struct socket *sock = file->private_data;
-        __poll_t events = poll_requested_events(wait);
+        __poll_t events = poll_requested_events(wait), flag = 0;
 
-        sock_poll_busy_loop(sock, events);
         if (!sock->ops->poll)
                 return 0;
-        return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock);
+
+        if (sk_can_busy_loop(sock->sk)) {
+                /* poll once if requested by the syscall */
+                if (events & POLL_BUSY_LOOP)
+                        sk_busy_loop(sock->sk, 1);
+
+                /* if this socket can poll_ll, tell the system call */
+                flag = POLL_BUSY_LOOP;
+        }
+
+        return sock->ops->poll(file, sock, wait) | flag;
 }
 
 static int sock_mmap(struct file *file, struct vm_area_struct *vma)


@@ -716,7 +716,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
         struct tipc_sock *tsk = tipc_sk(sk);
         __poll_t revents = 0;
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
 
         if (sk->sk_shutdown & RCV_SHUTDOWN)
                 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;


@@ -2635,7 +2635,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
         struct sock *sk = sock->sk;
         __poll_t mask;
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
         mask = 0;
 
         /* exceptional events? */
@@ -2672,7 +2672,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
         unsigned int writable;
         __poll_t mask;
 
-        sock_poll_wait(file, sk_sleep(sk), wait);
+        sock_poll_wait(file, wait);
         mask = 0;
 
         /* exceptional events? */