net: refactor bind_bucket fastreuse into helper
Refactor the fastreuse update code in inet_csk_get_port into a small
helper function that can be called from other places.

Acked-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Tim Froidcoeur <tim.froidcoeur@tessares.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
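As a reading aid, the sketch below shows the calling convention the new
helper is meant to support: a bind-time path that has already looked up the
inet_bind_bucket and holds the bucket lock can recompute the fastreuse flags
before hashing the socket onto the bucket. The caller name and surrounding
code are illustrative assumptions, not part of this patch.

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>

/* Hypothetical caller, for illustration only (not taken from this patch). */
static void example_bind_to_bucket(struct sock *sk,
				   struct inet_bind_hashbucket *head,
				   struct inet_bind_bucket *tb,
				   unsigned short port)
{
	spin_lock_bh(&head->lock);
	/* recompute tb->fastreuse / tb->fastreuseport for this socket */
	inet_csk_update_fastreuse(tb, sk);
	/* add sk to the bucket's owner list, as inet_csk_get_port does */
	inet_bind_hash(sk, tb, port);
	spin_unlock_bh(&head->lock);
}

Keeping the flag update in one helper means every path that adds a socket to
a bind bucket applies the same fastreuse/fastreuseport rules as
inet_csk_get_port.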
parent 1b8ef1423d
commit 62ffc589ab
include/net/inet_connection_sock.h:

@@ -304,6 +304,10 @@ void inet_csk_listen_stop(struct sock *sk);
 
 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
+/* update the fast reuse flag when adding a socket */
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+			       struct sock *sk);
+
 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 
 #define TCP_PINGPONG_THRESH	3
net/ipv4/inet_connection_sock.c:

@@ -296,55 +296,12 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
 				    ipv6_only_sock(sk), true, false);
 }
 
-/* Obtain a reference to a local port for the given sock,
- * if snum is zero it means select any available local port.
- * We try to allocate an odd port (and leave even ports for connect())
- */
-int inet_csk_get_port(struct sock *sk, unsigned short snum)
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+			       struct sock *sk)
 {
-	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
-	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
-	int ret = 1, port = snum;
-	struct inet_bind_hashbucket *head;
-	struct net *net = sock_net(sk);
-	struct inet_bind_bucket *tb = NULL;
 	kuid_t uid = sock_i_uid(sk);
-	int l3mdev;
-
-	l3mdev = inet_sk_bound_l3mdev(sk);
-
-	if (!port) {
-		head = inet_csk_find_open_port(sk, &tb, &port);
-		if (!head)
-			return ret;
-		if (!tb)
-			goto tb_not_found;
-		goto success;
-	}
-	head = &hinfo->bhash[inet_bhashfn(net, port,
-					  hinfo->bhash_size)];
-	spin_lock_bh(&head->lock);
-	inet_bind_bucket_for_each(tb, &head->chain)
-		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
-		    tb->port == port)
-			goto tb_found;
-tb_not_found:
-	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
-				     net, head, port, l3mdev);
-	if (!tb)
-		goto fail_unlock;
-tb_found:
-	if (!hlist_empty(&tb->owners)) {
-		if (sk->sk_reuse == SK_FORCE_REUSE)
-			goto success;
-
-		if ((tb->fastreuse > 0 && reuse) ||
-		    sk_reuseport_match(tb, sk))
-			goto success;
-		if (inet_csk_bind_conflict(sk, tb, true, true))
-			goto fail_unlock;
-	}
-success:
+	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+
 	if (hlist_empty(&tb->owners)) {
 		tb->fastreuse = reuse;
 		if (sk->sk_reuseport) {
@@ -388,6 +345,58 @@ success:
 			tb->fastreuseport = 0;
 		}
 	}
+}
+
+/* Obtain a reference to a local port for the given sock,
+ * if snum is zero it means select any available local port.
+ * We try to allocate an odd port (and leave even ports for connect())
+ */
+int inet_csk_get_port(struct sock *sk, unsigned short snum)
+{
+	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
+	int ret = 1, port = snum;
+	struct inet_bind_hashbucket *head;
+	struct net *net = sock_net(sk);
+	struct inet_bind_bucket *tb = NULL;
+	int l3mdev;
+
+	l3mdev = inet_sk_bound_l3mdev(sk);
+
+	if (!port) {
+		head = inet_csk_find_open_port(sk, &tb, &port);
+		if (!head)
+			return ret;
+		if (!tb)
+			goto tb_not_found;
+		goto success;
+	}
+	head = &hinfo->bhash[inet_bhashfn(net, port,
+					  hinfo->bhash_size)];
+	spin_lock_bh(&head->lock);
+	inet_bind_bucket_for_each(tb, &head->chain)
+		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+		    tb->port == port)
+			goto tb_found;
+tb_not_found:
+	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
+				     net, head, port, l3mdev);
+	if (!tb)
+		goto fail_unlock;
+tb_found:
+	if (!hlist_empty(&tb->owners)) {
+		if (sk->sk_reuse == SK_FORCE_REUSE)
+			goto success;
+
+		if ((tb->fastreuse > 0 && reuse) ||
+		    sk_reuseport_match(tb, sk))
+			goto success;
+		if (inet_csk_bind_conflict(sk, tb, true, true))
+			goto fail_unlock;
+	}
+success:
+	inet_csk_update_fastreuse(tb, sk);
+
 	if (!inet_csk(sk)->icsk_bind_hash)
 		inet_bind_hash(sk, tb, port);
 	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);