ipv4: Namespaceify tcp_max_syn_backlog knob

Applications in different namespaces might require different maximal
numbers of remembered connection requests.

Signed-off-by: Haishuang Yan <yanhaishuang@cmss.chinamobile.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Haishuang Yan 2016-12-28 17:52:33 +08:00 committed by David S. Miller
parent 1946e672c1
commit fee83d097b
7 changed files with 16 additions and 18 deletions
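
The effect, sketched as a hypothetical user-space program (not part of this commit; assumes root and a kernel carrying this patch): after unshare(CLONE_NEWNET), a write to net.ipv4.tcp_max_syn_backlog lands only in the new namespace, while the init namespace keeps its own value.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *knob = "/proc/sys/net/ipv4/tcp_max_syn_backlog";
        int fd;

        /* Detach into a fresh network namespace (needs CAP_SYS_ADMIN). */
        if (unshare(CLONE_NEWNET) < 0) {
                perror("unshare");
                return 1;
        }

        /* This write is visible only inside the new namespace. */
        fd = open(knob, O_WRONLY);
        if (fd < 0 || write(fd, "4096\n", 5) != 5) {
                perror(knob);
                return 1;
        }
        close(fd);
        return 0;
}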

include/net/netns/ipv4.h

@@ -122,6 +122,7 @@ struct netns_ipv4 {
 	unsigned int sysctl_tcp_notsent_lowat;
 	int sysctl_tcp_tw_reuse;
 	struct inet_timewait_death_row tcp_death_row;
+	int sysctl_max_syn_backlog;
 	int sysctl_igmp_max_memberships;
 	int sysctl_igmp_max_msf;

include/net/request_sock.h

@@ -1,7 +1,7 @@
 /*
  * NET Generic infrastructure for Network protocols.
  *
- * Definitions for request_sock 
+ * Definitions for request_sock
  *
  * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *
@@ -123,8 +123,6 @@ static inline void reqsk_put(struct request_sock *req)
 		reqsk_free(req);
 }
 
-extern int sysctl_max_syn_backlog;
-
 /*
  * For a TCP Fast Open listener -
  * lock - protects the access to all the reqsk, which is co-owned by

net/core/request_sock.c

@@ -34,8 +34,6 @@
  * and it will increase in proportion to the memory of machine.
  * Note : Dont forget somaxconn that may limit backlog too.
  */
-int sysctl_max_syn_backlog = 256;
-EXPORT_SYMBOL(sysctl_max_syn_backlog);
 
 void reqsk_queue_alloc(struct request_sock_queue *queue)
 {

net/ipv4/sysctl_net_ipv4.c

@@ -323,13 +323,6 @@ static struct ctl_table ipv4_table[] = {
 		.mode = 0644,
 		.proc_handler = proc_dointvec
 	},
-	{
-		.procname = "tcp_max_syn_backlog",
-		.data = &sysctl_max_syn_backlog,
-		.maxlen = sizeof(int),
-		.mode = 0644,
-		.proc_handler = proc_dointvec
-	},
 	{
 		.procname = "inet_peer_threshold",
 		.data = &inet_peer_threshold,
@@ -960,6 +953,13 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode = 0644,
 		.proc_handler = proc_dointvec
 	},
+	{
+		.procname = "tcp_max_syn_backlog",
+		.data = &init_net.ipv4.sysctl_max_syn_backlog,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = proc_dointvec
+	},
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	{
 		.procname = "fib_multipath_use_neigh",

net/ipv4/tcp.c

@@ -3378,9 +3378,7 @@ void __init tcp_init(void)
 	cnt = tcp_hashinfo.ehash_mask + 1;
 	sysctl_tcp_max_orphans = cnt / 2;
-	sysctl_max_syn_backlog = max(128, cnt / 256);
 
 	tcp_init_mem();
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */

net/ipv4/tcp_input.c

@@ -6377,8 +6377,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	}
 	/* Kill the following clause, if you dislike this way. */
 	else if (!net->ipv4.sysctl_tcp_syncookies &&
-		 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-		  (sysctl_max_syn_backlog >> 2)) &&
+		 (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+		  (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
 		 !tcp_peer_is_proven(req, dst, false,
 				     tmp_opt.saw_tstamp)) {
 		/* Without syncookies last quarter of
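
Restated outside the kernel (an illustrative reduction, not the kernel function): with syncookies disabled, a SYN from a peer that has not proven itself via timestamps is dropped once fewer than a quarter of the namespace's backlog slots remain.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the condition above: drop when remaining room falls below 25%. */
static bool would_drop_syn(int max_syn_backlog, int queue_len,
                           bool syncookies, bool peer_proven)
{
        int remaining = max_syn_backlog - queue_len;

        return !syncookies && remaining < (max_syn_backlog >> 2) && !peer_proven;
}

int main(void)
{
        /* 1024-slot backlog, 800 pending requests: 224 < 256, so drop. */
        printf("%d\n", would_drop_syn(1024, 800, false, false));
        return 0;
}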

net/ipv4/tcp_ipv4.c

@@ -2419,7 +2419,7 @@ static void __net_exit tcp_sk_exit(struct net *net)
 static int __net_init tcp_sk_init(struct net *net)
 {
-	int res, cpu;
+	int res, cpu, cnt;
 
 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
 	if (!net->ipv4.tcp_sk)
@@ -2458,10 +2458,13 @@ static int __net_init tcp_sk_init(struct net *net)
 	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
 	net->ipv4.sysctl_tcp_tw_reuse = 0;
+	cnt = tcp_hashinfo.ehash_mask + 1;
 	net->ipv4.tcp_death_row.sysctl_tw_recycle = 0;
-	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (tcp_hashinfo.ehash_mask + 1) / 2;
+	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
 	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
+	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
 	return 0;
 fail:
 	tcp_sk_exit(net);
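
The per-namespace default reuses the formula that was just removed from tcp_init(): cnt is the size of the established hash (ehash_mask + 1), scaled down by 256 with a floor of 128. A standalone arithmetic check (illustration only):

#include <stdio.h>

static int default_max_syn_backlog(unsigned int cnt)
{
        unsigned int scaled = cnt / 256;

        return scaled > 128 ? (int)scaled : 128;   /* max(128, cnt / 256) */
}

int main(void)
{
        printf("%d\n", default_max_syn_backlog(8192));     /* small hash: 128 */
        printf("%d\n", default_max_syn_backlog(524288));   /* large hash: 2048 */
        return 0;
}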