[ICMP]: Allocate data for __icmp(v6)_sk dynamically.

Each namespace should have its own __icmp(v6)_sk, so it must be
allocated dynamically. However, alloc_percpu does not fit this case, as it
implies an additional dereference for no benefit.

Allocate data for pointers just like __percpu_alloc_mask does and place
pointers to struct sock into this array.

Signed-off-by: Denis V. Lunev <den@openvz.org>
Acked-by: Daniel Lezcano <dlezcano@fr.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Denis V. Lunev 2008-02-29 11:17:11 -08:00 committed by David S. Miller
parent 405666db84
commit 79c9115953
2 changed files with 19 additions and 10 deletions

View File

@ -229,8 +229,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
* *
* On SMP we have one ICMP socket per-cpu. * On SMP we have one ICMP socket per-cpu.
*/ */
static DEFINE_PER_CPU(struct sock *, __icmp_sk) = NULL; static struct sock **__icmp_sk = NULL;
#define icmp_sk __get_cpu_var(__icmp_sk) #define icmp_sk (__icmp_sk[smp_processor_id()])
static inline int icmp_xmit_lock(struct sock *sk) static inline int icmp_xmit_lock(struct sock *sk)
{ {
@ -1149,18 +1149,23 @@ static void __exit icmp_exit(void)
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
struct sock *sk; struct sock *sk;
sk = per_cpu(__icmp_sk, i); sk = __icmp_sk[i];
if (sk == NULL) if (sk == NULL)
continue; continue;
per_cpu(__icmp_sk, i) = NULL;
sock_release(sk->sk_socket); sock_release(sk->sk_socket);
} }
kfree(__icmp_sk);
__icmp_sk = NULL;
} }
int __init icmp_init(void) int __init icmp_init(void)
{ {
int i, err; int i, err;
__icmp_sk = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
if (__icmp_sk == NULL)
return -ENOMEM;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
struct sock *sk; struct sock *sk;
struct socket *sock; struct socket *sock;
@ -1170,7 +1175,7 @@ int __init icmp_init(void)
if (err < 0) if (err < 0)
goto fail; goto fail;
per_cpu(__icmp_sk, i) = sk = sock->sk; __icmp_sk[i] = sk = sock->sk;
sk->sk_allocation = GFP_ATOMIC; sk->sk_allocation = GFP_ATOMIC;
/* Enough space for 2 64K ICMP packets, including /* Enough space for 2 64K ICMP packets, including

View File

@ -80,8 +80,8 @@ EXPORT_SYMBOL(icmpv6msg_statistics);
* *
* On SMP we have one ICMP socket per-cpu. * On SMP we have one ICMP socket per-cpu.
*/ */
static DEFINE_PER_CPU(struct sock *, __icmpv6_sk) = NULL; static struct sock **__icmpv6_sk = NULL;
#define icmpv6_sk __get_cpu_var(__icmpv6_sk) #define icmpv6_sk (__icmpv6_sk[smp_processor_id()])
static int icmpv6_rcv(struct sk_buff *skb); static int icmpv6_rcv(struct sk_buff *skb);
@ -785,6 +785,10 @@ int __init icmpv6_init(void)
struct sock *sk; struct sock *sk;
int err, i, j; int err, i, j;
__icmpv6_sk = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
if (__icmpv6_sk == NULL)
return -ENOMEM;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
struct socket *sock; struct socket *sock;
err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
@ -797,7 +801,7 @@ int __init icmpv6_init(void)
goto fail; goto fail;
} }
per_cpu(__icmpv6_sk, i) = sk = sock->sk; __icmpv6_sk[i] = sk = sock->sk;
sk->sk_allocation = GFP_ATOMIC; sk->sk_allocation = GFP_ATOMIC;
/* /*
* Split off their lock-class, because sk->sk_dst_lock * Split off their lock-class, because sk->sk_dst_lock
@ -830,7 +834,7 @@ int __init icmpv6_init(void)
for (j = 0; j < i; j++) { for (j = 0; j < i; j++) {
if (!cpu_possible(j)) if (!cpu_possible(j))
continue; continue;
sock_release(per_cpu(__icmpv6_sk, j)->sk_socket); sock_release(__icmpv6_sk[j]->sk_socket);
} }
return err; return err;
@ -841,7 +845,7 @@ void icmpv6_cleanup(void)
int i; int i;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
sock_release(per_cpu(__icmpv6_sk, i)->sk_socket); sock_release(__icmpv6_sk[i]->sk_socket);
} }
inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
} }