Merge branch 'master' of git://1984.lsi.us.es/net-2.6

commit 60dbb011df
Author: David S. Miller
Date:   2011-01-11 15:43:03 -08:00
7 changed files with 55 additions and 109 deletions

diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h

@@ -103,7 +103,7 @@ struct __fdb_entry {
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
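The change above is subtle: with the old typedef, br_should_route_hook_t was already a pointer type, so the __rcu qualifier on the extern declaration attached to the wrong level of indirection. Declaring the typedef as a function type makes `br_should_route_hook_t __rcu *` an __rcu-qualified pointer to function, which sparse can check. A minimal user-space sketch of the distinction (hook_ptr_t, hook_fn_t and double_it are invented names for the demo, not kernel code):

    #include <stdio.h>

    /* Old style: the pointer is hidden inside the typedef. */
    typedef int (*hook_ptr_t)(int);

    /* New style, as in the hunk above: the typedef names the function
     * type itself, so the pointer (and any qualifier on it, like the
     * kernel's __rcu) is spelled out at the use site. */
    typedef int hook_fn_t(int);

    static int double_it(int x)
    {
        return 2 * x;
    }

    int main(void)
    {
        hook_ptr_t p1 = double_it;  /* pointer implied by the typedef */
        hook_fn_t *p2 = double_it;  /* pointer written explicitly */

        printf("%d %d\n", p1(21), (*p2)(21));
        return 0;
    }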

diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h

@@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info);
  * necessary for reading the counters.
  */
 struct xt_info_lock {
-	spinlock_t lock;
+	seqlock_t lock;
 	unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@ static inline void xt_info_rdlock_bh(void)
 	local_bh_disable();
 	lock = &__get_cpu_var(xt_info_locks);
 	if (likely(!lock->readers++))
-		spin_lock(&lock->lock);
+		write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@ static inline void xt_info_rdunlock_bh(void)
 	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
 	if (likely(!--lock->readers))
-		spin_unlock(&lock->lock);
+		write_sequnlock(&lock->lock);
 	local_bh_enable();
 }
@@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void)
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
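Net effect of the four hunks above: the per-cpu xt_info_lock becomes a seqlock, so counter readers no longer have to take the lock at all; they sample the sequence, copy the counters, and retry if a writer got in between. A toy user-space model of that protocol (toy_seqlock and friends are made-up names; the kernel's seqlock also handles SMP memory ordering that this sketch glosses over):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy seqlock: writers make the sequence odd while updating, even
     * when done; readers retry if they saw an odd value or the value
     * changed underneath them. */
    struct toy_seqlock {
        atomic_uint seq;
        uint64_t bcnt, pcnt;    /* data published under the lock */
    };

    static void write_update(struct toy_seqlock *s, uint64_t b, uint64_t p)
    {
        atomic_fetch_add(&s->seq, 1);   /* odd: update in progress */
        s->bcnt = b;
        s->pcnt = p;
        atomic_fetch_add(&s->seq, 1);   /* even: stable again */
    }

    static void read_snapshot(struct toy_seqlock *s, uint64_t *b, uint64_t *p)
    {
        unsigned int start;

        do {    /* same shape as the read_seqbegin/read_seqretry loop */
            start = atomic_load(&s->seq);
            *b = s->bcnt;
            *p = s->pcnt;
        } while ((start & 1) || atomic_load(&s->seq) != start);
    }

    int main(void)
    {
        struct toy_seqlock s = { .seq = 0, .bcnt = 0, .pcnt = 0 };
        uint64_t b, p;

        write_update(&s, 1500, 1);
        read_snapshot(&s, &b, &p);
        printf("bytes=%llu packets=%llu\n",
               (unsigned long long)b, (unsigned long long)p);
        return 0;
    }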

diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c

@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
 	}
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
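Worth spelling out why the retry loop above is needed at all rather than a plain unlocked read: the counters are u64, and on a 32-bit machine a reader can observe one half from before and one half from after a concurrent increment; read_seqbegin/read_seqretry discards such torn snapshots. The same conversion repeats verbatim for ip_tables.c and ip6_tables.c below. A self-contained simulation of the tear (all names here are invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    /* On 32-bit CPUs a u64 counter is read as two halves, so a reader
     * can see "half old, half new". Simulated here by interleaving an
     * update between the two half-reads. */
    static uint32_t lo(uint64_t v) { return (uint32_t)v; }
    static uint32_t hi(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
        uint64_t bcnt = 0xFFFFFFFFull;  /* about to carry into the high word */
        uint32_t seen_lo, seen_hi;
        uint64_t torn;

        seen_lo = lo(bcnt);     /* reader grabs the low half... */
        bcnt += 1;              /* ...writer increments: 0x100000000 */
        seen_hi = hi(bcnt);     /* ...reader grabs the new high half */

        torn = ((uint64_t)seen_hi << 32) | seen_lo;
        printf("actual=%llx torn read=%llx\n",
               (unsigned long long)bcnt, (unsigned long long)torn);
        return 0;
    }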
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
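The vmalloc() calls become vzalloc() because the rewritten get_counters() above no longer seeds counters[] with a SET_COUNTER pass for the local CPU; it only ever adds, so the buffer must start out zeroed. The same substitution recurs in ip_tables.c and ip6_tables.c below. A user-space analogue of the requirement (xalloc_zeroed is a stand-in, not a kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for vzalloc(): allocation and zeroing in one step, so
     * the caller may accumulate with += immediately, as ADD_COUNTER
     * does in the loop above. */
    static void *xalloc_zeroed(size_t n)
    {
        return calloc(1, n);
    }

    int main(void)
    {
        size_t countersize = 4 * sizeof(unsigned long long);
        unsigned long long *counters = xalloc_zeroed(countersize);

        if (counters == NULL)
            return 1;
        counters[0] += 1500;    /* safe: every slot started at zero */
        printf("%llu\n", counters[0]);
        free(counters);
        return 0;
    }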

diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c

@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU.
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */
 		}
 	}
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	struct ipt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;

diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c

@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
 	}
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	struct ip6t_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;

diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c

@@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
 	u_int8_t l3proto = nfmsg->nfgen_family;
 
-	rcu_read_lock();
+	spin_lock_bh(&nf_conntrack_lock);
 	last = (struct nf_conn *)cb->args[1];
 	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
-					       hnnode) {
+		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
+					   hnnode) {
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (!atomic_inc_not_zero(&ct->ct_general.use))
-				continue;
 			/* Dump entries of a given L3 protocol number.
 			 * If it is not specified, ie. l3proto == 0,
 			 * then dump everything. */
 			if (l3proto && nf_ct_l3num(ct) != l3proto)
-				goto releasect;
+				continue;
 			if (cb->args[1]) {
 				if (ct != last)
-					goto releasect;
+					continue;
 				cb->args[1] = 0;
 			}
 			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@ restart:
 				if (acct)
 					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
 			}
-releasect:
-			nf_ct_put(ct);
 		}
 		if (cb->args[1]) {
 			cb->args[1] = 0;
@@ -690,7 +686,7 @@ releasect:
 		}
 	}
 out:
-	rcu_read_unlock();
+	spin_unlock_bh(&nf_conntrack_lock);
 	if (last)
 		nf_ct_put(last);
 
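The dump now runs under nf_conntrack_lock with bottom halves disabled instead of under RCU, which removes the per-entry refcount dance (atomic_inc_not_zero plus the releasect/nf_ct_put path) but keeps the resumable-cursor scheme in cb->args[]. A toy model of that cursor follows; it resumes by index, whereas ctnetlink must remember the last conntrack pointer because entries can disappear between batches, and all names here are invented:

    #include <stdio.h>

    #define BUCKETS 4
    #define PER_BUCKET 3
    #define BATCH 5 /* pretend the skb fills up after 5 entries */

    struct cb_state {
        unsigned long args[2];  /* like netlink_callback.args */
    };

    /* Dump at most BATCH entries, recording where we stopped so the
     * next call resumes exactly there, as ctnetlink_dump_table does. */
    static int dump_table(struct cb_state *cb, int table[BUCKETS][PER_BUCKET])
    {
        int dumped = 0;

        for (; cb->args[0] < BUCKETS; cb->args[0]++) {
            for (unsigned long i = cb->args[1]; i < PER_BUCKET; i++) {
                if (dumped == BATCH) {
                    cb->args[1] = i;    /* resume point in this bucket */
                    return dumped;
                }
                printf("entry %d\n", table[cb->args[0]][i]);
                dumped++;
            }
            cb->args[1] = 0;    /* next bucket starts at its head */
        }
        return dumped;
    }

    int main(void)
    {
        int table[BUCKETS][PER_BUCKET], v = 0;
        struct cb_state cb = { { 0, 0 } };

        for (int b = 0; b < BUCKETS; b++)
            for (int i = 0; i < PER_BUCKET; i++)
                table[b][i] = v++;

        while (dump_table(&cb, table) == BATCH)
            puts("-- batch full, next call resumes --");
        return 0;
    }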

diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c

@@ -1325,7 +1325,8 @@ static int __init xt_init(void)
 
 	for_each_possible_cpu(i) {
 		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-		spin_lock_init(&lock->lock);
+
+		seqlock_init(&lock->lock);
 		lock->readers = 0;
 	}
 
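One line worth a second look: lock->readers survives the conversion. The per-cpu "read" side (packet processing) can nest on one CPU, for example when a target re-enters table traversal, so only the outermost xt_info_rdlock_bh() takes the underlying seqlock write side. A stripped-down model of that recursion guard (toy names, single-CPU view only):

    #include <assert.h>
    #include <stdio.h>

    /* Recursion guard as in xt_info_rdlock_bh(): only the outermost of
     * the nested "readers" on a CPU acquires the real lock. */
    struct toy_info_lock {
        int lock_held;          /* stands in for the seqlock write side */
        unsigned char readers;
    };

    static void rdlock(struct toy_info_lock *l)
    {
        if (!l->readers++)      /* first level in: take the lock */
            l->lock_held = 1;
    }

    static void rdunlock(struct toy_info_lock *l)
    {
        if (!--l->readers)      /* last level out: release it */
            l->lock_held = 0;
    }

    int main(void)
    {
        struct toy_info_lock l = { 0, 0 };

        rdlock(&l);     /* outer table traversal */
        rdlock(&l);     /* nested traversal, e.g. re-entry via a target */
        assert(l.lock_held && l.readers == 2);
        rdunlock(&l);
        assert(l.lock_held);    /* still held: outer level still active */
        rdunlock(&l);
        assert(!l.lock_held && l.readers == 0);
        puts("nested read side ok");
        return 0;
    }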