Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for your net tree. This
is a large batch that includes fixes for ipset, the netfilter ingress
hook, nf_tables dynamic set instantiation, and a longstanding Kconfig
dependency problem. More specifically, they are:

1) Add missing check for empty hook list at the ingress hook, from
   Florian Westphal.

2) The input and output interfaces are swapped at the ingress hook,
   reported by Patrick McHardy.

3) Resolve ipset extension alignment issues on ARM, patch from Jozsef
   Kadlecsik.

4) Fix bit check on bitmap in ipset hash type, also from Jozsef.

5) Release buckets when all entries have expired in ipset hash type,
   again from Jozsef.

6) One-liner to initialize the conntrack tuple object in the PPTP
   helper; otherwise the conntrack lookup may fail due to random bits
   in the structure holes. Patch from Anthony Lineham.

7) Silence a bogus gcc warning in nfnetlink_log, from Arnd Bergmann.

8) Fix Kconfig dependency problems with TPROXY, socket and dup, also
   from Arnd.

9) Add __netdev_alloc_pcpu_stats() to allow creating percpu counters
   from atomic context; this is required by the follow-up fix for
   nf_tables.

10) Fix crash from the dynamic set expression: we have to add a new
    clone operation, to be defined when a simple memcpy is not enough.
    This resolves a crash when using per-cpu counters with Patrick
    McHardy's new flow table nft support.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 382a483e53
David S. Miller, 2015-11-12 14:17:16 -05:00
16 changed files with 161 additions and 119 deletions

include/linux/netdevice.h

@@ -2068,20 +2068,23 @@ struct pcpu_sw_netstats {
struct u64_stats_sync syncp;
};
#define netdev_alloc_pcpu_stats(type) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
if (pcpu_stats) { \
int __cpu; \
for_each_possible_cpu(__cpu) { \
typeof(type) *stat; \
stat = per_cpu_ptr(pcpu_stats, __cpu); \
u64_stats_init(&stat->syncp); \
} \
} \
pcpu_stats; \
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
if (pcpu_stats) { \
int __cpu; \
for_each_possible_cpu(__cpu) { \
typeof(type) *stat; \
stat = per_cpu_ptr(pcpu_stats, __cpu); \
u64_stats_init(&stat->syncp); \
} \
} \
pcpu_stats; \
})
#define netdev_alloc_pcpu_stats(type) \
__netdev_alloc_pcpu_stats(type, GFP_KERNEL);
#include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update the rtnetlink
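
The effect of this hunk: the allocation plus the u64_stats_init() loop move
into __netdev_alloc_pcpu_stats(), which takes explicit gfp flags, and
netdev_alloc_pcpu_stats() becomes its GFP_KERNEL wrapper. A minimal
caller-side sketch (hypothetical context, not part of the patch):

    struct pcpu_sw_netstats __percpu *stats;

    /* Atomic context: GFP_KERNEL would be illegal here, so pass
     * GFP_ATOMIC explicitly. Each CPU's syncp comes back already
     * initialized by the macro. */
    stats = __netdev_alloc_pcpu_stats(struct pcpu_sw_netstats, GFP_ATOMIC);
    if (!stats)
        return -ENOMEM;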

include/linux/netfilter/ipset/ip_set.h

@@ -421,7 +421,7 @@ extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
size_t len);
size_t len, size_t align);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);

include/linux/netfilter_ingress.h

@@ -5,10 +5,13 @@
#include <linux/netdevice.h>
#ifdef CONFIG_NETFILTER_INGRESS
static inline int nf_hook_ingress_active(struct sk_buff *skb)
static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
{
return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
NFPROTO_NETDEV, NF_NETDEV_INGRESS);
#ifdef HAVE_JUMP_LABEL
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
return false;
#endif
return !list_empty(&skb->dev->nf_hooks_ingress);
}
static inline int nf_hook_ingress(struct sk_buff *skb)
@@ -16,8 +19,8 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
struct nf_hook_state state;
nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
skb->dev, NULL, dev_net(skb->dev), NULL);
NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
return nf_hook_slow(skb, &state);
}
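
For context, the receive path is expected to gate the netfilter slow path
on nf_hook_ingress_active(); a simplified sketch of that caller pattern
(condensed, not the literal net/core/dev.c code):

    static inline int nf_ingress(struct sk_buff *skb)
    {
        /* With the fix, this also returns false when the static key
         * is enabled but this device's hook list is empty. */
        if (!nf_hook_ingress_active(skb))
            return 0;                    /* fast path, no hooks */
        return nf_hook_ingress(skb);     /* run the ingress hooks */
    }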

include/net/netfilter/nf_tables.h

@@ -618,6 +618,8 @@ struct nft_expr_ops {
void (*eval)(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt);
int (*clone)(struct nft_expr *dst,
const struct nft_expr *src);
unsigned int size;
int (*init)(const struct nft_ctx *ctx,
@@ -660,10 +662,20 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
const struct nft_expr *expr);
static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
{
int err;
__module_get(src->ops->type->owner);
memcpy(dst, src, src->ops->size);
if (src->ops->clone) {
dst->ops = src->ops;
err = src->ops->clone(dst, src);
if (err < 0)
return err;
} else {
memcpy(dst, src, src->ops->size);
}
return 0;
}
/**
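
An expression only needs to implement .clone when memcpy()ing its private
area would be wrong, typically because it holds a pointer to per-CPU or
otherwise shared state. A toy sketch, with hypothetical nft_foo_* names:

    static int nft_foo_clone(struct nft_expr *dst, const struct nft_expr *src)
    {
        struct nft_foo_priv *priv = nft_expr_priv(dst);

        /* Duplicate, don't alias, the per-CPU area; GFP_ATOMIC
         * because clone may be called from the packet path. */
        priv->stats = alloc_percpu_gfp(struct nft_foo_stats, GFP_ATOMIC);
        if (priv->stats == NULL)
            return -ENOMEM;
        return 0;
    }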

net/ipv4/netfilter/nf_nat_pptp.c

@@ -45,7 +45,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
struct net *net = nf_ct_net(ct);
const struct nf_conn *master = ct->master;
struct nf_conntrack_expect *other_exp;
struct nf_conntrack_tuple t;
struct nf_conntrack_tuple t = {};
const struct nf_ct_pptp_master *ct_pptp_info;
const struct nf_nat_pptp *nat_pptp_info;
struct nf_nat_range range;
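
The empty initializer matters because the tuple is hashed and compared over
its full memory, compiler-inserted padding included; in practice = {}
zeroes the whole object, while a bare declaration leaves stack garbage in
the holes. In outline:

    struct nf_conntrack_tuple t1;       /* padding holes: stack garbage */
    struct nf_conntrack_tuple t2 = {};  /* members and padding zeroed */

Only t2 produces a deterministic lookup key.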

net/netfilter/Kconfig

@@ -869,7 +869,7 @@ config NETFILTER_XT_TARGET_TEE
depends on IPV6 || IPV6=n
depends on !NF_CONNTRACK || NF_CONNTRACK
select NF_DUP_IPV4
select NF_DUP_IPV6 if IP6_NF_IPTABLES
select NF_DUP_IPV6 if IP6_NF_IPTABLES != n
---help---
This option adds a "TEE" target with which a packet can be cloned and
this clone be rerouted to another nexthop.
@@ -882,7 +882,7 @@ config NETFILTER_XT_TARGET_TPROXY
depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
depends on IP_NF_MANGLE
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
help
This option adds a `TPROXY' target, which is somewhat similar to
REDIRECT. It can only be used in the mangle table and is useful
@@ -1375,7 +1375,7 @@ config NETFILTER_XT_MATCH_SOCKET
depends on IPV6 || IPV6=n
depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
help
This option adds a `socket' match, which can be used to match
packets for which a TCP or UDP socket lookup finds a valid socket.

net/netfilter/ipset/ip_set_bitmap_gen.h

@@ -33,7 +33,7 @@
#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
#define mtype MTYPE
#define get_ext(set, map, id) ((map)->extensions + (set)->dsize * (id))
#define get_ext(set, map, id) ((map)->extensions + ((set)->dsize * (id)))
static void
mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
@@ -67,12 +67,9 @@ mtype_destroy(struct ip_set *set)
del_timer_sync(&map->gc);
ip_set_free(map->members);
if (set->dsize) {
if (set->extensions & IPSET_EXT_DESTROY)
mtype_ext_cleanup(set);
ip_set_free(map->extensions);
}
kfree(map);
if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
mtype_ext_cleanup(set);
ip_set_free(map);
set->data = NULL;
}
@@ -92,16 +89,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
{
const struct mtype *map = set->data;
struct nlattr *nested;
size_t memsize = sizeof(*map) + map->memsize;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
if (mtype_do_head(skb, map) ||
nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) +
map->memsize +
set->dsize * map->elements)))
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
goto nla_put_failure;
if (unlikely(ip_set_put_flags(skb, set)))
goto nla_put_failure;

net/netfilter/ipset/ip_set_bitmap_ip.c

@@ -41,7 +41,6 @@ MODULE_ALIAS("ip_set_bitmap:ip");
/* Type structure */
struct bitmap_ip {
void *members; /* the set members */
void *extensions; /* data extensions */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
@@ -49,6 +48,8 @@ struct bitmap_ip {
size_t memsize; /* members size */
u8 netmask; /* subnet netmask */
struct timer_list gc; /* garbage collection */
unsigned char extensions[0] /* data extensions */
__aligned(__alignof__(u64));
};
/* ADT structure for generic function args */
@@ -224,13 +225,6 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
map->members = ip_set_alloc(map->memsize);
if (!map->members)
return false;
if (set->dsize) {
map->extensions = ip_set_alloc(set->dsize * elements);
if (!map->extensions) {
kfree(map->members);
return false;
}
}
map->first_ip = first_ip;
map->last_ip = last_ip;
map->elements = elements;
@@ -316,13 +310,13 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
pr_debug("hosts %u, elements %llu\n",
hosts, (unsigned long long)elements);
map = kzalloc(sizeof(*map), GFP_KERNEL);
set->dsize = ip_set_elem_len(set, tb, 0, 0);
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
if (!map)
return -ENOMEM;
map->memsize = bitmap_bytes(0, elements - 1);
set->variant = &bitmap_ip;
set->dsize = ip_set_elem_len(set, tb, 0);
if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) {
kfree(map);
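
The structural change in this file (and in the other bitmap types below):
the extensions area moves from a second allocation into a u64-aligned
flexible array at the tail of the map, so a single ip_set_alloc() sizes
both and ip_set_free(map) releases them together; the members bitmap stays
a separate allocation. Condensed:

    map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
    /* get_ext() indexes into map->extensions[]; there is no separate
     * extensions allocation left to check, track or free. */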

net/netfilter/ipset/ip_set_bitmap_ipmac.c

@@ -47,24 +47,26 @@ enum {
/* Type structure */
struct bitmap_ipmac {
void *members; /* the set members */
void *extensions; /* MAC + data extensions */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
size_t memsize; /* members size */
struct timer_list gc; /* garbage collector */
unsigned char extensions[0] /* MAC + data extensions */
__aligned(__alignof__(u64));
};
/* ADT structure for generic function args */
struct bitmap_ipmac_adt_elem {
unsigned char ether[ETH_ALEN] __aligned(2);
u16 id;
unsigned char *ether;
u16 add_mac;
};
struct bitmap_ipmac_elem {
unsigned char ether[ETH_ALEN];
unsigned char filled;
} __attribute__ ((aligned));
} __aligned(__alignof__(u64));
static inline u32
ip_to_id(const struct bitmap_ipmac *m, u32 ip)
@@ -72,11 +74,11 @@ ip_to_id(const struct bitmap_ipmac *m, u32 ip)
return ip - m->first_ip;
}
static inline struct bitmap_ipmac_elem *
get_elem(void *extensions, u16 id, size_t dsize)
{
return (struct bitmap_ipmac_elem *)(extensions + id * dsize);
}
#define get_elem(extensions, id, dsize) \
(struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
#define get_const_elem(extensions, id, dsize) \
(const struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
/* Common functions */
@@ -88,10 +90,9 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
if (!test_bit(e->id, map->members))
return 0;
elem = get_elem(map->extensions, e->id, dsize);
if (elem->filled == MAC_FILLED)
return !e->ether ||
ether_addr_equal(e->ether, elem->ether);
elem = get_const_elem(map->extensions, e->id, dsize);
if (e->add_mac && elem->filled == MAC_FILLED)
return ether_addr_equal(e->ether, elem->ether);
/* Trigger kernel to fill out the ethernet address */
return -EAGAIN;
}
@@ -103,7 +104,7 @@ bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
if (!test_bit(id, map->members))
return 0;
elem = get_elem(map->extensions, id, dsize);
elem = get_const_elem(map->extensions, id, dsize);
/* Timer not started for the incomplete elements */
return elem->filled == MAC_FILLED;
}
@@ -133,7 +134,7 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
* and we can reuse it later when MAC is filled out,
* possibly by the kernel
*/
if (e->ether)
if (e->add_mac)
ip_set_timeout_set(timeout, t);
else
*timeout = t;
@@ -150,7 +151,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
elem = get_elem(map->extensions, e->id, dsize);
if (test_bit(e->id, map->members)) {
if (elem->filled == MAC_FILLED) {
if (e->ether &&
if (e->add_mac &&
(flags & IPSET_FLAG_EXIST) &&
!ether_addr_equal(e->ether, elem->ether)) {
/* memcpy isn't atomic */
@@ -159,7 +160,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
ether_addr_copy(elem->ether, e->ether);
}
return IPSET_ADD_FAILED;
} else if (!e->ether)
} else if (!e->add_mac)
/* Already added without ethernet address */
return IPSET_ADD_FAILED;
/* Fill the MAC address and trigger the timer activation */
@@ -168,7 +169,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
ether_addr_copy(elem->ether, e->ether);
elem->filled = MAC_FILLED;
return IPSET_ADD_START_STORED_TIMEOUT;
} else if (e->ether) {
} else if (e->add_mac) {
/* We can store MAC too */
ether_addr_copy(elem->ether, e->ether);
elem->filled = MAC_FILLED;
@@ -191,7 +192,7 @@ bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
u32 id, size_t dsize)
{
const struct bitmap_ipmac_elem *elem =
get_elem(map->extensions, id, dsize);
get_const_elem(map->extensions, id, dsize);
return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id)) ||
@@ -213,7 +214,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
{
struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_ipmac_adt_elem e = { .id = 0 };
struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
u32 ip;
@@ -231,7 +232,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
return -EINVAL;
e.id = ip_to_id(map, ip);
e.ether = eth_hdr(skb)->h_source;
memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
@@ -265,11 +266,10 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
return -IPSET_ERR_BITMAP_RANGE;
e.id = ip_to_id(map, ip);
if (tb[IPSET_ATTR_ETHER])
e.ether = nla_data(tb[IPSET_ATTR_ETHER]);
else
e.ether = NULL;
if (tb[IPSET_ATTR_ETHER]) {
memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
e.add_mac = 1;
}
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -300,13 +300,6 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
map->members = ip_set_alloc(map->memsize);
if (!map->members)
return false;
if (set->dsize) {
map->extensions = ip_set_alloc(set->dsize * elements);
if (!map->extensions) {
kfree(map->members);
return false;
}
}
map->first_ip = first_ip;
map->last_ip = last_ip;
map->elements = elements;
@@ -361,14 +354,15 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (elements > IPSET_BITMAP_MAX_RANGE + 1)
return -IPSET_ERR_BITMAP_RANGE_SIZE;
map = kzalloc(sizeof(*map), GFP_KERNEL);
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct bitmap_ipmac_elem),
__alignof__(struct bitmap_ipmac_elem));
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
if (!map)
return -ENOMEM;
map->memsize = bitmap_bytes(0, elements - 1);
set->variant = &bitmap_ipmac;
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct bitmap_ipmac_elem));
if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
kfree(map);
return -ENOMEM;
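
The adt element used to borrow a pointer into the packet or the netlink
attribute, which may be unaligned on ARM; it now embeds a 2-byte-aligned
copy, and the explicit add_mac flag replaces the old NULL-pointer
convention for "no MAC given". In miniature:

    struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };

    memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
    /* e.ether is __aligned(2), so ether_addr_equal() and
     * ether_addr_copy() never operate on an unaligned address. */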

net/netfilter/ipset/ip_set_bitmap_port.c

@@ -35,12 +35,13 @@ MODULE_ALIAS("ip_set_bitmap:port");
/* Type structure */
struct bitmap_port {
void *members; /* the set members */
void *extensions; /* data extensions */
u16 first_port; /* host byte order, included in range */
u16 last_port; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
size_t memsize; /* members size */
struct timer_list gc; /* garbage collection */
unsigned char extensions[0] /* data extensions */
__aligned(__alignof__(u64));
};
/* ADT structure for generic function args */
@@ -209,13 +210,6 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
map->members = ip_set_alloc(map->memsize);
if (!map->members)
return false;
if (set->dsize) {
map->extensions = ip_set_alloc(set->dsize * map->elements);
if (!map->extensions) {
kfree(map->members);
return false;
}
}
map->first_port = first_port;
map->last_port = last_port;
set->timeout = IPSET_NO_TIMEOUT;
@@ -232,6 +226,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
{
struct bitmap_port *map;
u16 first_port, last_port;
u32 elements;
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -248,14 +243,15 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
last_port = tmp;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
elements = last_port - first_port + 1;
set->dsize = ip_set_elem_len(set, tb, 0, 0);
map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
if (!map)
return -ENOMEM;
map->elements = last_port - first_port + 1;
map->elements = elements;
map->memsize = bitmap_bytes(0, map->elements);
set->variant = &bitmap_port;
set->dsize = ip_set_elem_len(set, tb, 0);
if (!init_map_port(set, map, first_port, last_port)) {
kfree(map);
return -ENOMEM;

net/netfilter/ipset/ip_set_core.c

@@ -364,25 +364,27 @@ add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
}
size_t
ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
size_t align)
{
enum ip_set_ext_id id;
size_t offset = len;
u32 cadt_flags = 0;
if (tb[IPSET_ATTR_CADT_FLAGS])
cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
set->flags |= IPSET_CREATE_FLAG_FORCEADD;
if (!align)
align = 1;
for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
if (!add_extension(id, cadt_flags, tb))
continue;
offset = ALIGN(offset, ip_set_extensions[id].align);
set->offset[id] = offset;
len = ALIGN(len, ip_set_extensions[id].align);
set->offset[id] = len;
set->extensions |= ip_set_extensions[id].type;
offset += ip_set_extensions[id].len;
len += ip_set_extensions[id].len;
}
return offset;
return ALIGN(len, align);
}
EXPORT_SYMBOL_GPL(ip_set_elem_len);
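
With the new align argument the helper lays out an element as the data
followed by each enabled extension, then rounds the total up so elements
can sit back to back in an array. A worked example with assumed numbers
(10 bytes of element data, one extension of len 16 and align 8, element
alignment 8):

    len = 10;
    len = ALIGN(len, 8);    /* -> 16, offset of the extension */
    set->offset[id] = len;
    len += 16;              /* -> 32 */
    return ALIGN(len, 8);   /* -> 32, per-element stride in the array */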

net/netfilter/ipset/ip_set_hash_gen.h

@@ -72,8 +72,9 @@ struct hbucket {
DECLARE_BITMAP(used, AHASH_MAX_TUNED);
u8 size; /* size of the array */
u8 pos; /* position of the first free entry */
unsigned char value[0]; /* the array of the values */
} __attribute__ ((aligned));
unsigned char value[0] /* the array of the values */
__aligned(__alignof__(u64));
};
/* The hash table: the table size stored here in order to make resizing easy */
struct htable {
@@ -475,7 +476,7 @@ static void
mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
{
struct htable *t;
struct hbucket *n;
struct hbucket *n, *tmp;
struct mtype_elem *data;
u32 i, j, d;
#ifdef IP_SET_HASH_WITH_NETS
@@ -510,9 +511,14 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
}
}
if (d >= AHASH_INIT_SIZE) {
struct hbucket *tmp = kzalloc(sizeof(*tmp) +
(n->size - AHASH_INIT_SIZE) * dsize,
GFP_ATOMIC);
if (d >= n->size) {
rcu_assign_pointer(hbucket(t, i), NULL);
kfree_rcu(n, rcu);
continue;
}
tmp = kzalloc(sizeof(*tmp) +
(n->size - AHASH_INIT_SIZE) * dsize,
GFP_ATOMIC);
if (!tmp)
/* Still try to delete expired elements */
continue;
@@ -522,7 +528,7 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
continue;
data = ahash_data(n, j, dsize);
memcpy(tmp->value + d * dsize, data, dsize);
set_bit(j, tmp->used);
set_bit(d, tmp->used);
d++;
}
tmp->pos = d;
@@ -1323,12 +1329,14 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
#endif
set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)));
sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
__alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
#ifndef IP_SET_PROTO_UNDEF
} else {
set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)));
sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
__alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
}
#endif
if (tb[IPSET_ATTR_TIMEOUT]) {
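
Two expiry-path fixes are folded in above: a bucket whose entries have all
expired (d >= n->size) is now unpublished and freed via RCU instead of
being shrunk to an empty array, and when survivors are compacted into the
smaller replacement bucket the used bit must track the destination slot d,
not the source index j. In miniature, survivors at source slots 0 and 2
land in destination slots 0 and 1; set_bit(j, ...) would mark bit 2 (an
empty slot) and never mark bit 1 (a live entry):

    memcpy(tmp->value + d * dsize, data, dsize);
    set_bit(d, tmp->used);    /* mark the destination slot, not j */
    d++;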

net/netfilter/ipset/ip_set_list_set.c

@@ -31,7 +31,7 @@ struct set_elem {
struct rcu_head rcu;
struct list_head list;
ip_set_id_t id;
};
} __aligned(__alignof__(u64));
struct set_adt_elem {
ip_set_id_t id;
@@ -618,7 +618,8 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
size = IP_SET_LIST_MIN_SIZE;
set->variant = &set_variant;
set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem));
set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
__alignof__(struct set_elem));
if (!init_list_set(net, set, size))
return -ENOMEM;
if (tb[IPSET_ATTR_TIMEOUT]) {

net/netfilter/nfnetlink_log.c

@@ -825,7 +825,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
struct net *net = sock_net(ctnl);
struct nfnl_log_net *log = nfnl_log_pernet(net);
int ret = 0;
u16 flags;
u16 flags = 0;
if (nfula[NFULA_CFG_CMD]) {
u_int8_t pf = nfmsg->nfgen_family;

net/netfilter/nft_counter.c

@@ -47,27 +47,34 @@ static void nft_counter_eval(const struct nft_expr *expr,
local_bh_enable();
}
static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
static void nft_counter_fetch(const struct nft_counter_percpu __percpu *counter,
struct nft_counter *total)
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
struct nft_counter_percpu *cpu_stats;
struct nft_counter total;
const struct nft_counter_percpu *cpu_stats;
u64 bytes, packets;
unsigned int seq;
int cpu;
memset(&total, 0, sizeof(total));
memset(total, 0, sizeof(*total));
for_each_possible_cpu(cpu) {
cpu_stats = per_cpu_ptr(priv->counter, cpu);
cpu_stats = per_cpu_ptr(counter, cpu);
do {
seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
bytes = cpu_stats->counter.bytes;
packets = cpu_stats->counter.packets;
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
total.packets += packets;
total.bytes += bytes;
total->packets += packets;
total->bytes += bytes;
}
}
static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
struct nft_counter total;
nft_counter_fetch(priv->counter, &total);
if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
@@ -118,6 +125,31 @@ static void nft_counter_destroy(const struct nft_ctx *ctx,
free_percpu(priv->counter);
}
static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(src);
struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst);
struct nft_counter_percpu __percpu *cpu_stats;
struct nft_counter_percpu *this_cpu;
struct nft_counter total;
nft_counter_fetch(priv->counter, &total);
cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu,
GFP_ATOMIC);
if (cpu_stats == NULL)
return ENOMEM;
preempt_disable();
this_cpu = this_cpu_ptr(cpu_stats);
this_cpu->counter.packets = total.packets;
this_cpu->counter.bytes = total.bytes;
preempt_enable();
priv_clone->counter = cpu_stats;
return 0;
}
static struct nft_expr_type nft_counter_type;
static const struct nft_expr_ops nft_counter_ops = {
.type = &nft_counter_type,
@@ -126,6 +158,7 @@ static const struct nft_expr_ops nft_counter_ops = {
.init = nft_counter_init,
.destroy = nft_counter_destroy,
.dump = nft_counter_dump,
.clone = nft_counter_clone,
};
static struct nft_expr_type nft_counter_type __read_mostly = {
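
The .clone hook exists here because the expression's private area is just
a pointer to per-CPU counters; the failure mode a plain copy would produce,
in outline:

    memcpy(dst, src, src->ops->size);
    /* dst and src now alias one priv->counter per-CPU area: counts
     * are shared between the two instances and free_percpu() runs
     * twice on destroy. nft_counter_clone() instead allocates a
     * fresh area (GFP_ATOMIC, it may run from the packet path) and
     * seeds one CPU's slot with the fetched totals. */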

net/netfilter/nft_dynset.c

@@ -50,8 +50,9 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
}
ext = nft_set_elem_ext(set, elem);
if (priv->expr != NULL)
nft_expr_clone(nft_set_ext_expr(ext), priv->expr);
if (priv->expr != NULL &&
nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
return NULL;
return elem;
}