Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter/IPVS fixes for net

The following patchset contains Netfilter/IPVS fixes for your net tree,
they are:

1) Fix SIP conntrack with phones sending session descriptions for different
   media types but same port numbers, from Florian Westphal.

2) Fix incorrect rtnl_lock mutex logic from IPVS sync thread, from Julian
   Anastasov.

3) Skip compat array allocation in ebtables if there are no entries, also
   from Florian.

4) Do not lose left/right bits when shifting marks from xt_connmark, from
   Jack Ma.

5) Silence false positive memleak in conntrack extensions, from Cong Wang.

6) Fix CONFIG_NF_REJECT_IPV6=m link problems, from Arnd Bergmann.

7) In nf_tables, a rule that is already linked into the rule list cannot be
   kfree'd on error; switch the order of operations so this error handling
   is not required, from Florian Westphal.

8) Release set name in error path, from Florian.

9) Include kmemleak.h in nf_conntrack_extend.c, from Stephen Rothwell.

10) NAT chain and extensions depend on NF_TABLES.

11) Out-of-bounds access when renaming chains, from Taehee Yoo.

12) Incorrect casting in xt_connmark leads to wrong bitshifting.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2018-04-23 16:22:24 -04:00 as commit 77621f024d.
10 changed files with 199 additions and 170 deletions.

net/bridge/netfilter/ebtables.c

@@ -1825,13 +1825,14 @@ static int compat_table_info(const struct ebt_table_info *info,
 {
 	unsigned int size = info->entries_size;
 	const void *entries = info->entries;
-	int ret;
 
 	newinfo->entries_size = size;
-
-	ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
-	if (ret)
-		return ret;
+	if (info->nentries) {
+		int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
+						 info->nentries);
+		if (ret)
+			return ret;
+	}
 
 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
 				 entries, newinfo);
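
The hunk above (item 3 in the pull request) comes down to guarding a compat offsets allocation behind a non-zero entry count and propagating the allocation error. A minimal userspace analogue of that flow, assuming a made-up compat_offsets_alloc() standing in for xt_compat_init_offsets():

#include <errno.h>
#include <stdlib.h>

/* Hypothetical stand-in for xt_compat_init_offsets(): one slot per entry,
 * -ENOMEM on failure.
 */
static int *offsets;

static int compat_offsets_alloc(unsigned int nentries)
{
	offsets = calloc(nentries, sizeof(*offsets));
	return offsets ? 0 : -ENOMEM;
}

/* Mirrors the fixed flow: skip the allocation entirely for an empty table
 * instead of asking for a zero-sized array.
 */
static int prepare_compat(unsigned int nentries)
{
	if (nentries) {
		int ret = compat_offsets_alloc(nentries);

		if (ret)
			return ret;
	}
	return 0;	/* nothing to translate for an empty table */
}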

net/ipv6/netfilter/Kconfig

@@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6
 	  fields such as the source, destination, flowlabel, hop-limit and
 	  the packet mark.
 
+if NF_NAT_IPV6
+
+config NFT_CHAIN_NAT_IPV6
+	tristate "IPv6 nf_tables nat chain support"
+	help
+	  This option enables the "nat" chain for IPv6 in nf_tables. This
+	  chain type is used to perform Network Address Translation (NAT)
+	  packet transformations such as the source, destination address and
+	  source and destination ports.
+
+config NFT_MASQ_IPV6
+	tristate "IPv6 masquerade support for nf_tables"
+	depends on NFT_MASQ
+	select NF_NAT_MASQUERADE_IPV6
+	help
+	  This is the expression that provides IPv4 masquerading support for
+	  nf_tables.
+
+config NFT_REDIR_IPV6
+	tristate "IPv6 redirect support for nf_tables"
+	depends on NFT_REDIR
+	select NF_NAT_REDIRECT
+	help
+	  This is the expression that provides IPv4 redirect support for
+	  nf_tables.
+
+endif # NF_NAT_IPV6
+
 config NFT_REJECT_IPV6
 	select NF_REJECT_IPV6
 	default NFT_REJECT
@@ -107,39 +135,12 @@ config NF_NAT_IPV6
 
 if NF_NAT_IPV6
 
-config NFT_CHAIN_NAT_IPV6
-	depends on NF_TABLES_IPV6
-	tristate "IPv6 nf_tables nat chain support"
-	help
-	  This option enables the "nat" chain for IPv6 in nf_tables. This
-	  chain type is used to perform Network Address Translation (NAT)
-	  packet transformations such as the source, destination address and
-	  source and destination ports.
-
 config NF_NAT_MASQUERADE_IPV6
 	tristate "IPv6 masquerade support"
 	help
 	  This is the kernel functionality to provide NAT in the masquerade
 	  flavour (automatic source address selection) for IPv6.
 
-config NFT_MASQ_IPV6
-	tristate "IPv6 masquerade support for nf_tables"
-	depends on NF_TABLES_IPV6
-	depends on NFT_MASQ
-	select NF_NAT_MASQUERADE_IPV6
-	help
-	  This is the expression that provides IPv4 masquerading support for
-	  nf_tables.
-
-config NFT_REDIR_IPV6
-	tristate "IPv6 redirect support for nf_tables"
-	depends on NF_TABLES_IPV6
-	depends on NFT_REDIR
-	select NF_NAT_REDIRECT
-	help
-	  This is the expression that provides IPv4 redirect support for
-	  nf_tables.
-
 endif # NF_NAT_IPV6
 
 config IP6_NF_IPTABLES

net/netfilter/Kconfig

@@ -594,6 +594,7 @@ config NFT_QUOTA
 config NFT_REJECT
 	default m if NETFILTER_ADVANCED=n
 	tristate "Netfilter nf_tables reject support"
+	depends on !NF_TABLES_INET || (IPV6!=m || m)
 	help
 	  This option adds the "reject" expression that you can use to
 	  explicitly deny and notify via TCP reset/ICMP informational errors

net/netfilter/ipvs/ip_vs_ctl.c

@@ -2384,11 +2384,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 			strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
 				sizeof(cfg.mcast_ifn));
 			cfg.syncid = dm->syncid;
-			rtnl_lock();
-			mutex_lock(&ipvs->sync_mutex);
 			ret = start_sync_thread(ipvs, &cfg, dm->state);
-			mutex_unlock(&ipvs->sync_mutex);
-			rtnl_unlock();
 		} else {
 			mutex_lock(&ipvs->sync_mutex);
 			ret = stop_sync_thread(ipvs, dm->state);
@@ -3481,12 +3477,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
 	if (ipvs->mixed_address_family_dests > 0)
 		return -EINVAL;
 
-	rtnl_lock();
-	mutex_lock(&ipvs->sync_mutex);
 	ret = start_sync_thread(ipvs, &c,
 				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
-	mutex_unlock(&ipvs->sync_mutex);
-	rtnl_unlock();
 	return ret;
 }
 

net/netfilter/ipvs/ip_vs_sync.c

@@ -49,6 +49,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/kernel.h>
+#include <linux/sched/signal.h>
 
 #include <asm/unaligned.h>		/* Used for ntoh_seq and hton_seq */
 
@@ -1360,15 +1361,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
 /*
  *      Specifiy default interface for outgoing multicasts
  */
-static int set_mcast_if(struct sock *sk, char *ifname)
+static int set_mcast_if(struct sock *sk, struct net_device *dev)
 {
-	struct net_device *dev;
 	struct inet_sock *inet = inet_sk(sk);
-	struct net *net = sock_net(sk);
-
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
 
 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
 		return -EINVAL;
@@ -1396,19 +1391,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
  *      in the in_addr structure passed in as a parameter.
  */
 static int
-join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
+join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
 {
-	struct net *net = sock_net(sk);
 	struct ip_mreqn mreq;
-	struct net_device *dev;
 	int ret;
 
 	memset(&mreq, 0, sizeof(mreq));
 	memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
 
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
 		return -EINVAL;
 
@@ -1423,15 +1413,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
 
 #ifdef CONFIG_IP_VS_IPV6
 static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
-			     char *ifname)
+			     struct net_device *dev)
 {
-	struct net *net = sock_net(sk);
-	struct net_device *dev;
 	int ret;
 
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
 		return -EINVAL;
 
@@ -1443,24 +1428,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
 }
 #endif
 
-static int bind_mcastif_addr(struct socket *sock, char *ifname)
+static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
 {
-	struct net *net = sock_net(sock->sk);
-	struct net_device *dev;
 	__be32 addr;
 	struct sockaddr_in sin;
 
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
-
 	addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
 	if (!addr)
 		pr_err("You probably need to specify IP address on "
 		       "multicast interface.\n");
 
 	IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
-		  ifname, &addr);
+		  dev->name, &addr);
 
 	/* Now bind the socket with the address of multicast interface */
 	sin.sin_family = AF_INET;
@@ -1493,7 +1472,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
 /*
  *      Set up sending multicast socket over UDP
  */
-static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+static int make_send_sock(struct netns_ipvs *ipvs, int id,
+			  struct net_device *dev, struct socket **sock_ret)
 {
 	/* multicast addr */
 	union ipvs_sockaddr mcast_addr;
@@ -1505,9 +1485,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
 				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
-		return ERR_PTR(result);
+		goto error;
 	}
-	result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
+	*sock_ret = sock;
+	result = set_mcast_if(sock->sk, dev);
 	if (result < 0) {
 		pr_err("Error setting outbound mcast interface\n");
 		goto error;
@@ -1522,7 +1503,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
 		set_sock_size(sock->sk, 1, result);
 
 	if (AF_INET == ipvs->mcfg.mcast_af)
-		result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
+		result = bind_mcastif_addr(sock, dev);
 	else
 		result = 0;
 	if (result < 0) {
@@ -1538,19 +1519,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
 		goto error;
 	}
 
-	return sock;
+	return 0;
 
 error:
-	sock_release(sock);
-	return ERR_PTR(result);
+	return result;
 }
 
 
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
-					int ifindex)
+static int make_receive_sock(struct netns_ipvs *ipvs, int id,
+			     struct net_device *dev, struct socket **sock_ret)
 {
 	/* multicast addr */
 	union ipvs_sockaddr mcast_addr;
@@ -1562,8 +1542,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
 				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
-		return ERR_PTR(result);
+		goto error;
 	}
+	*sock_ret = sock;
 	/* it is equivalent to the REUSEADDR option in user-space */
 	sock->sk->sk_reuse = SK_CAN_REUSE;
 	result = sysctl_sync_sock_size(ipvs);
@@ -1571,7 +1552,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
 		set_sock_size(sock->sk, 0, result);
 
 	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
-	sock->sk->sk_bound_dev_if = ifindex;
+	sock->sk->sk_bound_dev_if = dev->ifindex;
 	result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
 	if (result < 0) {
 		pr_err("Error binding to the multicast addr\n");
@@ -1582,21 +1563,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
 #ifdef CONFIG_IP_VS_IPV6
 	if (ipvs->bcfg.mcast_af == AF_INET6)
 		result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
-					   ipvs->bcfg.mcast_ifn);
+					   dev);
 	else
 #endif
 		result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
-					  ipvs->bcfg.mcast_ifn);
+					  dev);
 	if (result < 0) {
 		pr_err("Error joining to the multicast group\n");
 		goto error;
 	}
 
-	return sock;
+	return 0;
 
 error:
-	sock_release(sock);
-	return ERR_PTR(result);
+	return result;
 }
 
 
@@ -1778,13 +1758,12 @@ static int sync_thread_backup(void *data)
 int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 		      int state)
 {
-	struct ip_vs_sync_thread_data *tinfo;
+	struct ip_vs_sync_thread_data *tinfo = NULL;
 	struct task_struct **array = NULL, *task;
-	struct socket *sock;
 	struct net_device *dev;
 	char *name;
 	int (*threadfn)(void *data);
-	int id, count, hlen;
+	int id = 0, count, hlen;
 	int result = -ENOMEM;
 	u16 mtu, min_mtu;
 
@@ -1792,6 +1771,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
 		  sizeof(struct ip_vs_sync_conn_v0));
 
+	/* Do not hold one mutex and then to block on another */
+	for (;;) {
+		rtnl_lock();
+		if (mutex_trylock(&ipvs->sync_mutex))
+			break;
+		rtnl_unlock();
+		mutex_lock(&ipvs->sync_mutex);
+		if (rtnl_trylock())
+			break;
+		mutex_unlock(&ipvs->sync_mutex);
+	}
+
 	if (!ipvs->sync_state) {
 		count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
 		ipvs->threads_mask = count - 1;
@@ -1810,7 +1801,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 	dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
 	if (!dev) {
 		pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
-		return -ENODEV;
+		result = -ENODEV;
+		goto out_early;
 	}
 	hlen = (AF_INET6 == c->mcast_af) ?
 	       sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
@@ -1827,26 +1819,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 	c->sync_maxlen = mtu - hlen;
 
 	if (state == IP_VS_STATE_MASTER) {
+		result = -EEXIST;
 		if (ipvs->ms)
-			return -EEXIST;
+			goto out_early;
 
 		ipvs->mcfg = *c;
 		name = "ipvs-m:%d:%d";
 		threadfn = sync_thread_master;
 	} else if (state == IP_VS_STATE_BACKUP) {
+		result = -EEXIST;
 		if (ipvs->backup_threads)
-			return -EEXIST;
+			goto out_early;
 
 		ipvs->bcfg = *c;
 		name = "ipvs-b:%d:%d";
 		threadfn = sync_thread_backup;
 	} else {
-		return -EINVAL;
+		result = -EINVAL;
+		goto out_early;
 	}
 
 	if (state == IP_VS_STATE_MASTER) {
 		struct ipvs_master_sync_state *ms;
 
+		result = -ENOMEM;
 		ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL);
 		if (!ipvs->ms)
 			goto out;
@@ -1862,39 +1858,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 	} else {
 		array = kcalloc(count, sizeof(struct task_struct *),
 				GFP_KERNEL);
+		result = -ENOMEM;
 		if (!array)
 			goto out;
 	}
 
-	tinfo = NULL;
 	for (id = 0; id < count; id++) {
-		if (state == IP_VS_STATE_MASTER)
-			sock = make_send_sock(ipvs, id);
-		else
-			sock = make_receive_sock(ipvs, id, dev->ifindex);
-		if (IS_ERR(sock)) {
-			result = PTR_ERR(sock);
-			goto outtinfo;
-		}
+		result = -ENOMEM;
 		tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
 		if (!tinfo)
-			goto outsocket;
+			goto out;
 		tinfo->ipvs = ipvs;
-		tinfo->sock = sock;
+		tinfo->sock = NULL;
 		if (state == IP_VS_STATE_BACKUP) {
 			tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
 					     GFP_KERNEL);
 			if (!tinfo->buf)
-				goto outtinfo;
+				goto out;
 		} else {
 			tinfo->buf = NULL;
 		}
 		tinfo->id = id;
+		if (state == IP_VS_STATE_MASTER)
+			result = make_send_sock(ipvs, id, dev, &tinfo->sock);
+		else
+			result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
+		if (result < 0)
+			goto out;
 
 		task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
 		if (IS_ERR(task)) {
 			result = PTR_ERR(task);
-			goto outtinfo;
+			goto out;
 		}
 		tinfo = NULL;
 		if (state == IP_VS_STATE_MASTER)
@@ -1911,20 +1906,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 	ipvs->sync_state |= state;
 	spin_unlock_bh(&ipvs->sync_buff_lock);
 
+	mutex_unlock(&ipvs->sync_mutex);
+	rtnl_unlock();
+
 	/* increase the module use count */
 	ip_vs_use_count_inc();
 
 	return 0;
 
-outsocket:
-	sock_release(sock);
-
-outtinfo:
-	if (tinfo) {
-		sock_release(tinfo->sock);
-		kfree(tinfo->buf);
-		kfree(tinfo);
-	}
+out:
+	/* We do not need RTNL lock anymore, release it here so that
+	 * sock_release below and in the kthreads can use rtnl_lock
+	 * to leave the mcast group.
+	 */
+	rtnl_unlock();
 	count = id;
 	while (count-- > 0) {
 		if (state == IP_VS_STATE_MASTER)
@@ -1932,13 +1927,23 @@ outtinfo:
 		else
 			kthread_stop(array[count]);
 	}
-	kfree(array);
-
-out:
 	if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
 		kfree(ipvs->ms);
 		ipvs->ms = NULL;
 	}
 	mutex_unlock(&ipvs->sync_mutex);
+
+	if (tinfo) {
+		if (tinfo->sock)
+			sock_release(tinfo->sock);
+		kfree(tinfo->buf);
+		kfree(tinfo);
+	}
+	kfree(array);
+	return result;
+
+out_early:
+	mutex_unlock(&ipvs->sync_mutex);
+	rtnl_unlock();
 	return result;
 }
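
The core of the IPVS change (item 2 above) is the try-lock loop added to start_sync_thread(): the function needs both rtnl_lock and sync_mutex, but other paths take them in a different order, so it never blocks on the second lock while holding the first. A minimal userspace analogue of that pattern using pthreads; the names below are illustrative, not the kernel's:

#include <pthread.h>

/* Stand-ins for rtnl_lock and ipvs->sync_mutex. */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Acquire both locks without imposing a fixed order on every caller:
 * block on one, try the other, and back off completely on failure so an
 * ABBA deadlock with a thread already holding the other lock cannot occur.
 */
static void lock_both(void)
{
	for (;;) {
		pthread_mutex_lock(&lock_a);
		if (pthread_mutex_trylock(&lock_b) == 0)
			break;			/* got both */
		pthread_mutex_unlock(&lock_a);

		pthread_mutex_lock(&lock_b);
		if (pthread_mutex_trylock(&lock_a) == 0)
			break;			/* got both */
		pthread_mutex_unlock(&lock_b);
	}
}

static void unlock_both(void)
{
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}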

net/netfilter/nf_conntrack_expect.c

@@ -252,7 +252,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
 static inline int expect_matches(const struct nf_conntrack_expect *a,
 				 const struct nf_conntrack_expect *b)
 {
-	return a->master == b->master && a->class == b->class &&
+	return a->master == b->master &&
 	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
 	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
 	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
@@ -421,6 +421,9 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
 	h = nf_ct_expect_dst_hash(net, &expect->tuple);
 	hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
 		if (expect_matches(i, expect)) {
+			if (i->class != expect->class)
+				return -EALREADY;
+
 			if (nf_ct_remove_expect(i))
 				break;
 		} else if (expect_clash(i, expect)) {

net/netfilter/nf_conntrack_extend.c

@@ -9,6 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
+#include <linux/kmemleak.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
@@ -71,6 +72,7 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 	rcu_read_unlock();
 
 	alloc = max(newlen, NF_CT_EXT_PREALLOC);
+	kmemleak_not_leak(old);
 	new = __krealloc(old, alloc, gfp);
 	if (!new)
 		return NULL;

net/netfilter/nf_conntrack_sip.c

@@ -938,11 +938,19 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
 					   datalen, rtp_exp, rtcp_exp,
 					   mediaoff, medialen, daddr);
 	else {
-		if (nf_ct_expect_related(rtp_exp) == 0) {
-			if (nf_ct_expect_related(rtcp_exp) != 0)
-				nf_ct_unexpect_related(rtp_exp);
-			else
+		/* -EALREADY handling works around end-points that send
+		 * SDP messages with identical port but different media type,
+		 * we pretend expectation was set up.
+		 */
+		int errp = nf_ct_expect_related(rtp_exp);
+
+		if (errp == 0 || errp == -EALREADY) {
+			int errcp = nf_ct_expect_related(rtcp_exp);
+
+			if (errcp == 0 || errcp == -EALREADY)
 				ret = NF_ACCEPT;
+			else if (errp == 0)
+				nf_ct_unexpect_related(rtp_exp);
 		}
 	}
 	nf_ct_expect_put(rtcp_exp);
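
The SIP hunk above (items 1 and 5 relate to it) follows a simple rollback rule: register two paired expectations, treat "already registered" (-EALREADY) as success, and on failure of the second undo the first only if this call actually created it. A standalone sketch of that rule; register_res()/unregister_res() are made-up stand-ins, not the conntrack API:

#include <errno.h>
#include <stdio.h>

/* Made-up registration helper: returns the error it is told to simulate. */
static int register_res(int simulated_err)
{
	return simulated_err;
}

static void unregister_res(const char *name)
{
	printf("rolled back %s\n", name);
}

/* Register an RTP/RTCP pair with -EALREADY counted as success; only roll
 * back the RTP side if this call really created it.
 */
static int register_pair(int rtp_err, int rtcp_err)
{
	int errp = register_res(rtp_err);

	if (errp != 0 && errp != -EALREADY)
		return errp;

	int errcp = register_res(rtcp_err);

	if (errcp == 0 || errcp == -EALREADY)
		return 0;			/* both usable: accept */

	if (errp == 0)				/* we created it, so undo it */
		unregister_res("rtp");
	return errcp;
}

int main(void)
{
	/* RTP already existed, RTCP fine: treated as fully set up (prints 0) */
	printf("%d\n", register_pair(-EALREADY, 0));
	return 0;
}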

net/netfilter/nf_tables_api.c

@@ -2361,41 +2361,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
 	}
 
 	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
-		if (nft_is_active_next(net, old_rule)) {
-			trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
-						   old_rule);
-			if (trans == NULL) {
-				err = -ENOMEM;
-				goto err2;
-			}
-			nft_deactivate_next(net, old_rule);
-			chain->use--;
-			list_add_tail_rcu(&rule->list, &old_rule->list);
-		} else {
+		if (!nft_is_active_next(net, old_rule)) {
 			err = -ENOENT;
 			goto err2;
 		}
-	} else if (nlh->nlmsg_flags & NLM_F_APPEND)
-		if (old_rule)
-			list_add_rcu(&rule->list, &old_rule->list);
-		else
-			list_add_tail_rcu(&rule->list, &chain->rules);
-	else {
-		if (old_rule)
-			list_add_tail_rcu(&rule->list, &old_rule->list);
-		else
-			list_add_rcu(&rule->list, &chain->rules);
-	}
+		trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
+					   old_rule);
+		if (trans == NULL) {
+			err = -ENOMEM;
+			goto err2;
+		}
+		nft_deactivate_next(net, old_rule);
+		chain->use--;
 
-	if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
-		err = -ENOMEM;
-		goto err3;
+		if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+			err = -ENOMEM;
+			goto err2;
+		}
+
+		list_add_tail_rcu(&rule->list, &old_rule->list);
+	} else {
+		if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+			err = -ENOMEM;
+			goto err2;
+		}
+
+		if (nlh->nlmsg_flags & NLM_F_APPEND) {
+			if (old_rule)
+				list_add_rcu(&rule->list, &old_rule->list);
+			else
+				list_add_tail_rcu(&rule->list, &chain->rules);
+		} else {
+			if (old_rule)
+				list_add_tail_rcu(&rule->list, &old_rule->list);
+			else
+				list_add_rcu(&rule->list, &chain->rules);
+		}
 	}
 	chain->use++;
 	return 0;
 
-err3:
-	list_del_rcu(&rule->list);
 err2:
 	nf_tables_rule_destroy(&ctx, rule);
 err1:
@@ -3207,18 +3212,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 
 	err = ops->init(set, &desc, nla);
 	if (err < 0)
-		goto err2;
+		goto err3;
 
 	err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
 	if (err < 0)
-		goto err3;
+		goto err4;
 
 	list_add_tail_rcu(&set->list, &table->sets);
 	table->use++;
 	return 0;
 
-err3:
+err4:
 	ops->destroy(set);
+err3:
+	kfree(set->name);
 err2:
 	kvfree(set);
 err1:
@@ -5738,7 +5745,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
 	struct nft_base_chain *basechain;
 
 	if (nft_trans_chain_name(trans))
-		strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));
+		swap(trans->ctx.chain->name, nft_trans_chain_name(trans));
 
 	if (!nft_is_base_chain(trans->ctx.chain))
 		return;
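
Items 7 and 8 above share one error-handling principle that these nf_tables hunks implement: do every step that can fail before the object is published (linked into a live list or registered under a name), so the error path never has to unlink partially visible state. A standalone sketch of that ordering, with illustrative types that are not the kernel's list implementation:

#include <errno.h>
#include <stddef.h>

/* Illustrative singly linked list. */
struct node {
	struct node *next;
};

struct list {
	struct node *head;
};

/* Publish a node only after the fallible bookkeeping step has succeeded.
 * Because the node is still private when log_change() fails, the caller
 * can simply free it; there is no "already in list" state to undo.
 */
static int add_node(struct list *lst, struct node *n,
		    int (*log_change)(struct node *))
{
	int err = log_change(n);	/* may fail; nothing published yet */

	if (err)
		return err;

	n->next = lst->head;		/* only now make the node visible */
	lst->head = n;
	return 0;
}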

net/netfilter/xt_connmark.c

@@ -36,11 +36,10 @@ MODULE_ALIAS("ipt_connmark");
 MODULE_ALIAS("ip6t_connmark");
 
 static unsigned int
-connmark_tg_shift(struct sk_buff *skb,
-		  const struct xt_connmark_tginfo1 *info,
-		  u8 shift_bits, u8 shift_dir)
+connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
 {
 	enum ip_conntrack_info ctinfo;
+	u_int32_t new_targetmark;
 	struct nf_conn *ct;
 	u_int32_t newmark;
 
@@ -51,34 +50,39 @@ connmark_tg_shift(struct sk_buff *skb,
 	switch (info->mode) {
 	case XT_CONNMARK_SET:
 		newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
-		if (shift_dir == D_SHIFT_RIGHT)
-			newmark >>= shift_bits;
+		if (info->shift_dir == D_SHIFT_RIGHT)
+			newmark >>= info->shift_bits;
 		else
-			newmark <<= shift_bits;
+			newmark <<= info->shift_bits;
+
 		if (ct->mark != newmark) {
 			ct->mark = newmark;
 			nf_conntrack_event_cache(IPCT_MARK, ct);
 		}
 		break;
 	case XT_CONNMARK_SAVE:
-		newmark = (ct->mark & ~info->ctmask) ^
-			  (skb->mark & info->nfmask);
-		if (shift_dir == D_SHIFT_RIGHT)
-			newmark >>= shift_bits;
+		new_targetmark = (skb->mark & info->nfmask);
+		if (info->shift_dir == D_SHIFT_RIGHT)
+			new_targetmark >>= info->shift_bits;
 		else
-			newmark <<= shift_bits;
+			new_targetmark <<= info->shift_bits;
+
+		newmark = (ct->mark & ~info->ctmask) ^
+			  new_targetmark;
 		if (ct->mark != newmark) {
 			ct->mark = newmark;
 			nf_conntrack_event_cache(IPCT_MARK, ct);
 		}
 		break;
	case XT_CONNMARK_RESTORE:
-		newmark = (skb->mark & ~info->nfmask) ^
-			  (ct->mark & info->ctmask);
-		if (shift_dir == D_SHIFT_RIGHT)
-			newmark >>= shift_bits;
+		new_targetmark = (ct->mark & info->ctmask);
+		if (info->shift_dir == D_SHIFT_RIGHT)
+			new_targetmark >>= info->shift_bits;
 		else
-			newmark <<= shift_bits;
+			new_targetmark <<= info->shift_bits;
+
+		newmark = (skb->mark & ~info->nfmask) ^
+			  new_targetmark;
 		skb->mark = newmark;
 		break;
 	}
@@ -89,8 +93,14 @@ static unsigned int
 connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_connmark_tginfo1 *info = par->targinfo;
+	const struct xt_connmark_tginfo2 info2 = {
+		.ctmark = info->ctmark,
+		.ctmask = info->ctmask,
+		.nfmask = info->nfmask,
+		.mode   = info->mode,
+	};
 
-	return connmark_tg_shift(skb, info, 0, 0);
+	return connmark_tg_shift(skb, &info2);
 }
 
 static unsigned int
@@ -98,8 +108,7 @@ connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_connmark_tginfo2 *info = par->targinfo;
 
-	return connmark_tg_shift(skb, (const struct xt_connmark_tginfo1 *)info,
-				 info->shift_bits, info->shift_dir);
+	return connmark_tg_shift(skb, info);
 }
 
 static int connmark_tg_check(const struct xt_tgchk_param *par)
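
Items 4 and 12 above come down to where the configured shift is applied: in the fixed code it only ever shifts the bits selected by the mask, before they are folded into the destination mark, so the untouched bits of that mark survive. A small standalone illustration of the XT_CONNMARK_SAVE arithmetic with made-up values; this is not the kernel function, just the same computation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ctmark = 0x12340000;	/* existing conntrack mark */
	uint32_t skbmark = 0x000000ab;	/* packet (skb) mark */
	uint32_t ctmask = 0x0000ffff;	/* ctmark bits we may rewrite */
	uint32_t nfmask = 0x000000ff;	/* skbmark bits we read */
	unsigned int shift_bits = 8;	/* shift direction: left */

	/* Fixed XT_CONNMARK_SAVE order: mask the packet mark, shift only
	 * those bits, then fold them into the preserved conntrack bits.
	 */
	uint32_t new_targetmark = (skbmark & nfmask) << shift_bits;
	uint32_t newmark = (ctmark & ~ctmask) ^ new_targetmark;

	printf("new conntrack mark: 0x%08x\n", newmark);	/* 0x1234ab00 */
	return 0;
}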