2005-08-10 04:58:39 +02:00
|
|
|
/*
|
|
|
|
* This is a module which is used for logging packets to userspace via
|
|
|
|
 * nfnetlink.
|
|
|
|
*
|
|
|
|
* (C) 2005 by Harald Welte <laforge@netfilter.org>
|
2013-04-06 15:24:29 +02:00
|
|
|
* (C) 2006-2012 Patrick McHardy <kaber@trash.net>
|
2005-08-10 04:58:39 +02:00
|
|
|
*
|
|
|
|
* Based on the old ipv4-only ipt_ULOG.c:
|
|
|
|
* (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
2014-11-06 12:32:30 +01:00
|
|
|
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2005-08-10 04:58:39 +02:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/skbuff.h>
|
2012-12-17 01:12:00 +01:00
|
|
|
#include <linux/if_arp.h>
|
2005-08-10 04:58:39 +02:00
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/ip.h>
|
|
|
|
#include <linux/ipv6.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/netfilter.h>
|
2013-03-27 07:47:04 +01:00
|
|
|
#include <net/netlink.h>
|
2005-08-10 04:58:39 +02:00
|
|
|
#include <linux/netfilter/nfnetlink.h>
|
|
|
|
#include <linux/netfilter/nfnetlink_log.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/sysctl.h>
|
|
|
|
#include <linux/proc_fs.h>
|
|
|
|
#include <linux/security.h>
|
|
|
|
#include <linux/list.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 09:04:11 +01:00
|
|
|
#include <linux/slab.h>
|
2005-08-10 04:58:39 +02:00
|
|
|
#include <net/sock.h>
|
2007-12-18 07:38:49 +01:00
|
|
|
#include <net/netfilter/nf_log.h>
|
2013-03-25 00:50:45 +01:00
|
|
|
#include <net/netns/generic.h>
|
2008-11-18 12:16:52 +01:00
|
|
|
#include <net/netfilter/nfnetlink_log.h>
|
2005-08-10 04:58:39 +02:00
|
|
|
|
2011-07-27 01:09:06 +02:00
|
|
|
#include <linux/atomic.h>
|
2005-08-10 04:58:39 +02:00
|
|
|
|
2014-10-01 11:19:17 +02:00
|
|
|
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
|
2005-08-10 05:22:10 +02:00
|
|
|
#include "../bridge/br_private.h"
|
|
|
|
#endif
|
|
|
|
|
2006-02-04 11:13:14 +01:00
|
|
|
#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
|
2009-02-18 15:29:49 +01:00
|
|
|
#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
|
2005-08-10 04:58:39 +02:00
|
|
|
#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
|
2014-10-23 10:36:07 +02:00
|
|
|
/* max packet size is limited by 16-bit struct nfattr nfa_len field */
|
|
|
|
#define NFULNL_COPY_RANGE_MAX (0xFFFF - NLA_HDRLEN)
|
2005-08-10 04:58:39 +02:00
|
|
|
|
|
|
|
/* Rate-limited printk helper.  The do { } while (0) wrapper (without a
 * trailing semicolon) makes the macro expand to a single statement, so
 * it is safe to use in unbraced if/else bodies; the caller supplies the
 * terminating ';'. */
#define PRINTR(x, args...)	do { if (net_ratelimit()) \
				     printk(x, ## args); } while (0)
|
|
|
|
|
|
|
|
/* Per-group logging instance: one exists for each netlink logging group
 * a userspace listener has bound to.  Lifetime is refcounted via 'use';
 * lookups are lockless under RCU (see __instance_lookup()), frees are
 * deferred through 'rcu'. */
struct nfulnl_instance {
	struct hlist_node hlist;	/* global list of instances */
	spinlock_t lock;		/* protects skb/qlen/timer and the
					 * configurable parameters below */
	atomic_t use;			/* use count */

	unsigned int qlen;		/* number of nlmsgs in skb */
	struct sk_buff *skb;		/* pre-allocatd skb */
	struct timer_list timer;	/* flush timer (see nfulnl_timer()) */
	struct net *net;		/* netns owning this instance */
	struct user_namespace *peer_user_ns;	/* User namespace of the peer process */
	int peer_portid;			/* PORTID of the peer process */

	/* configurable parameters */
	unsigned int flushtimeout;	/* timeout until queue flush */
	unsigned int nlbufsiz;		/* netlink buffer allocation size */
	unsigned int qthreshold;	/* threshold of the queue */
	u_int32_t copy_range;		/* max payload bytes copied per packet */
	u_int32_t seq;			/* instance-local sequential counter */
	u_int16_t group_num;		/* number of this queue */
	u_int16_t flags;		/* config flags, see nfulnl_set_flags() */
	u_int8_t copy_mode;		/* NFULNL_COPY_* mode */
	struct rcu_head rcu;		/* deferred free, see instance_put() */
};
|
|
|
|
|
|
|
|
#define INSTANCE_BUCKETS	16

/* Key for the per-netns state below, see nfnl_log_pernet(). */
static int nfnl_log_net_id __read_mostly;

/* Per network namespace state. */
struct nfnl_log_net {
	spinlock_t instances_lock;	/* protects instance_table writes */
	struct hlist_head instance_table[INSTANCE_BUCKETS];
	atomic_t global_seq;		/* netns-wide sequence counter */
};
|
|
|
|
|
|
|
|
/* Return this netns' nfnetlink_log state (registered under nfnl_log_net_id). */
static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
{
	return net_generic(net, nfnl_log_net_id);
}
|
|
|
|
|
2005-08-10 04:58:39 +02:00
|
|
|
/* Map a group number onto one of the INSTANCE_BUCKETS hash buckets. */
static inline u_int8_t instance_hashfn(u_int16_t group_num)
{
	u_int8_t low_byte = group_num & 0xff;

	return low_byte % INSTANCE_BUCKETS;
}
|
|
|
|
|
|
|
|
static struct nfulnl_instance *
|
2013-03-25 00:50:45 +01:00
|
|
|
__instance_lookup(struct nfnl_log_net *log, u_int16_t group_num)
|
2005-08-10 04:58:39 +02:00
|
|
|
{
|
|
|
|
struct hlist_head *head;
|
|
|
|
struct nfulnl_instance *inst;
|
|
|
|
|
2013-03-25 00:50:45 +01:00
|
|
|
head = &log->instance_table[instance_hashfn(group_num)];
|
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 02:06:00 +01:00
|
|
|
hlist_for_each_entry_rcu(inst, head, hlist) {
|
2005-08-10 04:58:39 +02:00
|
|
|
if (inst->group_num == group_num)
|
|
|
|
return inst;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Take an additional reference on an instance; paired with instance_put(). */
static inline void
instance_get(struct nfulnl_instance *inst)
{
	atomic_inc(&inst->use);
}
|
|
|
|
|
|
|
|
/* Look up the instance for @group_num and take a reference on it.
 * The lookup runs under rcu_read_lock_bh(); atomic_inc_not_zero()
 * ensures we never resurrect an instance whose last reference is
 * already gone.  Returns NULL if not found or already dying. */
static struct nfulnl_instance *
instance_lookup_get(struct nfnl_log_net *log, u_int16_t group_num)
{
	struct nfulnl_instance *inst;

	rcu_read_lock_bh();
	inst = __instance_lookup(log, group_num);
	if (inst && !atomic_inc_not_zero(&inst->use))
		inst = NULL;
	rcu_read_unlock_bh();

	return inst;
}
|
|
|
|
|
2010-06-09 18:14:58 +02:00
|
|
|
/* RCU callback: free an instance once the grace period has elapsed.
 * Drops the netns reference taken in instance_create() and the module
 * reference that pinned us while the instance existed. */
static void nfulnl_instance_free_rcu(struct rcu_head *head)
{
	struct nfulnl_instance *inst =
		container_of(head, struct nfulnl_instance, rcu);

	put_net(inst->net);
	kfree(inst);
	module_put(THIS_MODULE);
}
|
|
|
|
|
2005-08-10 04:58:39 +02:00
|
|
|
/* Drop a reference; the final put defers the actual free to an RCU
 * callback so concurrent lockless lookups remain safe.  NULL is a no-op. */
static void
instance_put(struct nfulnl_instance *inst)
{
	if (inst && atomic_dec_and_test(&inst->use))
		call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
}
|
|
|
|
|
|
|
|
static void nfulnl_timer(unsigned long data);
|
|
|
|
|
|
|
|
/* Allocate and register a new logging instance for @group_num.
 *
 * Returns the new instance (with one reference held for the caller) or
 * an ERR_PTR: -EEXIST if the group is already bound, -ENOMEM on
 * allocation failure, -EAGAIN if the module reference cannot be taken.
 * Allocation uses GFP_ATOMIC because instances_lock is held with BHs
 * disabled across the lookup-and-insert. */
static struct nfulnl_instance *
instance_create(struct net *net, u_int16_t group_num,
		int portid, struct user_namespace *user_ns)
{
	struct nfulnl_instance *inst;
	struct nfnl_log_net *log = nfnl_log_pernet(net);
	int err;

	spin_lock_bh(&log->instances_lock);
	if (__instance_lookup(log, group_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (!try_module_get(THIS_MODULE)) {
		kfree(inst);
		err = -EAGAIN;
		goto out_unlock;
	}

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);

	setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);

	inst->net = get_net(net);
	inst->peer_user_ns = user_ns;
	inst->peer_portid = portid;
	inst->group_num = group_num;

	/* defaults; all overridable via NFULNL_MSG_CONFIG */
	inst->qthreshold 	= NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout 	= NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz 		= NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode 	= NFULNL_COPY_PACKET;
	inst->copy_range 	= NFULNL_COPY_RANGE_MAX;

	/* publish: visible to RCU readers from this point on */
	hlist_add_head_rcu(&inst->hlist,
		       &log->instance_table[instance_hashfn(group_num)]);

	spin_unlock_bh(&log->instances_lock);

	return inst;

out_unlock:
	spin_unlock_bh(&log->instances_lock);
	return ERR_PTR(err);
}
|
|
|
|
|
2007-09-28 23:44:21 +02:00
|
|
|
static void __nfulnl_flush(struct nfulnl_instance *inst);
|
2005-08-10 04:58:39 +02:00
|
|
|
|
2010-06-14 16:15:23 +02:00
|
|
|
/* called with BH disabled */
|
2005-08-10 04:58:39 +02:00
|
|
|
/* Unlink an instance and tear it down.  Caller holds instances_lock
 * with BHs disabled (hence plain spin_lock on inst->lock below). */
static void
__instance_destroy(struct nfulnl_instance *inst)
{
	/* first pull it out of the global list */
	hlist_del_rcu(&inst->hlist);

	/* then flush all pending packets from skb */

	spin_lock(&inst->lock);

	/* lockless readers wont be able to use us */
	inst->copy_mode = NFULNL_COPY_DISABLED;

	if (inst->skb)
		__nfulnl_flush(inst);
	spin_unlock(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);
}
|
|
|
|
|
|
|
|
/* Locked wrapper around __instance_destroy(). */
static inline void
instance_destroy(struct nfnl_log_net *log,
		 struct nfulnl_instance *inst)
{
	spin_lock_bh(&log->instances_lock);
	__instance_destroy(inst);
	spin_unlock_bh(&log->instances_lock);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
|
|
|
|
unsigned int range)
|
|
|
|
{
|
|
|
|
int status = 0;
|
|
|
|
|
|
|
|
spin_lock_bh(&inst->lock);
|
2007-02-12 20:15:49 +01:00
|
|
|
|
2005-08-10 04:58:39 +02:00
|
|
|
switch (mode) {
|
|
|
|
case NFULNL_COPY_NONE:
|
|
|
|
case NFULNL_COPY_META:
|
|
|
|
inst->copy_mode = mode;
|
|
|
|
inst->copy_range = 0;
|
|
|
|
break;
|
2007-02-12 20:15:49 +01:00
|
|
|
|
2005-08-10 04:58:39 +02:00
|
|
|
case NFULNL_COPY_PACKET:
|
|
|
|
inst->copy_mode = mode;
|
2014-10-23 10:36:07 +02:00
|
|
|
if (range == 0)
|
|
|
|
range = NFULNL_COPY_RANGE_MAX;
|
2007-09-28 23:45:52 +02:00
|
|
|
inst->copy_range = min_t(unsigned int,
|
|
|
|
range, NFULNL_COPY_RANGE_MAX);
|
2005-08-10 04:58:39 +02:00
|
|
|
break;
|
2007-02-12 20:15:49 +01:00
|
|
|
|
2005-08-10 04:58:39 +02:00
|
|
|
default:
|
|
|
|
status = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_bh(&inst->lock);
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nfulnl_set_nlbufsiz(struct nfulnl_instance *inst, u_int32_t nlbufsiz)
|
|
|
|
{
|
|
|
|
int status;
|
|
|
|
|
|
|
|
spin_lock_bh(&inst->lock);
|
|
|
|
if (nlbufsiz < NFULNL_NLBUFSIZ_DEFAULT)
|
|
|
|
status = -ERANGE;
|
|
|
|
else if (nlbufsiz > 131072)
|
|
|
|
status = -ERANGE;
|
|
|
|
else {
|
|
|
|
inst->nlbufsiz = nlbufsiz;
|
|
|
|
status = 0;
|
|
|
|
}
|
|
|
|
spin_unlock_bh(&inst->lock);
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set the queue flush timeout (units: 100 == one second, per
 * NFULNL_TIMEOUT_DEFAULT).  Always succeeds. */
static int
nfulnl_set_timeout(struct nfulnl_instance *inst, u_int32_t timeout)
{
	spin_lock_bh(&inst->lock);
	inst->flushtimeout = timeout;
	spin_unlock_bh(&inst->lock);

	return 0;
}
|
|
|
|
|
|
|
|
/* Set the queue-length threshold that triggers a flush.  Always succeeds. */
static int
nfulnl_set_qthresh(struct nfulnl_instance *inst, u_int32_t qthresh)
{
	spin_lock_bh(&inst->lock);
	inst->qthreshold = qthresh;
	spin_unlock_bh(&inst->lock);

	return 0;
}
|
|
|
|
|
2006-03-21 02:15:11 +01:00
|
|
|
/* Replace the per-instance flag bits wholesale.  Always succeeds. */
static int
nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
{
	spin_lock_bh(&inst->lock);
	inst->flags = flags;
	spin_unlock_bh(&inst->lock);

	return 0;
}
|
|
|
|
|
2007-09-28 23:45:06 +02:00
|
|
|
/* Allocate a queue skb for an instance.  First tries the larger of the
 * configured instance buffer size and the current packet's size; if
 * that fails and the packet alone would fit in less, retries with just
 * the packet size.  Returns NULL if both attempts fail. */
static struct sk_buff *
nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size,
		 unsigned int pkt_size)
{
	struct sk_buff *skb;
	unsigned int n;

	/* alloc skb which should be big enough for a whole multipart
	 * message.  WARNING: has to be <= 128k due to slab restrictions */

	n = max(inst_size, pkt_size);
	skb = nfnetlink_alloc_skb(net, n, peer_portid, GFP_ATOMIC);
	if (!skb) {
		if (n > pkt_size) {
			/* try to allocate only as much as we need for current
			 * packet */

			skb = nfnetlink_alloc_skb(net, pkt_size,
						  peer_portid, GFP_ATOMIC);
		}
	}

	return skb;
}
|
|
|
|
|
2014-10-23 10:36:08 +02:00
|
|
|
/* Hand the queued skb over to the peer and reset the queue state.
 * Called with inst->lock held (see nfulnl_timer()/__nfulnl_flush()).
 * When more than one message is queued, an NLMSG_DONE trailer is
 * appended so userspace can parse the skb as a multipart message. */
static void
__nfulnl_send(struct nfulnl_instance *inst)
{
	if (inst->qlen > 1) {
		struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
						 NLMSG_DONE,
						 sizeof(struct nfgenmsg),
						 0);
		/* skb sizing should always leave room for the trailer;
		 * complain once and drop the skb if it does not */
		if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n",
			      inst->skb->len, skb_tailroom(inst->skb))) {
			kfree_skb(inst->skb);
			goto out;
		}
	}
	nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
			  MSG_DONTWAIT);
out:
	inst->qlen = 0;
	inst->skb = NULL;
}
|
|
|
|
|
2007-09-28 23:44:21 +02:00
|
|
|
/* Flush any queued messages immediately.  Called with inst->lock held.
 * Cancelling a pending flush timer also drops the reference it held. */
static void
__nfulnl_flush(struct nfulnl_instance *inst)
{
	/* timer holds a reference */
	if (del_timer(&inst->timer))
		instance_put(inst);
	if (inst->skb)
		__nfulnl_send(inst);
}
|
|
|
|
|
2007-09-28 23:45:06 +02:00
|
|
|
/* Flush-timer expiry: send whatever has been queued so far, then drop
 * the reference the armed timer held on the instance. */
static void
nfulnl_timer(unsigned long data)
{
	struct nfulnl_instance *inst = (struct nfulnl_instance *)data;

	spin_lock_bh(&inst->lock);
	if (inst->skb)
		__nfulnl_send(inst);
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
}
|
|
|
|
|
2006-03-21 02:15:11 +01:00
|
|
|
/* This is an inline function, we don't really care about a long
|
|
|
|
* list of arguments */
|
2007-02-12 20:15:49 +01:00
|
|
|
static inline int
|
2013-03-25 00:50:45 +01:00
|
|
|
__build_packet_message(struct nfnl_log_net *log,
|
|
|
|
struct nfulnl_instance *inst,
|
2007-02-12 20:15:49 +01:00
|
|
|
const struct sk_buff *skb,
|
2005-08-10 04:58:39 +02:00
|
|
|
unsigned int data_len,
|
2008-10-08 11:35:00 +02:00
|
|
|
u_int8_t pf,
|
2005-08-10 04:58:39 +02:00
|
|
|
unsigned int hooknum,
|
|
|
|
const struct net_device *indev,
|
|
|
|
const struct net_device *outdev,
|
2006-11-29 02:35:34 +01:00
|
|
|
const char *prefix, unsigned int plen)
|
2005-08-10 04:58:39 +02:00
|
|
|
{
|
|
|
|
struct nfulnl_msg_packet_hdr pmsg;
|
|
|
|
struct nlmsghdr *nlh;
|
|
|
|
struct nfgenmsg *nfmsg;
|
2007-04-20 05:29:13 +02:00
|
|
|
sk_buff_data_t old_tail = inst->skb->tail;
|
2012-09-04 09:49:03 +02:00
|
|
|
struct sock *sk;
|
2012-12-16 19:34:11 +01:00
|
|
|
const unsigned char *hwhdrp;
|
2005-08-10 04:58:39 +02:00
|
|
|
|
2012-06-27 06:34:03 +02:00
|
|
|
nlh = nlmsg_put(inst->skb, 0, 0,
|
2005-08-10 04:58:39 +02:00
|
|
|
NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
|
2012-06-27 06:34:03 +02:00
|
|
|
sizeof(struct nfgenmsg), 0);
|
|
|
|
if (!nlh)
|
|
|
|
return -1;
|
|
|
|
nfmsg = nlmsg_data(nlh);
|
2005-08-10 04:58:39 +02:00
|
|
|
nfmsg->nfgen_family = pf;
|
|
|
|
nfmsg->version = NFNETLINK_V0;
|
|
|
|
nfmsg->res_id = htons(inst->group_num);
|
|
|
|
|
2013-08-01 11:36:57 +02:00
|
|
|
memset(&pmsg, 0, sizeof(pmsg));
|
2006-11-03 09:59:17 +01:00
|
|
|
pmsg.hw_protocol = skb->protocol;
|
2005-08-10 04:58:39 +02:00
|
|
|
pmsg.hook = hooknum;
|
|
|
|
|
2012-03-30 05:31:16 +02:00
|
|
|
if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 04:58:39 +02:00
|
|
|
|
2012-03-30 05:31:16 +02:00
|
|
|
if (prefix &&
|
|
|
|
nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 04:58:39 +02:00
|
|
|
|
|
|
|
if (indev) {
|
2014-10-01 11:19:17 +02:00
|
|
|
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
|
2012-03-30 05:31:16 +02:00
|
|
|
if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
|
|
|
|
htonl(indev->ifindex)))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 05:22:10 +02:00
|
|
|
#else
|
|
|
|
if (pf == PF_BRIDGE) {
|
|
|
|
/* Case 1: outdev is physical input device, we need to
|
|
|
|
* look for bridge group (when called from
|
|
|
|
* netfilter_bridge) */
|
2012-03-30 05:31:16 +02:00
|
|
|
if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
|
|
|
|
htonl(indev->ifindex)) ||
|
2005-08-10 05:22:10 +02:00
|
|
|
/* this is the bridge group "brX" */
|
2010-06-15 08:50:45 +02:00
|
|
|
/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
|
2012-03-30 05:31:16 +02:00
|
|
|
nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
|
|
|
|
htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 05:22:10 +02:00
|
|
|
} else {
|
|
|
|
/* Case 2: indev is bridge group, we need to look for
|
|
|
|
* physical device (when called from ipv4) */
|
2012-03-30 05:31:16 +02:00
|
|
|
if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
|
|
|
|
htonl(indev->ifindex)))
|
|
|
|
goto nla_put_failure;
|
|
|
|
if (skb->nf_bridge && skb->nf_bridge->physindev &&
|
|
|
|
nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
|
|
|
|
htonl(skb->nf_bridge->physindev->ifindex)))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 05:22:10 +02:00
|
|
|
}
|
|
|
|
#endif
|
2005-08-10 04:58:39 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (outdev) {
|
2014-10-01 11:19:17 +02:00
|
|
|
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
|
2012-03-30 05:31:16 +02:00
|
|
|
if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
|
|
|
|
htonl(outdev->ifindex)))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 05:22:10 +02:00
|
|
|
#else
|
|
|
|
if (pf == PF_BRIDGE) {
|
|
|
|
/* Case 1: outdev is physical output device, we need to
|
|
|
|
* look for bridge group (when called from
|
|
|
|
* netfilter_bridge) */
|
2012-03-30 05:31:16 +02:00
|
|
|
if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
|
|
|
|
htonl(outdev->ifindex)) ||
|
2005-08-10 05:22:10 +02:00
|
|
|
/* this is the bridge group "brX" */
|
2010-06-15 08:50:45 +02:00
|
|
|
/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
|
2012-03-30 05:31:16 +02:00
|
|
|
nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
|
|
|
|
htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 05:22:10 +02:00
|
|
|
} else {
|
|
|
|
/* Case 2: indev is a bridge group, we need to look
|
|
|
|
* for physical device (when called from ipv4) */
|
2012-03-30 05:31:16 +02:00
|
|
|
if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
|
|
|
|
htonl(outdev->ifindex)))
|
|
|
|
goto nla_put_failure;
|
|
|
|
if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
|
|
|
|
nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
|
|
|
|
htonl(skb->nf_bridge->physoutdev->ifindex)))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 05:22:10 +02:00
|
|
|
}
|
|
|
|
#endif
|
2005-08-10 04:58:39 +02:00
|
|
|
}
|
|
|
|
|
2012-03-30 05:31:16 +02:00
|
|
|
if (skb->mark &&
|
|
|
|
nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 04:58:39 +02:00
|
|
|
|
2011-06-16 17:27:04 +02:00
|
|
|
if (indev && skb->dev &&
|
|
|
|
skb->mac_header != skb->network_header) {
|
2005-08-10 04:58:39 +02:00
|
|
|
struct nfulnl_msg_packet_hw phw;
|
2013-08-01 11:36:57 +02:00
|
|
|
int len;
|
|
|
|
|
|
|
|
memset(&phw, 0, sizeof(phw));
|
|
|
|
len = dev_parse_header(skb, phw.hw_addr);
|
2007-09-27 07:13:38 +02:00
|
|
|
if (len > 0) {
|
|
|
|
phw.hw_addrlen = htons(len);
|
2012-03-30 05:31:16 +02:00
|
|
|
if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
|
|
|
|
goto nla_put_failure;
|
2007-09-27 07:13:38 +02:00
|
|
|
}
|
2005-08-10 04:58:39 +02:00
|
|
|
}
|
|
|
|
|
2008-07-21 19:02:35 +02:00
|
|
|
if (indev && skb_mac_header_was_set(skb)) {
|
2012-08-19 12:16:08 +02:00
|
|
|
if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
|
2012-03-30 05:31:16 +02:00
|
|
|
nla_put_be16(inst->skb, NFULA_HWLEN,
|
2012-12-16 19:34:11 +01:00
|
|
|
htons(skb->dev->hard_header_len)))
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
hwhdrp = skb_mac_header(skb);
|
|
|
|
|
|
|
|
if (skb->dev->type == ARPHRD_SIT)
|
|
|
|
hwhdrp -= ETH_HLEN;
|
|
|
|
|
|
|
|
if (hwhdrp >= skb->head &&
|
|
|
|
nla_put(inst->skb, NFULA_HWHEADER,
|
|
|
|
skb->dev->hard_header_len, hwhdrp))
|
2012-03-30 05:31:16 +02:00
|
|
|
goto nla_put_failure;
|
2008-07-21 19:02:35 +02:00
|
|
|
}
|
|
|
|
|
2007-04-20 01:16:32 +02:00
|
|
|
if (skb->tstamp.tv64) {
|
2005-08-10 04:58:39 +02:00
|
|
|
struct nfulnl_msg_packet_timestamp ts;
|
2007-04-20 01:16:32 +02:00
|
|
|
struct timeval tv = ktime_to_timeval(skb->tstamp);
|
|
|
|
ts.sec = cpu_to_be64(tv.tv_sec);
|
|
|
|
ts.usec = cpu_to_be64(tv.tv_usec);
|
2005-08-10 04:58:39 +02:00
|
|
|
|
2012-03-30 05:31:16 +02:00
|
|
|
if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 04:58:39 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* UID */
|
2012-09-04 09:49:03 +02:00
|
|
|
sk = skb->sk;
|
|
|
|
if (sk && sk->sk_state != TCP_TIME_WAIT) {
|
|
|
|
read_lock_bh(&sk->sk_callback_lock);
|
|
|
|
if (sk->sk_socket && sk->sk_socket->file) {
|
|
|
|
struct file *file = sk->sk_socket->file;
|
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace
Pull user namespace changes from Eric Biederman:
"This is a mostly modest set of changes to enable basic user namespace
support. This allows the code to compile with user namespaces
enabled and removes the assumption there is only the initial user
namespace. Everything is converted except for the most complex of the
filesystems: autofs4, 9p, afs, ceph, cifs, coda, fuse, gfs2, ncpfs,
nfs, ocfs2 and xfs as those patches need a bit more review.
The strategy is to push kuid_t and kgid_t values are far down into
subsystems and filesystems as reasonable. Leaving the make_kuid and
from_kuid operations to happen at the edge of userspace, as the values
come off the disk, and as the values come in from the network.
Letting compile type incompatible compile errors (present when user
namespaces are enabled) guide me to find the issues.
The most tricky areas have been the places where we had an implicit
union of uid and gid values and were storing them in an unsigned int.
Those places were converted into explicit unions. I made certain to
handle those places with simple trivial patches.
Out of that work I discovered we have generic interfaces for storing
quota by projid. I had never heard of the project identifiers before.
Adding full user namespace support for project identifiers accounts
for most of the code size growth in my git tree.
Ultimately there will be work to relax privilege checks from
"capable(FOO)" to "ns_capable(user_ns, FOO)" where it is safe allowing
root in a user names to do those things that today we only forbid to
non-root users because it will confuse suid root applications.
While I was pushing kuid_t and kgid_t changes deep into the audit code
I made a few other cleanups. I capitalized on the fact we process
netlink messages in the context of the message sender. I removed
usage of NETLINK_CRED, and started directly using current->tty.
Some of these patches have also made it into maintainer trees, with no
problems from identical code from different trees showing up in
linux-next.
After reading through all of this code I feel like I might be able to
win a game of kernel trivial pursuit."
Fix up some fairly trivial conflicts in netfilter uid/git logging code.
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace: (107 commits)
userns: Convert the ufs filesystem to use kuid/kgid where appropriate
userns: Convert the udf filesystem to use kuid/kgid where appropriate
userns: Convert ubifs to use kuid/kgid
userns: Convert squashfs to use kuid/kgid where appropriate
userns: Convert reiserfs to use kuid and kgid where appropriate
userns: Convert jfs to use kuid/kgid where appropriate
userns: Convert jffs2 to use kuid and kgid where appropriate
userns: Convert hpfs to use kuid and kgid where appropriate
userns: Convert btrfs to use kuid/kgid where appropriate
userns: Convert bfs to use kuid/kgid where appropriate
userns: Convert affs to use kuid/kgid wherwe appropriate
userns: On alpha modify linux_to_osf_stat to use convert from kuids and kgids
userns: On ia64 deal with current_uid and current_gid being kuid and kgid
userns: On ppc convert current_uid from a kuid before printing.
userns: Convert s390 getting uid and gid system calls to use kuid and kgid
userns: Convert s390 hypfs to use kuid and kgid where appropriate
userns: Convert binder ipc to use kuids
userns: Teach security_path_chown to take kuids and kgids
userns: Add user namespace support to IMA
userns: Convert EVM to deal with kuids and kgids in it's hmac computation
...
2012-10-02 20:11:09 +02:00
|
|
|
const struct cred *cred = file->f_cred;
|
|
|
|
struct user_namespace *user_ns = inst->peer_user_ns;
|
|
|
|
__be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
|
|
|
|
__be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
|
2012-09-04 09:49:03 +02:00
|
|
|
read_unlock_bh(&sk->sk_callback_lock);
|
2012-03-30 05:31:16 +02:00
|
|
|
if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
|
|
|
|
nla_put_be32(inst->skb, NFULA_GID, gid))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 04:58:39 +02:00
|
|
|
} else
|
2012-09-04 09:49:03 +02:00
|
|
|
read_unlock_bh(&sk->sk_callback_lock);
|
2005-08-10 04:58:39 +02:00
|
|
|
}
|
|
|
|
|
2006-03-21 02:15:11 +01:00
|
|
|
/* local sequence number */
|
2012-03-30 05:31:16 +02:00
|
|
|
if ((inst->flags & NFULNL_CFG_F_SEQ) &&
|
|
|
|
nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
|
|
|
|
goto nla_put_failure;
|
2007-12-18 07:41:35 +01:00
|
|
|
|
2006-03-21 02:15:11 +01:00
|
|
|
/* global sequence number */
|
2012-03-30 05:31:16 +02:00
|
|
|
if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
|
|
|
|
nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
|
2013-03-25 00:50:45 +01:00
|
|
|
htonl(atomic_inc_return(&log->global_seq))))
|
2012-03-30 05:31:16 +02:00
|
|
|
goto nla_put_failure;
|
2006-03-21 02:15:11 +01:00
|
|
|
|
2005-08-10 04:58:39 +02:00
|
|
|
if (data_len) {
|
2007-09-28 23:37:03 +02:00
|
|
|
struct nlattr *nla;
|
|
|
|
int size = nla_attr_size(data_len);
|
2005-08-10 04:58:39 +02:00
|
|
|
|
2014-11-06 12:32:28 +01:00
|
|
|
if (skb_tailroom(inst->skb) < nla_total_size(data_len))
|
|
|
|
goto nla_put_failure;
|
2005-08-10 04:58:39 +02:00
|
|
|
|
2007-09-28 23:37:03 +02:00
|
|
|
nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
|
|
|
|
nla->nla_type = NFULA_PAYLOAD;
|
|
|
|
nla->nla_len = size;
|
2005-08-10 04:58:39 +02:00
|
|
|
|
2007-09-28 23:37:03 +02:00
|
|
|
if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
|
2005-08-10 04:58:39 +02:00
|
|
|
BUG();
|
|
|
|
}
|
2007-02-12 20:15:49 +01:00
|
|
|
|
2005-08-10 04:58:39 +02:00
|
|
|
nlh->nlmsg_len = inst->skb->tail - old_tail;
|
|
|
|
return 0;
|
|
|
|
|
2007-09-28 23:37:03 +02:00
|
|
|
nla_put_failure:
|
2005-08-10 04:58:39 +02:00
|
|
|
PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Ack the offending netlink message with @err and bail out of the caller. */
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
|
|
|
|
|
|
|
|
/* Fallback logging parameters used when the caller passes no (or a
 * non-ULOG-typed) nf_loginfo: copy the whole packet (up to 0xffff bytes)
 * to group 0 and flush after every single packet (qthreshold = 1).
 */
static struct nf_loginfo default_loginfo = {
	.type =		NF_LOG_TYPE_ULOG,
	.u = {
		.ulog = {
			.copy_len	= 0xffff,
			.group		= 0,
			.qthreshold	= 1,
		},
	},
};
|
|
|
|
|
|
|
|
/* log handler for internal netfilter logging api
 *
 * Queue one packet for delivery to the userspace listener of the
 * instance bound to li->u.ulog.group (or the default group if no
 * ULOG-typed loginfo was supplied).  The message is appended to the
 * instance's batching skb; the batch is flushed to userspace when the
 * queue length reaches the effective qthreshold, when the next message
 * would not fit, or when the per-instance flush timer fires.
 *
 * Runs under inst->lock (BH-disabled spinlock); drops the instance
 * reference taken by instance_lookup_get() before returning.
 */
void
nfulnl_log_packet(struct net *net,
		  u_int8_t pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int plen;
	struct nfnl_log_net *log = nfnl_log_pernet(net);

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
		li = li_user;
	else
		li = &default_loginfo;

	/* No instance bound to this group -> nothing listens, drop silently. */
	inst = instance_lookup_get(log, li->u.ulog.group);
	if (!inst)
		return;

	plen = 0;
	if (prefix)
		plen = strlen(prefix) + 1;	/* include terminating NUL */

	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present? way more branches and checks, but more
	 * memory efficient... */
	size =    nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* uid */
		+ nla_total_size(sizeof(u_int32_t))	/* gid */
		+ nla_total_size(plen)			/* prefix */
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
		+ nla_total_size(sizeof(struct nfgenmsg));	/* NLMSG_DONE */

	/* Hardware header attributes are only emitted for ingress packets
	 * with a valid MAC header; account for them up front. */
	if (in && skb_mac_header_was_set(skb)) {
		size +=   nla_total_size(skb->dev->hard_header_len)
			+ nla_total_size(sizeof(u_int16_t))	/* hwtype */
			+ nla_total_size(sizeof(u_int16_t));	/* hwlen */
	}

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += nla_total_size(sizeof(u_int32_t));

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (li->u.ulog.qthreshold)
		if (qthreshold > li->u.ulog.qthreshold)
			qthreshold = li->u.ulog.qthreshold;


	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;

	case NFULNL_COPY_PACKET:
		/* clamp the payload copy to the actual packet length */
		if (inst->copy_range > skb->len)
			data_len = skb->len;
		else
			data_len = inst->copy_range;

		size += nla_total_size(data_len);
		break;

	case NFULNL_COPY_DISABLED:
	default:
		goto unlock_and_release;
	}

	if (inst->skb && size > skb_tailroom(inst->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		__nfulnl_flush(inst);
	}

	if (!inst->skb) {
		inst->skb = nfulnl_alloc_skb(net, inst->peer_portid,
					     inst->nlbufsiz, size);
		if (!inst->skb)
			goto alloc_failure;
	}

	inst->qlen++;

	__build_packet_message(log, inst, skb, data_len, pf,
				hooknum, in, out, prefix, plen);

	if (inst->qlen >= qthreshold)
		__nfulnl_flush(inst);
	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	else if (!timer_pending(&inst->timer)) {
		/* hold a ref for the timer; flushtimeout is in 1/100ths
		 * of a second (hence *HZ/100) */
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}

unlock_and_release:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	return;

alloc_failure:
	/* FIXME: statistics */
	goto unlock_and_release;
}
EXPORT_SYMBOL_GPL(nfulnl_log_packet);
|
2005-08-10 04:58:39 +02:00
|
|
|
|
|
|
|
/* Netlink notifier callback: when a NETLINK_NETFILTER socket is released,
 * tear down every logging instance that was bound to that socket's portid
 * so no instance keeps pointing at a dead peer.
 */
static int
nfulnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;
	struct nfnl_log_net *log = nfnl_log_pernet(n->net);

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this portid */
		spin_lock_bh(&log->instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *t2;
			struct nfulnl_instance *inst;
			struct hlist_head *head = &log->instance_table[i];

			/* _safe variant: __instance_destroy unlinks the
			 * entry we are standing on */
			hlist_for_each_entry_safe(inst, t2, head, hlist) {
				if (n->portid == inst->peer_portid)
					__instance_destroy(inst);
			}
		}
		spin_unlock_bh(&log->instances_lock);
	}
	return NOTIFY_DONE;
}
|
|
|
|
|
|
|
|
/* Registered with netlink_register_notifier() to learn about socket
 * releases (see nfulnl_rcv_nl_event above). */
static struct notifier_block nfulnl_rtnl_notifier = {
	.notifier_call	= nfulnl_rcv_nl_event,
};
|
|
|
|
|
|
|
|
/* NFULNL_MSG_PACKET messages only flow kernel -> userspace; reject any
 * that userspace tries to send us. */
static int
nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])
{
	return -ENOTSUPP;
}
|
|
|
|
|
2009-03-16 14:54:21 +01:00
|
|
|
/* nf_log backend descriptor; nfulnl_log_packet is invoked for every
 * packet handed to the NF_LOG_TYPE_ULOG logger. */
static struct nf_logger nfulnl_logger __read_mostly = {
	.name	= "nfnetlink_log",
	.type	= NF_LOG_TYPE_ULOG,
	.logfn	= &nfulnl_log_packet,
	.me	= THIS_MODULE,
};
|
|
|
|
|
2007-09-28 23:39:09 +02:00
|
|
|
/* Netlink attribute validation policy for NFULNL_MSG_CONFIG messages;
 * enforced by nfnetlink before nfulnl_recv_config() runs. */
static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = {
	[NFULA_CFG_CMD]		= { .len = sizeof(struct nfulnl_msg_config_cmd) },
	[NFULA_CFG_MODE]	= { .len = sizeof(struct nfulnl_msg_config_mode) },
	[NFULA_CFG_TIMEOUT]	= { .type = NLA_U32 },
	[NFULA_CFG_QTHRESH]	= { .type = NLA_U32 },
	[NFULA_CFG_NLBUFSIZ]	= { .type = NLA_U32 },
	[NFULA_CFG_FLAGS]	= { .type = NLA_U16 },
};
|
|
|
|
|
|
|
|
/* Handle an NFULNL_MSG_CONFIG request from userspace: bind/unbind the
 * logger to a protocol family, create or destroy the logging instance
 * for the requested group, and apply any per-instance tuning attributes
 * (copy mode/range, flush timeout, buffer size, queue threshold, flags).
 *
 * Returns 0 on success or a negative errno.  Only the socket that bound
 * an instance may reconfigure it (-EPERM otherwise).
 */
static int
nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfula[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t group_num = ntohs(nfmsg->res_id);
	struct nfulnl_instance *inst;
	struct nfulnl_msg_config_cmd *cmd = NULL;
	struct net *net = sock_net(ctnl);
	struct nfnl_log_net *log = nfnl_log_pernet(net);
	int ret = 0;

	if (nfula[NFULA_CFG_CMD]) {
		u_int8_t pf = nfmsg->nfgen_family;
		cmd = nla_data(nfula[NFULA_CFG_CMD]);

		/* Commands without queue context */
		switch (cmd->command) {
		case NFULNL_CFG_CMD_PF_BIND:
			return nf_log_bind_pf(net, pf, &nfulnl_logger);
		case NFULNL_CFG_CMD_PF_UNBIND:
			nf_log_unbind_pf(net, pf);
			return 0;
		}
	}

	/* takes a reference on success; dropped at out_put */
	inst = instance_lookup_get(log, group_num);
	if (inst && inst->peer_portid != NETLINK_CB(skb).portid) {
		ret = -EPERM;
		goto out_put;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFULNL_CFG_CMD_BIND:
			if (inst) {
				ret = -EBUSY;
				goto out_put;
			}

			inst = instance_create(net, group_num,
					       NETLINK_CB(skb).portid,
					       sk_user_ns(NETLINK_CB(skb).sk));
			if (IS_ERR(inst)) {
				ret = PTR_ERR(inst);
				goto out;
			}
			break;
		case NFULNL_CFG_CMD_UNBIND:
			if (!inst) {
				ret = -ENODEV;
				goto out;
			}

			instance_destroy(log, inst);
			goto out_put;
		default:
			ret = -ENOTSUPP;
			break;
		}
	}

	/* The remaining attributes all require an existing instance. */
	if (nfula[NFULA_CFG_MODE]) {
		struct nfulnl_msg_config_mode *params;
		params = nla_data(nfula[NFULA_CFG_MODE]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_mode(inst, params->copy_mode,
				ntohl(params->copy_range));
	}

	if (nfula[NFULA_CFG_TIMEOUT]) {
		__be32 timeout = nla_get_be32(nfula[NFULA_CFG_TIMEOUT]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_timeout(inst, ntohl(timeout));
	}

	if (nfula[NFULA_CFG_NLBUFSIZ]) {
		__be32 nlbufsiz = nla_get_be32(nfula[NFULA_CFG_NLBUFSIZ]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
	}

	if (nfula[NFULA_CFG_QTHRESH]) {
		__be32 qthresh = nla_get_be32(nfula[NFULA_CFG_QTHRESH]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_qthresh(inst, ntohl(qthresh));
	}

	if (nfula[NFULA_CFG_FLAGS]) {
		__be16 flags = nla_get_be16(nfula[NFULA_CFG_FLAGS]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_flags(inst, ntohs(flags));
	}

out_put:
	instance_put(inst);
out:
	return ret;
}
|
|
|
|
|
2007-09-28 23:15:45 +02:00
|
|
|
/* Message-type dispatch table for the NFNL_SUBSYS_ULOG subsystem. */
static const struct nfnl_callback nfulnl_cb[NFULNL_MSG_MAX] = {
	[NFULNL_MSG_PACKET]	= { .call = nfulnl_recv_unsupp,
				    .attr_count = NFULA_MAX, },
	[NFULNL_MSG_CONFIG]	= { .call = nfulnl_recv_config,
				    .attr_count = NFULA_CFG_MAX,
				    .policy = nfula_cfg_policy },
};
|
|
|
|
|
2007-09-28 23:15:45 +02:00
|
|
|
/* nfnetlink subsystem registration for the "log" family. */
static const struct nfnetlink_subsystem nfulnl_subsys = {
	.name		= "log",
	.subsys_id	= NFNL_SUBSYS_ULOG,
	.cb_count	= NFULNL_MSG_MAX,
	.cb		= nfulnl_cb,
};
|
|
|
|
|
|
|
|
#ifdef CONFIG_PROC_FS
|
|
|
|
/* Per-open seq_file iterator state: the embedded seq_net_private must
 * come first (seq_file_net() relies on it); bucket tracks the current
 * slot in the instance hash table. */
struct iter_state {
	struct seq_net_private p;
	unsigned int bucket;	/* current index into instance_table[] */
};
|
|
|
|
|
2013-03-25 00:50:45 +01:00
|
|
|
static struct hlist_node *get_first(struct net *net, struct iter_state *st)
|
2005-08-10 04:58:39 +02:00
|
|
|
{
|
2013-03-25 00:50:45 +01:00
|
|
|
struct nfnl_log_net *log;
|
2005-08-10 04:58:39 +02:00
|
|
|
if (!st)
|
|
|
|
return NULL;
|
|
|
|
|
2013-03-25 00:50:45 +01:00
|
|
|
log = nfnl_log_pernet(net);
|
|
|
|
|
2005-08-10 04:58:39 +02:00
|
|
|
for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
|
2013-03-25 00:50:45 +01:00
|
|
|
struct hlist_head *head = &log->instance_table[st->bucket];
|
|
|
|
|
|
|
|
if (!hlist_empty(head))
|
|
|
|
return rcu_dereference_bh(hlist_first_rcu(head));
|
2005-08-10 04:58:39 +02:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2013-03-25 00:50:45 +01:00
|
|
|
/* Advance the iterator to the node after @h, crossing into subsequent
 * hash buckets as needed.  Returns NULL once the table is exhausted.
 * Caller holds rcu_read_lock_bh().
 */
static struct hlist_node *get_next(struct net *net, struct iter_state *st,
				   struct hlist_node *h)
{
	h = rcu_dereference_bh(hlist_next_rcu(h));
	while (!h) {
		struct nfnl_log_net *log;
		struct hlist_head *head;

		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		log = nfnl_log_pernet(net);
		head = &log->instance_table[st->bucket];
		h = rcu_dereference_bh(hlist_first_rcu(head));
	}
	return h;
}
|
|
|
|
|
2013-03-25 00:50:45 +01:00
|
|
|
static struct hlist_node *get_idx(struct net *net, struct iter_state *st,
|
|
|
|
loff_t pos)
|
2005-08-10 04:58:39 +02:00
|
|
|
{
|
|
|
|
struct hlist_node *head;
|
2013-03-25 00:50:45 +01:00
|
|
|
head = get_first(net, st);
|
2005-08-10 04:58:39 +02:00
|
|
|
|
|
|
|
if (head)
|
2013-03-25 00:50:45 +01:00
|
|
|
while (pos && (head = get_next(net, st, head)))
|
2005-08-10 04:58:39 +02:00
|
|
|
pos--;
|
|
|
|
return pos ? NULL : head;
|
|
|
|
}
|
|
|
|
|
2013-03-25 00:50:45 +01:00
|
|
|
/* seq_file start: take the BH-safe RCU read lock (released in seq_stop)
 * and seek to entry *pos. */
static void *seq_start(struct seq_file *s, loff_t *pos)
	__acquires(rcu_bh)
{
	rcu_read_lock_bh();
	return get_idx(seq_file_net(s), s->private, *pos);
}
|
|
|
|
|
|
|
|
/* seq_file next: advance position and step to the following instance. */
static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(seq_file_net(s), s->private, v);
}
|
|
|
|
|
|
|
|
/* seq_file stop: drop the RCU read lock taken in seq_start(). */
static void seq_stop(struct seq_file *s, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}
|
|
|
|
|
|
|
|
/* Emit one /proc/net/netfilter/nfnetlink_log row:
 * group, peer portid, queue length, copy mode, copy range,
 * flush timeout, and the instance refcount. */
static int seq_show(struct seq_file *s, void *v)
{
	const struct nfulnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
			  inst->group_num,
			  inst->peer_portid, inst->qlen,
			  inst->copy_mode, inst->copy_range,
			  inst->flushtimeout, atomic_read(&inst->use));
}
|
|
|
|
|
2007-07-11 08:07:31 +02:00
|
|
|
/* seq_file iteration callbacks for the procfs instance listing. */
static const struct seq_operations nful_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};
|
|
|
|
|
|
|
|
/* open() for the procfs file: set up per-netns seq_file iteration with
 * our iter_state as private data. */
static int nful_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &nful_seq_ops,
			    sizeof(struct iter_state));
}
|
|
|
|
|
2007-02-12 09:55:36 +01:00
|
|
|
/* file_operations for /proc/net/netfilter/nfnetlink_log. */
static const struct file_operations nful_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nful_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
|
|
|
|
|
|
|
|
#endif /* PROC_FS */
|
|
|
|
|
2013-03-25 00:50:45 +01:00
|
|
|
/* Per-network-namespace init: empty the instance hash table, initialise
 * its lock, and (when procfs is enabled) create the status file.
 * Returns 0 or -ENOMEM if the proc entry cannot be created. */
static int __net_init nfnl_log_net_init(struct net *net)
{
	unsigned int i;
	struct nfnl_log_net *log = nfnl_log_pernet(net);

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&log->instance_table[i]);
	spin_lock_init(&log->instances_lock);

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_log", 0440,
			 net->nf.proc_netfilter, &nful_file_ops))
		return -ENOMEM;
#endif
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Per-network-namespace teardown: remove the /proc listing and drop
 * this logger binding from the namespace's nf_log state.
 */
static void __net_exit nfnl_log_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
#endif
	/* Unbind nfulnl_logger from any protocol families in this netns. */
	nf_log_unset(net, &nfulnl_logger);
}
|
|
|
|
|
|
|
|
static struct pernet_operations nfnl_log_net_ops = {
|
|
|
|
.init = nfnl_log_net_init,
|
|
|
|
.exit = nfnl_log_net_exit,
|
|
|
|
.id = &nfnl_log_net_id,
|
|
|
|
.size = sizeof(struct nfnl_log_net),
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Module init: set up per-netns state, the nfnetlink interface and the
 * packet logger.
 *
 * The pernet subsystem must be registered FIRST: as soon as the
 * nfnetlink subsys or the logger becomes visible, userspace requests
 * and packet paths may call nfnl_log_pernet(), so the per-net data has
 * to exist already.  The original ordering (pernet last) left a window
 * where that data was missing.  Error unwinding runs in strict reverse
 * order of registration.
 */
static int __init nfnetlink_log_init(void)
{
	int status;

	status = register_pernet_subsys(&nfnl_log_net_ops);
	if (status < 0) {
		pr_err("failed to register pernet ops\n");
		goto out;
	}

	netlink_register_notifier(&nfulnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfulnl_subsys);
	if (status < 0) {
		pr_err("failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
	if (status < 0) {
		pr_err("failed to register logger\n");
		goto cleanup_subsys;
	}

	return status;

cleanup_subsys:
	nfnetlink_subsys_unregister(&nfulnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_log_net_ops);
out:
	return status;
}
|
|
|
|
|
2006-03-29 02:37:06 +02:00
|
|
|
/*
 * Module exit: tear down in reverse order of a correctly-ordered init.
 *
 * The per-net state must go away LAST: while the logger or the
 * nfnetlink subsys is still registered, packet and userspace paths can
 * reach nfnl_log_pernet(), so unregistering the pernet subsystem first
 * (as the original code did) risked use of freed per-net data.
 */
static void __exit nfnetlink_log_fini(void)
{
	nf_log_unregister(&nfulnl_logger);
	nfnetlink_subsys_unregister(&nfulnl_subsys);
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_log_net_ops);
}
|
|
|
|
|
|
|
|
MODULE_DESCRIPTION("netfilter userspace logging");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
/* Autoload when userspace opens the ULOG nfnetlink subsystem. */
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
/* Autoload as the type-1 (NFLOG) logger for these address families. */
MODULE_ALIAS_NF_LOGGER(AF_INET, 1);
MODULE_ALIAS_NF_LOGGER(AF_INET6, 1);
MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 1);

module_init(nfnetlink_log_init);
module_exit(nfnetlink_log_fini);
|