2019-06-04 10:11:33 +02:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2013-01-18 08:17:30 +01:00
|
|
|
/* Xtables module to match packets using a BPF filter.
|
|
|
|
* Copyright 2013 Google Inc.
|
|
|
|
* Written by Willem de Bruijn <willemb@google.com>
|
|
|
|
*/
|
|
|
|
|
2018-02-09 15:52:07 +01:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2013-01-18 08:17:30 +01:00
|
|
|
#include <linux/module.h>
|
netfilter: xt_bpf: Fix XT_BPF_MODE_FD_PINNED mode of 'xt_bpf_info_v1'
Commit 2c16d6033264 ("netfilter: xt_bpf: support ebpf") introduced
support for attaching an eBPF object by an fd, with the
'bpf_mt_check_v1' ABI expecting the '.fd' to be specified upon each
IPT_SO_SET_REPLACE call.
However this breaks subsequent iptables calls:
# iptables -A INPUT -m bpf --object-pinned /sys/fs/bpf/xxx -j ACCEPT
# iptables -A INPUT -s 5.6.7.8 -j ACCEPT
iptables: Invalid argument. Run `dmesg' for more information.
That's because iptables works by loading existing rules using
IPT_SO_GET_ENTRIES to userspace, then issuing IPT_SO_SET_REPLACE with
the replacement set.
However, the loaded 'xt_bpf_info_v1' has an arbitrary '.fd' number
(from the initial "iptables -m bpf" invocation) - so when 2nd invocation
occurs, userspace passes a bogus fd number, which leads to
'bpf_mt_check_v1' to fail.
One suggested solution [1] was to hack iptables userspace, to perform a
"entries fixup" immediatley after IPT_SO_GET_ENTRIES, by opening a new,
process-local fd per every 'xt_bpf_info_v1' entry seen.
However, in [2] both Pablo Neira Ayuso and Willem de Bruijn suggested to
depricate the xt_bpf_info_v1 ABI dealing with pinned ebpf objects.
This fix changes the XT_BPF_MODE_FD_PINNED behavior to ignore the given
'.fd' and instead perform an in-kernel lookup for the bpf object given
the provided '.path'.
It also defines an alias for the XT_BPF_MODE_FD_PINNED mode, named
XT_BPF_MODE_PATH_PINNED, to better reflect the fact that the user is
expected to provide the path of the pinned object.
Existing XT_BPF_MODE_FD_ELF behavior (non-pinned fd mode) is preserved.
References: [1] https://marc.info/?l=netfilter-devel&m=150564724607440&w=2
[2] https://marc.info/?l=netfilter-devel&m=150575727129880&w=2
Reported-by: Rafael Buchbinder <rafi@rbk.ms>
Signed-off-by: Shmulik Ladkani <shmulik.ladkani@gmail.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
2017-10-09 14:27:15 +02:00
|
|
|
#include <linux/syscalls.h>
|
2013-01-18 08:17:30 +01:00
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/filter.h>
|
2016-12-06 22:25:02 +01:00
|
|
|
#include <linux/bpf.h>
|
2013-01-18 08:17:30 +01:00
|
|
|
|
|
|
|
#include <linux/netfilter/xt_bpf.h>
|
|
|
|
#include <linux/netfilter/x_tables.h>
|
|
|
|
|
|
|
|
MODULE_AUTHOR("Willem de Bruijn <willemb@google.com>");
MODULE_DESCRIPTION("Xtables: BPF filter match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_bpf");
MODULE_ALIAS("ip6t_bpf");
|
|
|
|
|
2016-12-06 22:25:02 +01:00
|
|
|
/* Build a classic-BPF program from a user-supplied instruction array.
 * On success stores the prepared program in @ret and returns 0; returns
 * -EINVAL if @len exceeds XT_BPF_MAX_NUM_INSTR or the filter fails
 * validation in bpf_prog_create().
 */
static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
				   struct bpf_prog **ret)
{
	struct sock_fprog_kern fprog = {
		.len	= len,
		.filter	= insns,
	};

	/* Reject oversized filters before handing them to the core. */
	if (len > XT_BPF_MAX_NUM_INSTR)
		return -EINVAL;

	if (bpf_prog_create(ret, &fprog) != 0) {
		pr_info_ratelimited("check failed: parse error\n");
		return -EINVAL;
	}

	return 0;
}
|
|
|
|
|
2016-12-06 22:25:02 +01:00
|
|
|
static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
|
|
|
|
{
|
|
|
|
struct bpf_prog *prog;
|
|
|
|
|
|
|
|
prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
|
|
|
|
if (IS_ERR(prog))
|
|
|
|
return PTR_ERR(prog);
|
|
|
|
|
|
|
|
*ret = prog;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
netfilter: xt_bpf: Fix XT_BPF_MODE_FD_PINNED mode of 'xt_bpf_info_v1'
Commit 2c16d6033264 ("netfilter: xt_bpf: support ebpf") introduced
support for attaching an eBPF object by an fd, with the
'bpf_mt_check_v1' ABI expecting the '.fd' to be specified upon each
IPT_SO_SET_REPLACE call.
However this breaks subsequent iptables calls:
# iptables -A INPUT -m bpf --object-pinned /sys/fs/bpf/xxx -j ACCEPT
# iptables -A INPUT -s 5.6.7.8 -j ACCEPT
iptables: Invalid argument. Run `dmesg' for more information.
That's because iptables works by loading existing rules using
IPT_SO_GET_ENTRIES to userspace, then issuing IPT_SO_SET_REPLACE with
the replacement set.
However, the loaded 'xt_bpf_info_v1' has an arbitrary '.fd' number
(from the initial "iptables -m bpf" invocation) - so when 2nd invocation
occurs, userspace passes a bogus fd number, which leads to
'bpf_mt_check_v1' to fail.
One suggested solution [1] was to hack iptables userspace, to perform a
"entries fixup" immediatley after IPT_SO_GET_ENTRIES, by opening a new,
process-local fd per every 'xt_bpf_info_v1' entry seen.
However, in [2] both Pablo Neira Ayuso and Willem de Bruijn suggested to
depricate the xt_bpf_info_v1 ABI dealing with pinned ebpf objects.
This fix changes the XT_BPF_MODE_FD_PINNED behavior to ignore the given
'.fd' and instead perform an in-kernel lookup for the bpf object given
the provided '.path'.
It also defines an alias for the XT_BPF_MODE_FD_PINNED mode, named
XT_BPF_MODE_PATH_PINNED, to better reflect the fact that the user is
expected to provide the path of the pinned object.
Existing XT_BPF_MODE_FD_ELF behavior (non-pinned fd mode) is preserved.
References: [1] https://marc.info/?l=netfilter-devel&m=150564724607440&w=2
[2] https://marc.info/?l=netfilter-devel&m=150575727129880&w=2
Reported-by: Rafael Buchbinder <rafi@rbk.ms>
Signed-off-by: Shmulik Ladkani <shmulik.ladkani@gmail.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
2017-10-09 14:27:15 +02:00
|
|
|
static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
|
|
|
|
{
|
2017-12-01 01:46:07 +01:00
|
|
|
if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2017-12-03 02:20:38 +01:00
|
|
|
*ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
|
|
|
|
return PTR_ERR_OR_ZERO(*ret);
|
netfilter: xt_bpf: Fix XT_BPF_MODE_FD_PINNED mode of 'xt_bpf_info_v1'
Commit 2c16d6033264 ("netfilter: xt_bpf: support ebpf") introduced
support for attaching an eBPF object by an fd, with the
'bpf_mt_check_v1' ABI expecting the '.fd' to be specified upon each
IPT_SO_SET_REPLACE call.
However this breaks subsequent iptables calls:
# iptables -A INPUT -m bpf --object-pinned /sys/fs/bpf/xxx -j ACCEPT
# iptables -A INPUT -s 5.6.7.8 -j ACCEPT
iptables: Invalid argument. Run `dmesg' for more information.
That's because iptables works by loading existing rules using
IPT_SO_GET_ENTRIES to userspace, then issuing IPT_SO_SET_REPLACE with
the replacement set.
However, the loaded 'xt_bpf_info_v1' has an arbitrary '.fd' number
(from the initial "iptables -m bpf" invocation) - so when 2nd invocation
occurs, userspace passes a bogus fd number, which leads to
'bpf_mt_check_v1' to fail.
One suggested solution [1] was to hack iptables userspace, to perform a
"entries fixup" immediatley after IPT_SO_GET_ENTRIES, by opening a new,
process-local fd per every 'xt_bpf_info_v1' entry seen.
However, in [2] both Pablo Neira Ayuso and Willem de Bruijn suggested to
depricate the xt_bpf_info_v1 ABI dealing with pinned ebpf objects.
This fix changes the XT_BPF_MODE_FD_PINNED behavior to ignore the given
'.fd' and instead perform an in-kernel lookup for the bpf object given
the provided '.path'.
It also defines an alias for the XT_BPF_MODE_FD_PINNED mode, named
XT_BPF_MODE_PATH_PINNED, to better reflect the fact that the user is
expected to provide the path of the pinned object.
Existing XT_BPF_MODE_FD_ELF behavior (non-pinned fd mode) is preserved.
References: [1] https://marc.info/?l=netfilter-devel&m=150564724607440&w=2
[2] https://marc.info/?l=netfilter-devel&m=150575727129880&w=2
Reported-by: Rafael Buchbinder <rafi@rbk.ms>
Signed-off-by: Shmulik Ladkani <shmulik.ladkani@gmail.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
2017-10-09 14:27:15 +02:00
|
|
|
}
|
|
|
|
|
2016-12-06 22:25:02 +01:00
|
|
|
static int bpf_mt_check(const struct xt_mtchk_param *par)
|
|
|
|
{
|
|
|
|
struct xt_bpf_info *info = par->matchinfo;
|
|
|
|
|
|
|
|
return __bpf_mt_check_bytecode(info->bpf_program,
|
|
|
|
info->bpf_program_num_elem,
|
|
|
|
&info->filter);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
|
|
|
|
{
|
|
|
|
struct xt_bpf_info_v1 *info = par->matchinfo;
|
|
|
|
|
|
|
|
if (info->mode == XT_BPF_MODE_BYTECODE)
|
|
|
|
return __bpf_mt_check_bytecode(info->bpf_program,
|
|
|
|
info->bpf_program_num_elem,
|
|
|
|
&info->filter);
|
netfilter: xt_bpf: Fix XT_BPF_MODE_FD_PINNED mode of 'xt_bpf_info_v1'
Commit 2c16d6033264 ("netfilter: xt_bpf: support ebpf") introduced
support for attaching an eBPF object by an fd, with the
'bpf_mt_check_v1' ABI expecting the '.fd' to be specified upon each
IPT_SO_SET_REPLACE call.
However this breaks subsequent iptables calls:
# iptables -A INPUT -m bpf --object-pinned /sys/fs/bpf/xxx -j ACCEPT
# iptables -A INPUT -s 5.6.7.8 -j ACCEPT
iptables: Invalid argument. Run `dmesg' for more information.
That's because iptables works by loading existing rules using
IPT_SO_GET_ENTRIES to userspace, then issuing IPT_SO_SET_REPLACE with
the replacement set.
However, the loaded 'xt_bpf_info_v1' has an arbitrary '.fd' number
(from the initial "iptables -m bpf" invocation) - so when 2nd invocation
occurs, userspace passes a bogus fd number, which leads to
'bpf_mt_check_v1' to fail.
One suggested solution [1] was to hack iptables userspace, to perform a
"entries fixup" immediatley after IPT_SO_GET_ENTRIES, by opening a new,
process-local fd per every 'xt_bpf_info_v1' entry seen.
However, in [2] both Pablo Neira Ayuso and Willem de Bruijn suggested to
depricate the xt_bpf_info_v1 ABI dealing with pinned ebpf objects.
This fix changes the XT_BPF_MODE_FD_PINNED behavior to ignore the given
'.fd' and instead perform an in-kernel lookup for the bpf object given
the provided '.path'.
It also defines an alias for the XT_BPF_MODE_FD_PINNED mode, named
XT_BPF_MODE_PATH_PINNED, to better reflect the fact that the user is
expected to provide the path of the pinned object.
Existing XT_BPF_MODE_FD_ELF behavior (non-pinned fd mode) is preserved.
References: [1] https://marc.info/?l=netfilter-devel&m=150564724607440&w=2
[2] https://marc.info/?l=netfilter-devel&m=150575727129880&w=2
Reported-by: Rafael Buchbinder <rafi@rbk.ms>
Signed-off-by: Shmulik Ladkani <shmulik.ladkani@gmail.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
2017-10-09 14:27:15 +02:00
|
|
|
else if (info->mode == XT_BPF_MODE_FD_ELF)
|
2016-12-06 22:25:02 +01:00
|
|
|
return __bpf_mt_check_fd(info->fd, &info->filter);
|
netfilter: xt_bpf: Fix XT_BPF_MODE_FD_PINNED mode of 'xt_bpf_info_v1'
Commit 2c16d6033264 ("netfilter: xt_bpf: support ebpf") introduced
support for attaching an eBPF object by an fd, with the
'bpf_mt_check_v1' ABI expecting the '.fd' to be specified upon each
IPT_SO_SET_REPLACE call.
However this breaks subsequent iptables calls:
# iptables -A INPUT -m bpf --object-pinned /sys/fs/bpf/xxx -j ACCEPT
# iptables -A INPUT -s 5.6.7.8 -j ACCEPT
iptables: Invalid argument. Run `dmesg' for more information.
That's because iptables works by loading existing rules using
IPT_SO_GET_ENTRIES to userspace, then issuing IPT_SO_SET_REPLACE with
the replacement set.
However, the loaded 'xt_bpf_info_v1' has an arbitrary '.fd' number
(from the initial "iptables -m bpf" invocation) - so when 2nd invocation
occurs, userspace passes a bogus fd number, which leads to
'bpf_mt_check_v1' to fail.
One suggested solution [1] was to hack iptables userspace, to perform a
"entries fixup" immediatley after IPT_SO_GET_ENTRIES, by opening a new,
process-local fd per every 'xt_bpf_info_v1' entry seen.
However, in [2] both Pablo Neira Ayuso and Willem de Bruijn suggested to
depricate the xt_bpf_info_v1 ABI dealing with pinned ebpf objects.
This fix changes the XT_BPF_MODE_FD_PINNED behavior to ignore the given
'.fd' and instead perform an in-kernel lookup for the bpf object given
the provided '.path'.
It also defines an alias for the XT_BPF_MODE_FD_PINNED mode, named
XT_BPF_MODE_PATH_PINNED, to better reflect the fact that the user is
expected to provide the path of the pinned object.
Existing XT_BPF_MODE_FD_ELF behavior (non-pinned fd mode) is preserved.
References: [1] https://marc.info/?l=netfilter-devel&m=150564724607440&w=2
[2] https://marc.info/?l=netfilter-devel&m=150575727129880&w=2
Reported-by: Rafael Buchbinder <rafi@rbk.ms>
Signed-off-by: Shmulik Ladkani <shmulik.ladkani@gmail.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
2017-10-09 14:27:15 +02:00
|
|
|
else if (info->mode == XT_BPF_MODE_PATH_PINNED)
|
|
|
|
return __bpf_mt_check_path(info->path, &info->filter);
|
2016-12-06 22:25:02 +01:00
|
|
|
else
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2013-01-18 08:17:30 +01:00
|
|
|
static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
|
|
|
|
{
|
|
|
|
const struct xt_bpf_info *info = par->matchinfo;
|
|
|
|
|
net: filter: split 'struct sk_filter' into socket and bpf parts
clean up names related to socket filtering and bpf in the following way:
- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix
split 'struct sk_filter' into
struct sk_filter {
atomic_t refcnt;
struct rcu_head rcu;
struct bpf_prog *prog;
};
and
struct bpf_prog {
u32 jited:1,
len:31;
struct sock_fprog_kern *orig_prog;
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
union {
struct sock_filter insns[0];
struct bpf_insn insnsi[0];
struct work_struct work;
};
};
so that 'struct bpf_prog' can be used independent of sockets and cleans up
'unattached' bpf use cases
split SK_RUN_FILTER macro into:
SK_RUN_FILTER to be used with 'struct sk_filter *' and
BPF_PROG_RUN to be used with 'struct bpf_prog *'
__sk_filter_release(struct sk_filter *) gains
__bpf_prog_release(struct bpf_prog *) helper function
also perform related renames for the functions that work
with 'struct bpf_prog *', since they're on the same lines:
sk_filter_size -> bpf_prog_size
sk_filter_select_runtime -> bpf_prog_select_runtime
sk_filter_free -> bpf_prog_free
sk_unattached_filter_create -> bpf_prog_create
sk_unattached_filter_destroy -> bpf_prog_destroy
sk_store_orig_filter -> bpf_prog_store_orig_filter
sk_release_orig_filter -> bpf_release_orig_filter
__sk_migrate_filter -> bpf_migrate_filter
__sk_prepare_filter -> bpf_prepare_filter
API for attaching classic BPF to a socket stays the same:
sk_attach_filter(prog, struct sock *)/sk_detach_filter(struct sock *)
and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program
which is used by sockets, tun, af_packet
API for 'unattached' BPF programs becomes:
bpf_prog_create(struct bpf_prog **)/bpf_prog_destroy(struct bpf_prog *)
and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program
which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-07-31 05:34:16 +02:00
|
|
|
return BPF_PROG_RUN(info->filter, skb);
|
2013-01-18 08:17:30 +01:00
|
|
|
}
|
|
|
|
|
2016-12-06 22:25:02 +01:00
|
|
|
static bool bpf_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
|
|
|
|
{
|
|
|
|
const struct xt_bpf_info_v1 *info = par->matchinfo;
|
|
|
|
|
|
|
|
return !!bpf_prog_run_save_cb(info->filter, (struct sk_buff *) skb);
|
|
|
|
}
|
|
|
|
|
2013-01-18 08:17:30 +01:00
|
|
|
static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
|
|
|
|
{
|
|
|
|
const struct xt_bpf_info *info = par->matchinfo;
|
2016-12-06 22:25:02 +01:00
|
|
|
|
|
|
|
bpf_prog_destroy(info->filter);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void bpf_mt_destroy_v1(const struct xt_mtdtor_param *par)
|
|
|
|
{
|
|
|
|
const struct xt_bpf_info_v1 *info = par->matchinfo;
|
|
|
|
|
net: filter: split 'struct sk_filter' into socket and bpf parts
clean up names related to socket filtering and bpf in the following way:
- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix
split 'struct sk_filter' into
struct sk_filter {
atomic_t refcnt;
struct rcu_head rcu;
struct bpf_prog *prog;
};
and
struct bpf_prog {
u32 jited:1,
len:31;
struct sock_fprog_kern *orig_prog;
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
union {
struct sock_filter insns[0];
struct bpf_insn insnsi[0];
struct work_struct work;
};
};
so that 'struct bpf_prog' can be used independent of sockets and cleans up
'unattached' bpf use cases
split SK_RUN_FILTER macro into:
SK_RUN_FILTER to be used with 'struct sk_filter *' and
BPF_PROG_RUN to be used with 'struct bpf_prog *'
__sk_filter_release(struct sk_filter *) gains
__bpf_prog_release(struct bpf_prog *) helper function
also perform related renames for the functions that work
with 'struct bpf_prog *', since they're on the same lines:
sk_filter_size -> bpf_prog_size
sk_filter_select_runtime -> bpf_prog_select_runtime
sk_filter_free -> bpf_prog_free
sk_unattached_filter_create -> bpf_prog_create
sk_unattached_filter_destroy -> bpf_prog_destroy
sk_store_orig_filter -> bpf_prog_store_orig_filter
sk_release_orig_filter -> bpf_release_orig_filter
__sk_migrate_filter -> bpf_migrate_filter
__sk_prepare_filter -> bpf_prepare_filter
API for attaching classic BPF to a socket stays the same:
sk_attach_filter(prog, struct sock *)/sk_detach_filter(struct sock *)
and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program
which is used by sockets, tun, af_packet
API for 'unattached' BPF programs becomes:
bpf_prog_create(struct bpf_prog **)/bpf_prog_destroy(struct bpf_prog *)
and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program
which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-07-31 05:34:16 +02:00
|
|
|
bpf_prog_destroy(info->filter);
|
2013-01-18 08:17:30 +01:00
|
|
|
}
|
|
|
|
|
2016-12-06 22:25:02 +01:00
|
|
|
/* Match registrations: revision 0 takes classic bytecode only,
 * revision 1 additionally supports eBPF via fd or pinned path.
 * .usersize stops the kernel-only 'filter' pointer from being
 * copied back to userspace.
 */
static struct xt_match bpf_mt_reg[] __read_mostly = {
	{
		.name		= "bpf",
		.revision	= 0,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= bpf_mt_check,
		.match		= bpf_mt,
		.destroy	= bpf_mt_destroy,
		.matchsize	= sizeof(struct xt_bpf_info),
		.usersize	= offsetof(struct xt_bpf_info, filter),
		.me		= THIS_MODULE,
	},
	{
		.name		= "bpf",
		.revision	= 1,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= bpf_mt_check_v1,
		.match		= bpf_mt_v1,
		.destroy	= bpf_mt_destroy_v1,
		.matchsize	= sizeof(struct xt_bpf_info_v1),
		.usersize	= offsetof(struct xt_bpf_info_v1, filter),
		.me		= THIS_MODULE,
	},
};
|
|
|
|
|
|
|
|
/* Register both match revisions with x_tables at module load. */
static int __init bpf_mt_init(void)
{
	return xt_register_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}
|
|
|
|
|
|
|
|
/* Unregister both match revisions at module unload. */
static void __exit bpf_mt_exit(void)
{
	xt_unregister_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}
|
|
|
|
|
|
|
|
module_init(bpf_mt_init);
module_exit(bpf_mt_exit);
|