Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2018-07-13

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix AF_XDP TX error reporting before final kernel release such that it
   becomes consistent between copy mode and zero-copy, from Magnus.

2) Fix three different syzkaller reported issues: oob due to ld_abs
   rewrite with too large offset, another oob in l3 based skb test run
   and a bug leaving mangled prog in subprog JITing error path, from Daniel.

3) Fix BTF handling for bitfield extraction on big endian, from Okash.

4) Fix a missing linux/errno.h include in cgroup/BPF found by kbuild bot,
   from Roman.

5) Fix xdp2skb_meta.sh sample by using just command names instead of
   absolute paths for tc and ip, allowing them to be redefined, from Taeung.

6) Fix availability probing for BPF seg6 helpers before final kernel ships
   so they can be detected at prog load time, from Mathieu.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit: c849eb0d1e
Committed by: David S. Miller, 2018-07-13 14:31:47 -07:00
8 changed files with 89 additions and 60 deletions

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h

@@ -2,6 +2,7 @@
 #ifndef _BPF_CGROUP_H
 #define _BPF_CGROUP_H
 
+#include <linux/errno.h>
 #include <linux/jump_label.h>
 #include <uapi/linux/bpf.h>

diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c

@@ -991,16 +991,13 @@ static void btf_int_bits_seq_show(const struct btf *btf,
 				  void *data, u8 bits_offset,
 				  struct seq_file *m)
 {
+	u16 left_shift_bits, right_shift_bits;
 	u32 int_data = btf_type_int(t);
 	u16 nr_bits = BTF_INT_BITS(int_data);
 	u16 total_bits_offset;
 	u16 nr_copy_bytes;
 	u16 nr_copy_bits;
-	u8 nr_upper_bits;
-	union {
-		u64 u64_num;
-		u8 u8_nums[8];
-	} print_num;
+	u64 print_num;
 
 	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
 	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
@@ -1008,21 +1005,20 @@ static void btf_int_bits_seq_show(const struct btf *btf,
 	nr_copy_bits = nr_bits + bits_offset;
 	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
 
-	print_num.u64_num = 0;
-	memcpy(&print_num.u64_num, data, nr_copy_bytes);
+	print_num = 0;
+	memcpy(&print_num, data, nr_copy_bytes);
 
-	/* Ditch the higher order bits */
-	nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits);
-	if (nr_upper_bits) {
-		/* We need to mask out some bits of the upper byte. */
-		u8 mask = (1 << nr_upper_bits) - 1;
+#ifdef __BIG_ENDIAN_BITFIELD
+	left_shift_bits = bits_offset;
+#else
+	left_shift_bits = BITS_PER_U64 - nr_copy_bits;
+#endif
+	right_shift_bits = BITS_PER_U64 - nr_bits;
 
-		print_num.u8_nums[nr_copy_bytes - 1] &= mask;
-	}
+	print_num <<= left_shift_bits;
+	print_num >>= right_shift_bits;
 
-	print_num.u64_num >>= bits_offset;
-
-	seq_printf(m, "0x%llx", print_num.u64_num);
+	seq_printf(m, "0x%llx", print_num);
 }
 
 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
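For reference, the shift arithmetic the new code relies on can be checked in isolation. A minimal userspace sketch (little-endian case only, not part of the patch) of the same extraction:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Extract nr_bits starting at bits_offset from a little-endian buffer,
 * mirroring the patched btf_int_bits_seq_show() arithmetic.
 */
static uint64_t extract_bits(const void *data, uint8_t bits_offset,
			     uint8_t nr_bits)
{
	uint8_t nr_copy_bits = nr_bits + bits_offset;
	uint64_t print_num = 0;

	memcpy(&print_num, data, (nr_copy_bits + 7) / 8);
	print_num <<= 64 - nr_copy_bits;	/* drop bits above the field */
	print_num >>= 64 - nr_bits;		/* drop bits below the field */
	return print_num;
}

int main(void)
{
	uint8_t buf[8] = { 0xa5 };	/* low nibble 0x5, high nibble 0xa */

	/* 4-bit field at bit offset 4 -> prints 0xa */
	printf("0x%llx\n", (unsigned long long)extract_bits(buf, 4, 4));
	return 0;
}

On big endian the left shift count is bits_offset instead, which is exactly what the new #ifdef __BIG_ENDIAN_BITFIELD branch selects; the old byte-indexed masking via u8_nums[] assumed a little-endian byte layout.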

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c

@@ -5430,6 +5430,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		if (insn->code != (BPF_JMP | BPF_CALL) ||
 		    insn->src_reg != BPF_PSEUDO_CALL)
 			continue;
+		/* Upon error here we cannot fall back to interpreter but
+		 * need a hard reject of the program. Thus -EFAULT is
+		 * propagated in any case.
+		 */
 		subprog = find_subprog(env, i + insn->imm + 1);
 		if (subprog < 0) {
 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
@@ -5450,7 +5454,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 
 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
 	if (!func)
-		return -ENOMEM;
+		goto out_undo_insn;
 
 	for (i = 0; i < env->subprog_cnt; i++) {
 		subprog_start = subprog_end;
@@ -5515,7 +5519,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		tmp = bpf_int_jit_compile(func[i]);
 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
-			err = -EFAULT;
+			err = -ENOTSUPP;
 			goto out_free;
 		}
 		cond_resched();
@@ -5552,6 +5556,7 @@ out_free:
 		if (func[i])
 			bpf_jit_free(func[i]);
 	kfree(func);
+out_undo_insn:
 	/* cleanup main prog to be interpreted */
 	prog->jit_requested = 0;
 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
@@ -5578,6 +5583,8 @@ static int fixup_call_args(struct bpf_verifier_env *env)
 		err = jit_subprogs(env);
 		if (err == 0)
 			return 0;
+		if (err == -EFAULT)
+			return err;
 	}
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
 	for (i = 0; i < prog->len; i++, insn++) {
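Taken together, the hunks above define an error contract: -ENOTSUPP (or -ENOMEM) from jit_subprogs() means the insns were restored via out_undo_insn and interpreter fallback is safe, while -EFAULT means the program may already be partially rewritten and must be hard-rejected. A userspace model of that contract (stages and names invented for illustration):

#include <errno.h>
#include <stdio.h>

#define ENOTSUPP 524	/* kernel-internal value, cf. the selftest below */

static int jit_subprogs_model(int fail_stage)
{
	if (fail_stage == 1)
		return -ENOTSUPP;	/* undone cleanly, fallback is fine */
	if (fail_stage == 2)
		return -EFAULT;		/* state mangled, no way back */
	return 0;
}

static int fixup_call_args_model(int fail_stage)
{
	int err = jit_subprogs_model(fail_stage);

	if (err == 0)
		return 0;
	if (err == -EFAULT)
		return err;	/* propagate the hard reject */
	return 0;		/* otherwise fall back to the interpreter */
}

int main(void)
{
	/* prints: 0 0 -14 */
	printf("%d %d %d\n", fixup_call_args_model(0),
	       fixup_call_args_model(1), fixup_call_args_model(2));
	return 0;
}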

diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c

@@ -96,6 +96,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 	u32 size = kattr->test.data_size_in;
 	u32 repeat = kattr->test.repeat;
 	u32 retval, duration;
+	int hh_len = ETH_HLEN;
 	struct sk_buff *skb;
 	void *data;
 	int ret;
@@ -131,12 +132,22 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 	skb_reset_network_header(skb);
 
 	if (is_l2)
-		__skb_push(skb, ETH_HLEN);
+		__skb_push(skb, hh_len);
 	if (is_direct_pkt_access)
 		bpf_compute_data_pointers(skb);
 	retval = bpf_test_run(prog, skb, repeat, &duration);
-	if (!is_l2)
-		__skb_push(skb, ETH_HLEN);
+	if (!is_l2) {
+		if (skb_headroom(skb) < hh_len) {
+			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
+
+			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
+				kfree_skb(skb);
+				return -ENOMEM;
+			}
+		}
+
+		memset(__skb_push(skb, hh_len), 0, hh_len);
+	}
 	size = skb->len;
 	/* bpf program can never convert linear skb to non-linear */
 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
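The repair logic above can be exercised standalone: if the program consumed the MAC headroom (for example via bpf_skb_change_head()), the head is regrown by an aligned amount before the header is pushed back. A small userspace sketch of the arithmetic (HH_DATA_MOD is 16 and ETH_HLEN is 14 in the kernel headers):

#include <stdio.h>

#define ETH_HLEN	14
#define HH_DATA_MOD	16
#define HH_DATA_ALIGN(len) \
	(((len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))

int main(void)
{
	int hh_len = ETH_HLEN;
	int headroom = 2;	/* headroom left after the program ran */

	if (headroom < hh_len) {
		int nhead = HH_DATA_ALIGN(hh_len - headroom);

		/* prints: expand head by 16 bytes */
		printf("expand head by %d bytes\n", nhead);
	}
	return 0;
}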

diff --git a/net/core/filter.c b/net/core/filter.c

@@ -459,11 +459,21 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
 	     (!unaligned_ok && offset >= 0 &&
 	      offset + ip_align >= 0 &&
 	      offset + ip_align % size == 0))) {
+		bool ldx_off_ok = offset <= S16_MAX;
+
 		*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
 		*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
-		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian);
-		*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D,
-				      offset);
+		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
+				      size, 2 + endian + (!ldx_off_ok * 2));
+		if (ldx_off_ok) {
+			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+					      BPF_REG_D, offset);
+		} else {
+			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
+			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
+			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+					      BPF_REG_TMP, 0);
+		}
 		if (endian)
 			*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
 		*insn++ = BPF_JMP_A(8);
@@ -4526,10 +4536,10 @@ static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
 	.arg4_type	= ARG_CONST_SIZE
 };
 
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
 BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
 	   const void *, from, u32, len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
 	struct seg6_bpf_srh_state *srh_state =
 		this_cpu_ptr(&seg6_bpf_srh_states);
 	void *srh_tlvs, *srh_end, *ptr;
@@ -4555,9 +4565,6 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
 
 	memcpy(skb->data + offset, from, len);
 	return 0;
-#else /* CONFIG_IPV6_SEG6_BPF */
-	return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
@@ -4573,7 +4580,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
 BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
 	   u32, action, void *, param, u32, param_len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
 	struct seg6_bpf_srh_state *srh_state =
 		this_cpu_ptr(&seg6_bpf_srh_states);
 	struct ipv6_sr_hdr *srh;
@@ -4621,9 +4627,6 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
 	default:
 		return -EINVAL;
 	}
-#else /* CONFIG_IPV6_SEG6_BPF */
-	return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
@@ -4639,7 +4642,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
 BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
 	   s32, len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
 	struct seg6_bpf_srh_state *srh_state =
 		this_cpu_ptr(&seg6_bpf_srh_states);
 	void *srh_end, *srh_tlvs, *ptr;
@@ -4683,9 +4685,6 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
 	srh_state->hdrlen += len;
 	srh_state->valid = 0;
 	return 0;
-#else /* CONFIG_IPV6_SEG6_BPF */
-	return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
@@ -4696,6 +4695,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
 	.arg2_type	= ARG_ANYTHING,
 	.arg3_type	= ARG_ANYTHING,
 };
+#endif /* CONFIG_IPV6_SEG6_BPF */
 
 bool bpf_helper_changes_pkt_data(void *func)
 {
@@ -4717,11 +4717,12 @@ bool bpf_helper_changes_pkt_data(void *func)
 	    func == bpf_xdp_adjust_meta ||
 	    func == bpf_msg_pull_data ||
 	    func == bpf_xdp_adjust_tail ||
-	    func == bpf_lwt_push_encap ||
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
 	    func == bpf_lwt_seg6_store_bytes ||
 	    func == bpf_lwt_seg6_adjust_srh ||
-	    func == bpf_lwt_seg6_action
-	    )
+	    func == bpf_lwt_seg6_action ||
+#endif
+	    func == bpf_lwt_push_encap)
 		return true;
 
 	return false;
@@ -5056,12 +5057,14 @@ static const struct bpf_func_proto *
 lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
 	switch (func_id) {
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
 	case BPF_FUNC_lwt_seg6_store_bytes:
 		return &bpf_lwt_seg6_store_bytes_proto;
 	case BPF_FUNC_lwt_seg6_action:
 		return &bpf_lwt_seg6_action_proto;
 	case BPF_FUNC_lwt_seg6_adjust_srh:
 		return &bpf_lwt_seg6_adjust_srh_proto;
+#endif
 	default:
 		return lwt_out_func_proto(func_id, prog);
 	}
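The new ldx_off_ok guard in the first hunk exists because the off field of struct bpf_insn is a signed 16-bit integer, so a classic BPF offset above S16_MAX cannot be encoded in BPF_LDX_MEM() directly and previously got silently truncated. A standalone illustration of that truncation:

#include <stdint.h>
#include <stdio.h>

#define S16_MAX 32767

int main(void)
{
	uint32_t k = 0x12345;		/* ld_abs offset > S16_MAX */
	int16_t off = (int16_t)k;	/* what a blind encode would store */

	/* prints: wanted 74565, encoded 9029, ok=0 */
	printf("wanted %u, encoded %d, ok=%d\n", k, off, k <= S16_MAX);
	return 0;
}

When the offset does not fit, the fixed rewrite materializes it with BPF_ADD into a temporary register and loads with displacement 0 instead.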

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c

@@ -218,9 +218,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
 	struct sk_buff *skb;
 	int err = 0;
 
-	if (unlikely(!xs->tx))
-		return -ENOBUFS;
-
 	mutex_lock(&xs->mutex);
 
 	while (xskq_peek_desc(xs->tx, &desc)) {
@@ -233,22 +230,13 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
 			goto out;
 		}
 
-		if (xskq_reserve_addr(xs->umem->cq)) {
-			err = -EAGAIN;
+		if (xskq_reserve_addr(xs->umem->cq))
+			goto out;
+
+		if (xs->queue_id >= xs->dev->real_num_tx_queues)
 			goto out;
-		}
 
 		len = desc.len;
-		if (unlikely(len > xs->dev->mtu)) {
-			err = -EMSGSIZE;
-			goto out;
-		}
-
-		if (xs->queue_id >= xs->dev->real_num_tx_queues) {
-			err = -ENXIO;
-			goto out;
-		}
-
 		skb = sock_alloc_send_skb(sk, len, 1, &err);
 		if (unlikely(!skb)) {
 			err = -EAGAIN;
@@ -300,6 +288,8 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 		return -ENXIO;
 	if (unlikely(!(xs->dev->flags & IFF_UP)))
 		return -ENETDOWN;
+	if (unlikely(!xs->tx))
+		return -ENOBUFS;
 
 	if (need_wait)
 		return -EOPNOTSUPP;
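A rough userspace model of the reporting rule these hunks move toward (structure and names invented for illustration): socket-level preconditions are checked once in sendmsg, shared by the copy and zero-copy paths, while per-descriptor shortfalls simply end the batch rather than fabricating an errno that only copy mode could deliver:

#include <errno.h>
#include <stdio.h>

struct xsk_model {
	int has_tx_ring;
	int cq_space;	/* free completion queue slots */
};

static int generic_xmit_model(struct xsk_model *xs, int ndesc)
{
	int sent = 0;

	while (ndesc-- > 0) {
		if (xs->cq_space == 0)
			break;	/* stop quietly, like the removed -EAGAIN */
		xs->cq_space--;
		sent++;
	}
	return sent;
}

static int sendmsg_model(struct xsk_model *xs, int ndesc)
{
	if (!xs->has_tx_ring)
		return -ENOBUFS;	/* now checked up front, both modes */
	return generic_xmit_model(xs, ndesc);
}

int main(void)
{
	struct xsk_model xs = { .has_tx_ring = 1, .cq_space = 2 };

	printf("%d\n", sendmsg_model(&xs, 5));	/* prints: 2 */
	xs.has_tx_ring = 0;
	printf("%d\n", sendmsg_model(&xs, 5));	/* prints: -105 */
	return 0;
}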

diff --git a/samples/bpf/xdp2skb_meta.sh b/samples/bpf/xdp2skb_meta.sh

@@ -16,8 +16,8 @@
 BPF_FILE=xdp2skb_meta_kern.o
 DIR=$(dirname $0)
 
-export TC=/usr/sbin/tc
-export IP=/usr/sbin/ip
+[ -z "$TC" ] && TC=tc
+[ -z "$IP" ] && IP=ip
 
 function usage() {
     echo ""
@@ -53,7 +53,7 @@ function _call_cmd() {
     local allow_fail="$2"
     shift 2
     if [[ -n "$VERBOSE" ]]; then
-        echo "$(basename $cmd) $@"
+        echo "$cmd $@"
     fi
     if [[ -n "$DRYRUN" ]]; then
        return
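With this change the sample resolves tc and ip via PATH by default, while still honoring caller overrides, since the guards only assign when the variables are unset or empty; for example, invoking it as TC=/opt/iproute2/sbin/tc ./xdp2skb_meta.sh (override path purely illustrative) uses the caller's binary, and the verbose echo now prints that full command.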

diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c

@@ -4974,6 +4974,24 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
 	},
+	{
+		"make headroom for LWT_XMIT",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_2, 34),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+			/* split for s390 to succeed */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_2, 42),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
+	},
 	{
 		"invalid access of tc_classid for LWT_IN",
 		.insns = {
@@ -12554,8 +12572,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 	}
 
 	if (fd_prog >= 0) {
+		__u8 tmp[TEST_DATA_LEN << 2];
+		__u32 size_tmp = sizeof(tmp);
+
 		err = bpf_prog_test_run(fd_prog, 1, test->data,
-					sizeof(test->data), NULL, NULL,
+					sizeof(test->data), tmp, &size_tmp,
 					&retval, NULL);
 		if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
 			printf("Unexpected bpf_prog_test_run error\n");