Merge ra.kernel.org:/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-01-08

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a BSD'ism in sendmsg(2): the unspecified IPv6 dst [::] on
   unconnected UDP sockets is now rewritten to [::1] _after_ the
   cgroup BPF hook has run, so BPF programs see and can rewrite the
   original destination, from Andrey.

2) Follow-up to the speculation fix: reject a corner case during
   sanitation where pointers and scalars from different paths are
   mixed in the same alu op. Also some unrelated minor doc fixes,
   from Daniel.

3) Fix BPF kselftests' incorrect uses of create_and_get_cgroup():
   fd 0 is a valid descriptor, so the helper now returns -1 on
   failure instead of 0, and callers check for a negative value,
   from Stanislav.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 977e4899c9
Author: David S. Miller <davem@davemloft.net>
Date:   2019-01-07 22:49:35 -05:00

18 changed files with 262 additions and 46 deletions
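
For item 3 in particular, the pitfall deserves a standalone illustration
(my sketch, not part of this series): file descriptor 0 is a perfectly
valid return value from open(2) whenever descriptor 0 happens to be
free, so error checks must test for a negative value rather than zero.

/* Hypothetical demo: fd 0 is a valid descriptor, so !fd is a bogus
 * error check.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	close(0);	/* free up descriptor 0 (stdin) */

	int fd = open("/tmp/fd0-demo", O_RDONLY | O_CREAT, 0600);

	printf("fd = %d\n", fd);	/* prints 0 on success */
	if (!fd)			/* WRONG: flags success as failure */
		fprintf(stderr, "bogus error for a valid fd\n");
	if (fd < 0)			/* correct check */
		perror("open");
	return 0;
}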

--- next file ---

@ -157,12 +157,11 @@ Q: Does BPF have a stable ABI?
------------------------------
A: YES. BPF instructions, arguments to BPF programs, set of helper
functions and their arguments, recognized return codes are all part
-of ABI. However when tracing programs are using bpf_probe_read() helper
-to walk kernel internal datastructures and compile with kernel
-internal headers these accesses can and will break with newer
-kernels. The union bpf_attr -> kern_version is checked at load time
-to prevent accidentally loading kprobe-based bpf programs written
-for a different kernel. Networking programs don't do kern_version check.
+of ABI. However there is one specific exception to tracing programs
+which are using helpers like bpf_probe_read() to walk kernel internal
+data structures and compile with kernel internal headers. Both of these
+kernel internals are subject to change and can break with newer kernels
+such that the program needs to be adapted accordingly.

Q: How much stack space a BPF program uses?
-------------------------------------------

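To make the exception above concrete, here is a hedged sketch (mine, not
from the patch) of the kind of tracing program the answer warns about:
it walks task_struct via bpf_probe_read(), so it depends on the internal
structure layout of the kernel it was compiled against.

/* Sketch of a layout-dependent kprobe program; task_struct internals
 * are not stable ABI, so this can break on a different kernel.
 * Assumes the bpf_helpers.h convenience header used by the selftests.
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include "bpf_helpers.h"

SEC("kprobe/do_sys_open")
int trace_open(struct pt_regs *ctx)
{
	struct task_struct *task, *parent;
	__u32 parent_tgid = 0;

	task = (struct task_struct *)bpf_get_current_task();
	/* Each read hard-codes a field offset from the build headers. */
	bpf_probe_read(&parent, sizeof(parent), &task->real_parent);
	bpf_probe_read(&parent_tgid, sizeof(parent_tgid), &parent->tgid);
	return 0;
}

char _license[] SEC("license") = "GPL";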
--- next file ---

@ -172,6 +172,7 @@ struct bpf_verifier_state_list {
#define BPF_ALU_SANITIZE_SRC 1U
#define BPF_ALU_SANITIZE_DST 2U
#define BPF_ALU_NEG_VALUE (1U << 2)
+#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
BPF_ALU_SANITIZE_DST)
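
A brief reading of these bits (my interpretation, not from the patch
text): the SANITIZE_SRC/DST and NEG_VALUE bits describe how a
pointer-arithmetic insn must be masked, while the new NON_POINTER bit
records that the insn was only ever seen with scalars. Keeping both
kinds of state in one aux field lets the verifier notice when two paths
disagree about the same instruction, roughly:

/* Userspace sketch of the conflict check, mirroring the defines above. */
#include <stdio.h>

#define BPF_ALU_SANITIZE_SRC	1U
#define BPF_ALU_SANITIZE_DST	2U
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)

int main(void)
{
	unsigned int path_a = BPF_ALU_NON_POINTER;	/* scalar + scalar */
	unsigned int path_b = BPF_ALU_SANITIZE_DST;	/* ptr + scalar    */

	/* Same insn reached with different alu_state -> reject (-EACCES). */
	printf("conflict=%d\n", path_a != path_b);
	return 0;
}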

--- next file ---

@ -3103,6 +3103,40 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
}
}
static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
				    const struct bpf_insn *insn)
{
	return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
}

static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
				       u32 alu_state, u32 alu_limit)
{
	/* If we arrived here from different branches with different
	 * state or limits to sanitize, then this won't work.
	 */
	if (aux->alu_state &&
	    (aux->alu_state != alu_state ||
	     aux->alu_limit != alu_limit))
		return -EACCES;

	/* Corresponding fixup done in fixup_bpf_calls(). */
	aux->alu_state = alu_state;
	aux->alu_limit = alu_limit;
	return 0;
}

static int sanitize_val_alu(struct bpf_verifier_env *env,
			    struct bpf_insn *insn)
{
	struct bpf_insn_aux_data *aux = cur_aux(env);

	if (can_skip_alu_sanitation(env, insn))
		return 0;

	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
}
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
@ -3117,7 +3151,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_reg_state tmp;
bool ret;
-	if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
+	if (can_skip_alu_sanitation(env, insn))
 		return 0;
/* We already marked aux for masking from non-speculative
@ -3133,19 +3167,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
return 0;
-	/* If we arrived here from different branches with different
-	 * limits to sanitize, then this won't work.
-	 */
-	if (aux->alu_state &&
-	    (aux->alu_state != alu_state ||
-	     aux->alu_limit != alu_limit))
+	if (update_alu_sanitation_state(aux, alu_state, alu_limit))
 		return -EACCES;
-
-	/* Corresponding fixup done in fixup_bpf_calls(). */
-	aux->alu_state = alu_state;
-	aux->alu_limit = alu_limit;
do_sim:
/* Simulate and find potential out-of-bounds access under
* speculative execution from truncation as a result of
@ -3418,6 +3441,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
s64 smin_val, smax_val;
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
u32 dst = insn->dst_reg;
int ret;
if (insn_bitness == 32) {
/* Relevant for 32-bit RSH: Information can propagate towards
@ -3452,6 +3477,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
switch (opcode) {
case BPF_ADD:
ret = sanitize_val_alu(env, insn);
if (ret < 0) {
verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
return ret;
}
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
signed_add_overflows(dst_reg->smax_value, smax_val)) {
dst_reg->smin_value = S64_MIN;
@ -3471,6 +3501,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
ret = sanitize_val_alu(env, insn);
if (ret < 0) {
verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
return ret;
}
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
/* Overflow possible, we know nothing */
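
Taken together, these verifier changes extend the Spectre v1 alu
sanitation to the case where one path sees pointer arithmetic and
another sees plain scalars. As a rough userspace analogue of the masking
that the corresponding fixup_bpf_calls() rewrite performs (my sketch,
similar in spirit only, not the actual instruction patching):

#include <stdint.h>
#include <stdio.h>

/* Userspace sketch of speculative-safe masking: clamp an untrusted
 * offset to [0, limit] branchlessly, similar in spirit to the
 * verifier's alu_limit patching.
 */
static uint64_t mask_offset(uint64_t off, uint64_t limit)
{
	/* If off > limit, (limit - off) goes negative and the arithmetic
	 * shift smears the sign bit into an all-ones mask that forces
	 * the offset to zero. Assumes arithmetic right shift on int64_t.
	 */
	int64_t d = (int64_t)(limit - off);
	uint64_t mask = (uint64_t)(d >> 63);	/* 0 if ok, ~0 if oob */

	return off & ~mask;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)mask_offset(8, 16));	/* 8 */
	printf("%llu\n", (unsigned long long)mask_offset(32, 16));	/* 0 */
	return 0;
}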

--- next file ---

@ -1390,10 +1390,7 @@ do_udp_sendmsg:
ipc6.opt = opt;
fl6.flowi6_proto = sk->sk_protocol;
-	if (!ipv6_addr_any(daddr))
-		fl6.daddr = *daddr;
-	else
-		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+	fl6.daddr = *daddr;
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
@ -1421,6 +1418,9 @@ do_udp_sendmsg:
}
}
+	if (ipv6_addr_any(&fl6.daddr))
+		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
final_p = fl6_update_dst(&fl6, opt, &final);
if (final_p)
connected = false;
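
The behavioral point of this reordering (demo below is mine, not from
the patch): userspace can pass the unspecified address [::] to
sendmsg(2) and the kernel turns it into loopback, but a cgroup BPF
program now gets to see, and possibly rewrite, the original [::] first.

/* Hedged demo: a UDP datagram sent to [::] is delivered to loopback,
 * the BSD'ism that the kernel now applies only after the BPF hook.
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
				    .sin6_port = htons(5555) };

	/* dst.sin6_addr is all zeroes, i.e. [::] -- rewritten to [::1]. */
	if (fd < 0 || sendto(fd, "hi", 2, 0,
			     (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");
	close(fd);
	return 0;
}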

--- next file ---

@ -77,7 +77,7 @@ static int test_foo_bar(void)
/* Create cgroup /foo, get fd, and join it */
foo = create_and_get_cgroup(FOO);
-	if (!foo)
+	if (foo < 0)
goto err;
if (join_cgroup(FOO))
@ -94,7 +94,7 @@ static int test_foo_bar(void)
/* Create cgroup /foo/bar, get fd, and join it */
bar = create_and_get_cgroup(BAR);
-	if (!bar)
+	if (bar < 0)
goto err;
if (join_cgroup(BAR))
@ -298,19 +298,19 @@ static int test_multiprog(void)
goto err;
cg1 = create_and_get_cgroup("/cg1");
-	if (!cg1)
+	if (cg1 < 0)
goto err;
cg2 = create_and_get_cgroup("/cg1/cg2");
-	if (!cg2)
+	if (cg2 < 0)
goto err;
cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
-	if (!cg3)
+	if (cg3 < 0)
goto err;
cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
-	if (!cg4)
+	if (cg4 < 0)
goto err;
cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
-	if (!cg5)
+	if (cg5 < 0)
goto err;
if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))

--- next file ---

@ -32,7 +32,7 @@ int main(int argc, char **argv)
cg2 = create_and_get_cgroup(CGROUP_PATH);
-	if (!cg2)
+	if (cg2 < 0)
goto err;
if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {

--- next file ---

@ -132,6 +132,20 @@ For example, if current state of ``libbpf.map`` is:
Format of version script and ways to handle ABI changes, including
incompatible ones, described in details in [1].
Stand-alone build
=================

Under https://github.com/libbpf/libbpf there is a (semi-)automated
mirror of the mainline's version of libbpf for a stand-alone build.

However, all changes to libbpf's code base must be upstreamed through
the mainline kernel tree.

License
=======

libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause.

Links
=====

--- next file ---

@ -155,7 +155,7 @@ void cleanup_cgroup_environment(void)
* This function creates a cgroup under the top level workdir and returns the
* file descriptor. It is idempotent.
*
- * On success, it returns the file descriptor. On failure it returns 0.
+ * On success, it returns the file descriptor. On failure it returns -1.
* If there is a failure, it prints the error to stderr.
*/
int create_and_get_cgroup(const char *path)
@ -166,13 +166,13 @@ int create_and_get_cgroup(const char *path)
format_cgroup_path(cgroup_path, path);
if (mkdir(cgroup_path, 0777) && errno != EEXIST) {
log_err("mkdiring cgroup %s .. %s", path, cgroup_path);
-		return 0;
+		return -1;
}
fd = open(cgroup_path, O_RDONLY);
if (fd < 0) {
log_err("Opening Cgroup");
-		return 0;
+		return -1;
}
return fd;
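
A hypothetical caller under the corrected convention (my sketch; the
cgroup path name is made up):

#include <stdio.h>
#include "cgroup_helpers.h"

int setup_cgroup(void)
{
	int cg_fd = create_and_get_cgroup("/demo_cg");	/* hypothetical path */

	if (cg_fd < 0) {	/* was: if (!cg_fd), wrong when fd == 0 */
		fprintf(stderr, "failed to create cgroup\n");
		return -1;
	}
	return cg_fd;
}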

--- next file ---

@ -81,7 +81,7 @@ int main(int argc, char **argv)
/* Create a cgroup, get fd, and join it */
cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
-	if (!cgroup_fd) {
+	if (cgroup_fd < 0) {
printf("Failed to create test cgroup\n");
goto err;
}

--- next file ---

@ -43,7 +43,7 @@ int main(int argc, char **argv)
/* Create a cgroup, get fd, and join it */
cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
-	if (!cgroup_fd) {
+	if (cgroup_fd < 0) {
printf("Failed to create test cgroup\n");
goto err;
}

--- next file ---

@ -65,7 +65,7 @@ int main(int argc, char **argv)
/* Create a cgroup, get fd, and join it */
cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
-	if (!cgroup_fd) {
+	if (cgroup_fd < 0) {
printf("Failed to create test cgroup\n");
goto err;
}

--- next file ---

@ -164,7 +164,7 @@ int main(int argc, char **argv)
goto err;
cgfd = create_and_get_cgroup(CGROUP_PATH);
-	if (!cgfd)
+	if (cgfd < 0)
goto err;
if (join_cgroup(CGROUP_PATH))

--- next file ---

@ -458,7 +458,7 @@ int main(int argc, char **argv)
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
-	if (!cgfd)
+	if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))

--- next file ---

@ -44,6 +44,7 @@
#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
#define SRC6_IP "::1"
#define SRC6_REWRITE_IP "::6"
+#define WILDCARD6_IP		"::"
#define SERV6_PORT 6060
#define SERV6_REWRITE_PORT 6666
@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
static int bind6_prog_load(const struct sock_addr_test *test);
static int connect4_prog_load(const struct sock_addr_test *test);
static int connect6_prog_load(const struct sock_addr_test *test);
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
static struct sock_addr_test tests[] = {
/* bind */
@ -462,6 +465,34 @@ static struct sock_addr_test tests[] = {
SRC6_REWRITE_IP,
SYSCALL_ENOTSUPP,
},
{
"sendmsg6: set dst IP = [::] (BSD'ism)",
sendmsg6_rw_wildcard_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
AF_INET6,
SOCK_DGRAM,
SERV6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SRC6_REWRITE_IP,
SUCCESS,
},
{
"sendmsg6: preserve dst IP = [::] (BSD'ism)",
sendmsg_allow_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
AF_INET6,
SOCK_DGRAM,
WILDCARD6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_PORT,
SRC6_IP,
SUCCESS,
},
{
"sendmsg6: deny call",
sendmsg_deny_prog_load,
@ -734,16 +765,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
return load_path(test, CONNECT6_PROG_PATH);
}
-static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
+				      int32_t rc)
 {
 	struct bpf_insn insns[] = {
-		/* return 0 */
-		BPF_MOV64_IMM(BPF_REG_0, 0),
+		/* return rc */
+		BPF_MOV64_IMM(BPF_REG_0, rc),
 		BPF_EXIT_INSN(),
 	};
 	return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
 }

+static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
+{
+	return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
+}
+
+static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+{
+	return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
+}
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
{
struct sockaddr_in dst4_rw_addr;
@ -864,6 +906,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
}
static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
{
return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
}
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
{
return load_path(test, SENDMSG6_PROG_PATH);
@ -1395,7 +1442,7 @@ int main(int argc, char **argv)
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
-	if (!cgfd)
+	if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))
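
The allow/deny loaders above rely on the cgroup BPF return convention:
r0 = 1 lets the syscall proceed, r0 = 0 rejects it with EPERM.
Stand-alone, the allow program is just the following (assuming the
bpf_insn.h macro header as used in samples/bpf):

#include <linux/bpf.h>
#include "bpf_insn.h"	/* assumed macro header, as in samples/bpf */

/* "return 1" in raw BPF: verdict 1 = allow for cgroup sendmsg hooks. */
struct bpf_insn allow_prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
};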

--- next file ---

@ -202,7 +202,7 @@ int main(int argc, char **argv)
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
-	if (!cgfd)
+	if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))

--- next file ---

@ -103,7 +103,7 @@ int main(int argc, char **argv)
goto err;
cg_fd = create_and_get_cgroup(cg_path);
-	if (!cg_fd)
+	if (cg_fd < 0)
goto err;
if (join_cgroup(cg_path))

--- next file ---

@ -115,7 +115,7 @@ int main(int argc, char **argv)
goto err;
cg_fd = create_and_get_cgroup(cg_path);
-	if (!cg_fd)
+	if (cg_fd < 0)
goto err;
if (join_cgroup(cg_path))

--- next file ---

@ -6933,6 +6933,126 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
.retval = 1,
},
{
"map access: mixing value pointer and scalar, 1",
.insns = {
// load map value pointer into r0 and r2
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
// load some number from the map into r1
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
// depending on r1, branch:
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
// branch A
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_JMP_A(2),
// branch B
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0x100000),
// common instruction
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
// depending on r1, branch:
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
// branch A
BPF_JMP_A(4),
// branch B
BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
// verifier follows fall-through
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
// fake-dead code; targeted from branch A to
// prevent dead code sanitization
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R2 tried to add from different pointers or scalars",
.retval = 0,
},
{
"map access: mixing value pointer and scalar, 2",
.insns = {
// load map value pointer into r0 and r2
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
// load some number from the map into r1
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
// depending on r1, branch:
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
// branch A
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0x100000),
BPF_JMP_A(2),
// branch B
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 0),
// common instruction
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
// depending on r1, branch:
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
// branch A
BPF_JMP_A(4),
// branch B
BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
// verifier follows fall-through
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
// fake-dead code; targeted from branch A to
// prevent dead code sanitization
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R2 tried to add from different maps or paths",
.retval = 0,
},
{
"sanitation: alu with different scalars",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0x100000),
BPF_JMP_A(2),
BPF_MOV64_IMM(BPF_REG_2, 42),
BPF_MOV64_IMM(BPF_REG_3, 0x100001),
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.retval = 0x100000,
},
{
"map access: value_ptr += known scalar, upper oob arith, test 1",
.insns = {