From 52e7c083b417417bb4352ebf3a6409988b6af238 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Wed, 26 Feb 2020 17:13:53 +0000 Subject: [PATCH 001/138] mailmap: Update email address My Netronome address is no longer active. I am no maintainer, but get_maintainer.pl sometimes returns my name for a small number of files (BPF-related). Add an entry to .mailmap for good measure. Signed-off-by: Quentin Monnet Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200226171353.18982-1-quentin@isovalent.com Signed-off-by: Alexei Starovoitov --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index ffb8f28290c7..a0dfce8de1ba 100644 --- a/.mailmap +++ b/.mailmap @@ -225,6 +225,7 @@ Pratyush Anand Praveen BP Punit Agrawal Qais Yousef +Quentin Monnet Quentin Perret Rafael J. Wysocki Rajesh Shah From 1bc7896e9ef44fd77858b3ef0b8a6840be3a4494 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Wed, 4 Mar 2020 11:11:04 -0800 Subject: [PATCH 002/138] bpf: Fix deadlock with rq_lock in bpf_send_signal() When experimenting with bpf_send_signal() helper in our production environment (5.2 based), we experienced a deadlock in NMI mode: #5 [ffffc9002219f770] queued_spin_lock_slowpath at ffffffff8110be24 #6 [ffffc9002219f770] _raw_spin_lock_irqsave at ffffffff81a43012 #7 [ffffc9002219f780] try_to_wake_up at ffffffff810e7ecd #8 [ffffc9002219f7e0] signal_wake_up_state at ffffffff810c7b55 #9 [ffffc9002219f7f0] __send_signal at ffffffff810c8602 #10 [ffffc9002219f830] do_send_sig_info at ffffffff810ca31a #11 [ffffc9002219f868] bpf_send_signal at ffffffff8119d227 #12 [ffffc9002219f988] bpf_overflow_handler at ffffffff811d4140 #13 [ffffc9002219f9e0] __perf_event_overflow at ffffffff811d68cf #14 [ffffc9002219fa10] perf_swevent_overflow at ffffffff811d6a09 #15 [ffffc9002219fa38] ___perf_sw_event at ffffffff811e0f47 #16 [ffffc9002219fc30] __schedule at ffffffff81a3e04d #17 [ffffc9002219fc90] schedule at ffffffff81a3e219 #18 [ffffc9002219fca0] futex_wait_queue_me at ffffffff8113d1b9 #19 [ffffc9002219fcd8] futex_wait at ffffffff8113e529 #20 [ffffc9002219fdf0] do_futex at ffffffff8113ffbc #21 [ffffc9002219fec0] __x64_sys_futex at ffffffff81140d1c #22 [ffffc9002219ff38] do_syscall_64 at ffffffff81002602 #23 [ffffc9002219ff50] entry_SYSCALL_64_after_hwframe at ffffffff81c00068 The above call stack is actually very similar to an issue reported by Commit eac9153f2b58 ("bpf/stackmap: Fix deadlock with rq_lock in bpf_get_stack()") by Song Liu. The only difference is bpf_send_signal() helper instead of bpf_get_stack() helper. The above deadlock is triggered with a perf_sw_event. Similar to Commit eac9153f2b58, the below almost identical reproducer used tracepoint point sched/sched_switch so the issue can be easily caught. 
/* stress_test.c */ #include #include #include #include #include #include #include #define THREAD_COUNT 1000 char *filename; void *worker(void *p) { void *ptr; int fd; char *pptr; fd = open(filename, O_RDONLY); if (fd < 0) return NULL; while (1) { struct timespec ts = {0, 1000 + rand() % 2000}; ptr = mmap(NULL, 4096 * 64, PROT_READ, MAP_PRIVATE, fd, 0); usleep(1); if (ptr == MAP_FAILED) { printf("failed to mmap\n"); break; } munmap(ptr, 4096 * 64); usleep(1); pptr = malloc(1); usleep(1); pptr[0] = 1; usleep(1); free(pptr); usleep(1); nanosleep(&ts, NULL); } close(fd); return NULL; } int main(int argc, char *argv[]) { void *ptr; int i; pthread_t threads[THREAD_COUNT]; if (argc < 2) return 0; filename = argv[1]; for (i = 0; i < THREAD_COUNT; i++) { if (pthread_create(threads + i, NULL, worker, NULL)) { fprintf(stderr, "Error creating thread\n"); return 0; } } for (i = 0; i < THREAD_COUNT; i++) pthread_join(threads[i], NULL); return 0; } and the following command: 1. run `stress_test /bin/ls` in one windown 2. hack bcc trace.py with the following change: --- a/tools/trace.py +++ b/tools/trace.py @@ -513,6 +513,7 @@ BPF_PERF_OUTPUT(%s); __data.tgid = __tgid; __data.pid = __pid; bpf_get_current_comm(&__data.comm, sizeof(__data.comm)); + bpf_send_signal(10); %s %s %s.perf_submit(%s, &__data, sizeof(__data)); 3. in a different window run ./trace.py -p $(pidof stress_test) t:sched:sched_switch The deadlock can be reproduced in our production system. Similar to Song's fix, the fix is to delay sending signal if irqs is disabled to avoid deadlocks involving with rq_lock. With this change, my above stress-test in our production system won't cause deadlock any more. I also implemented a scale-down version of reproducer in the selftest (a subsequent commit). With latest bpf-next, it complains for the following potential deadlock. [ 32.832450] -> #1 (&p->pi_lock){-.-.}: [ 32.833100] _raw_spin_lock_irqsave+0x44/0x80 [ 32.833696] task_rq_lock+0x2c/0xa0 [ 32.834182] task_sched_runtime+0x59/0xd0 [ 32.834721] thread_group_cputime+0x250/0x270 [ 32.835304] thread_group_cputime_adjusted+0x2e/0x70 [ 32.835959] do_task_stat+0x8a7/0xb80 [ 32.836461] proc_single_show+0x51/0xb0 ... [ 32.839512] -> #0 (&(&sighand->siglock)->rlock){....}: [ 32.840275] __lock_acquire+0x1358/0x1a20 [ 32.840826] lock_acquire+0xc7/0x1d0 [ 32.841309] _raw_spin_lock_irqsave+0x44/0x80 [ 32.841916] __lock_task_sighand+0x79/0x160 [ 32.842465] do_send_sig_info+0x35/0x90 [ 32.842977] bpf_send_signal+0xa/0x10 [ 32.843464] bpf_prog_bc13ed9e4d3163e3_send_signal_tp_sched+0x465/0x1000 [ 32.844301] trace_call_bpf+0x115/0x270 [ 32.844809] perf_trace_run_bpf_submit+0x4a/0xc0 [ 32.845411] perf_trace_sched_switch+0x10f/0x180 [ 32.846014] __schedule+0x45d/0x880 [ 32.846483] schedule+0x5f/0xd0 ... [ 32.853148] Chain exists of: [ 32.853148] &(&sighand->siglock)->rlock --> &p->pi_lock --> &rq->lock [ 32.853148] [ 32.854451] Possible unsafe locking scenario: [ 32.854451] [ 32.855173] CPU0 CPU1 [ 32.855745] ---- ---- [ 32.856278] lock(&rq->lock); [ 32.856671] lock(&p->pi_lock); [ 32.857332] lock(&rq->lock); [ 32.857999] lock(&(&sighand->siglock)->rlock); Deadlock happens on CPU0 when it tries to acquire &sighand->siglock but it has been held by CPU1 and CPU1 tries to grab &rq->lock and cannot get it. This is not exactly the callstack in our production environment, but sympotom is similar and both locks are using spin_lock_irqsave() to acquire the lock, and both involves rq_lock. The fix to delay sending signal when irq is disabled also fixed this issue. 
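The stress_test.c reproducer above lost its header names in this copy (the #include lines are empty). Judging from the calls it makes (open, mmap/munmap, usleep, nanosleep, rand, malloc/free, pthread_create/pthread_join, printf/fprintf), a plausible set of seven includes that lets it compile is shown below; this is a reconstruction, not the original list.

/* Likely includes for stress_test.c (reconstructed, not from the original patch) */
#include <stdio.h>      /* printf, fprintf */
#include <stdlib.h>     /* malloc, free, rand */
#include <unistd.h>     /* usleep, close */
#include <fcntl.h>      /* open, O_RDONLY */
#include <pthread.h>    /* pthread_create, pthread_join */
#include <sys/mman.h>   /* mmap, munmap, PROT_READ, MAP_PRIVATE, MAP_FAILED */
#include <time.h>       /* struct timespec, nanosleep */

With those headers restored, "gcc -pthread -o stress_test stress_test.c" should build the reproducer used in step 1 of the recipe above.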
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Cc: Song Liu Link: https://lore.kernel.org/bpf/20200304191104.2796501-1-yhs@fb.com --- kernel/trace/bpf_trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 19e793aa441a..68250d433bd7 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -732,7 +732,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type) if (unlikely(!nmi_uaccess_okay())) return -EPERM; - if (in_nmi()) { + if (irqs_disabled()) { /* Do an early check on signal validity. Otherwise, * the error is lost in deferred irq_work. */ From c4ef2f3256e3bc008f98121cad39ee5467db07a6 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Wed, 4 Mar 2020 11:11:05 -0800 Subject: [PATCH 003/138] selftests/bpf: Add send_signal_sched_switch test Added one test, send_signal_sched_switch, to test bpf_send_signal() helper triggered by sched/sched_switch tracepoint. This test can be used to verify kernel deadlocks fixed by the previous commit. The test itself is heavily borrowed from Commit eac9153f2b58 ("bpf/stackmap: Fix deadlock with rq_lock in bpf_get_stack()"). Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Cc: Song Liu Link: https://lore.kernel.org/bpf/20200304191105.2796601-1-yhs@fb.com --- .../bpf/prog_tests/send_signal_sched_switch.c | 60 +++++++++++++++++++ .../bpf/progs/test_send_signal_kern.c | 6 ++ 2 files changed, 66 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c new file mode 100644 index 000000000000..189a34a7addb --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include "test_send_signal_kern.skel.h" + +static void sigusr1_handler(int signum) +{ +} + +#define THREAD_COUNT 100 + +static void *worker(void *p) +{ + int i; + + for ( i = 0; i < 1000; i++) + usleep(1); + + return NULL; +} + +void test_send_signal_sched_switch(void) +{ + struct test_send_signal_kern *skel; + pthread_t threads[THREAD_COUNT]; + u32 duration = 0; + int i, err; + + signal(SIGUSR1, sigusr1_handler); + + skel = test_send_signal_kern__open_and_load(); + if (CHECK(!skel, "skel_open_and_load", "skeleton open_and_load failed\n")) + return; + + skel->bss->pid = getpid(); + skel->bss->sig = SIGUSR1; + + err = test_send_signal_kern__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed\n")) + goto destroy_skel; + + for (i = 0; i < THREAD_COUNT; i++) { + err = pthread_create(threads + i, NULL, worker, NULL); + if (CHECK(err, "pthread_create", "Error creating thread, %s\n", + strerror(errno))) + goto destroy_skel; + } + + for (i = 0; i < THREAD_COUNT; i++) + pthread_join(threads[i], NULL); + +destroy_skel: + test_send_signal_kern__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c index 1acc91e87bfc..b4233d3efac2 100644 --- a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c +++ b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c @@ -31,6 +31,12 @@ int send_signal_tp(void *ctx) return bpf_send_signal_test(ctx); } +SEC("tracepoint/sched/sched_switch") +int 
send_signal_tp_sched(void *ctx) +{ + return bpf_send_signal_test(ctx); +} + SEC("perf_event") int send_signal_perf(void *ctx) { From 8e5290e710f4ffe8e9f8813e2ed0397a94d7b2f1 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 4 Mar 2020 17:34:47 -0800 Subject: [PATCH 004/138] bpf: Return better error value in delete_elem for struct_ops map The current always succeed behavior in bpf_struct_ops_map_delete_elem() is not ideal for userspace tool. It can be improved to return proper error value. If it is in TOBEFREE, it means unregistration has been already done before but it is in progress and waiting for the subsystem to clear the refcnt to zero, so -EINPROGRESS. If it is INIT, it means the struct_ops has not been registered yet, so -ENOENT. Fixes: 85d33df357b6 ("bpf: Introduce BPF_MAP_TYPE_STRUCT_OPS") Signed-off-by: Martin KaFai Lau Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200305013447.535326-1-kafai@fb.com --- kernel/bpf/bpf_struct_ops.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 042f95534f86..68a89a9f7ccd 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -482,13 +482,21 @@ static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key) prev_state = cmpxchg(&st_map->kvalue.state, BPF_STRUCT_OPS_STATE_INUSE, BPF_STRUCT_OPS_STATE_TOBEFREE); - if (prev_state == BPF_STRUCT_OPS_STATE_INUSE) { + switch (prev_state) { + case BPF_STRUCT_OPS_STATE_INUSE: st_map->st_ops->unreg(&st_map->kvalue.data); if (refcount_dec_and_test(&st_map->kvalue.refcnt)) bpf_map_put(map); + return 0; + case BPF_STRUCT_OPS_STATE_TOBEFREE: + return -EINPROGRESS; + case BPF_STRUCT_OPS_STATE_INIT: + return -ENOENT; + default: + WARN_ON_ONCE(1); + /* Should never happen. Treat it as not found. */ + return -ENOENT; } - - return 0; } static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key, From 849b4d94582a966ecb533448415462846da1f0fa Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 4 Mar 2020 17:34:54 -0800 Subject: [PATCH 005/138] bpf: Do not allow map_freeze in struct_ops map struct_ops map cannot support map_freeze. Otherwise, a struct_ops cannot be unregistered from the subsystem. Fixes: 85d33df357b6 ("bpf: Introduce BPF_MAP_TYPE_STRUCT_OPS") Signed-off-by: Martin KaFai Lau Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200305013454.535397-1-kafai@fb.com --- kernel/bpf/syscall.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index a91ad518c050..0c7fb0d4836d 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1510,6 +1510,11 @@ static int map_freeze(const union bpf_attr *attr) if (IS_ERR(map)) return PTR_ERR(map); + if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { + fdput(f); + return -ENOTSUPP; + } + mutex_lock(&map->freeze_mutex); if (map->writecnt) { From 692b0399a22530b2de8490bea75a7d20d59391d0 Mon Sep 17 00:00:00 2001 From: Hamdan Igbaria Date: Mon, 24 Feb 2020 14:41:29 +0200 Subject: [PATCH 006/138] net/mlx5: DR, Fix postsend actions write length Fix the send info write length to be (actions x action) size in bytes. 
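Going back to the struct_ops delete_elem change above: the control flow is a single atomic state transition followed by mapping the previously observed state to an errno. A minimal userspace sketch of that flow, with illustrative enum and helper names (only the logic mirrors the kernel change):

/* Sketch: cmpxchg the state, then translate what we saw into an error code. */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

enum state { ST_INIT, ST_INUSE, ST_TOBEFREE };

static int delete_elem(_Atomic int *state)
{
	int prev = ST_INUSE;

	/* Atomically move INUSE -> TOBEFREE, like the kernel's cmpxchg(). */
	if (atomic_compare_exchange_strong(state, &prev, ST_TOBEFREE))
		return 0;            /* was INUSE: unregistration kicked off */

	switch (prev) {              /* prev now holds the state we found */
	case ST_TOBEFREE:
		return -EINPROGRESS; /* unregistration already in flight */
	case ST_INIT:
	default:
		return -ENOENT;      /* never registered (or unknown state) */
	}
}

int main(void)
{
	_Atomic int st = ST_TOBEFREE;

	printf("%d\n", delete_elem(&st));  /* -EINPROGRESS, i.e. -115 on Linux */
	return 0;
}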
Fixes: 297cccebdc5a ("net/mlx5: DR, Expose an internal API to issue RDMA operations") Signed-off-by: Hamdan Igbaria Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 6dec2a550a10..2d93228ff633 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -933,7 +933,6 @@ static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn, action->rewrite.data = (void *)ops; action->rewrite.num_of_actions = i; - action->rewrite.chunk->byte_size = i * sizeof(*ops); ret = mlx5dr_send_postsend_action(dmn, action); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index c7f10d4f8f8d..095ec7b1399d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -558,7 +558,8 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, int ret; send_info.write.addr = (uintptr_t)action->rewrite.data; - send_info.write.length = action->rewrite.chunk->byte_size; + send_info.write.length = action->rewrite.num_of_actions * + DR_MODIFY_ACTION_SIZE; send_info.write.lkey = 0; send_info.remote_addr = action->rewrite.chunk->mr_addr; send_info.rkey = action->rewrite.chunk->rkey; From 56917766def72f5afdf4235adb91b6897ff26d9d Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 20 Feb 2020 13:40:24 +0200 Subject: [PATCH 007/138] net/mlx5e: kTLS, Fix TCP seq off-by-1 issue in TX resync flow We have an off-by-1 issue in the TCP seq comparison. The last sequence number that belongs to the TCP packet's payload is not "start_seq + len", but one byte before it. Fix it so the 'ends_before' is evaluated properly. This fixes a bug that results in error completions in the kTLS HW offload flows. Fixes: ffbd9ca94e2e ("net/mlx5e: kTLS, Fix corner-case checks in TX resync flow") Signed-off-by: Tariq Toukan Reviewed-by: Boris Pismenny Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index f260dd96873b..52a56622034a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -218,7 +218,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, * this packet was already acknowledged and its record info * was released. */ - ends_before = before(tcp_seq + datalen, tls_record_start_seq(record)); + ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record)); if (unlikely(tls_record_is_start_marker(record))) { ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; From f28ca65efa87b3fb8da3d69ca7cb1ebc0448de66 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 24 Feb 2020 13:56:53 +0200 Subject: [PATCH 008/138] net/mlx5e: kTLS, Fix wrong value in record tracker enum Fix to match the HW spec: TRACKING state is 1, SEARCHING is 2. No real issue for now, as these values are not currently used. 
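As a standalone illustration of the kTLS off-by-one above: a TCP payload of datalen bytes starting at tcp_seq covers sequence numbers tcp_seq .. tcp_seq + datalen - 1, so the "record ends before this packet" test has to compare the last covered byte. The before() helper below reimplements the kernel's wrap-safe comparison; the numbers are made up for the demo.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "seq1 < seq2", same idea as the kernel's before() macro. */
static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	uint32_t record_start = 1000;
	uint32_t tcp_seq = 900, datalen = 100;   /* payload covers 900..999 */

	/* Old check: 900 + 100 = 1000 is not before 1000, so the packet was
	 * wrongly treated as overlapping the record. */
	printf("old: %d\n", before(tcp_seq + datalen, record_start));

	/* Fixed check: 999 is before 1000, so it correctly ends before it. */
	printf("new: %d\n", before(tcp_seq + datalen - 1, record_start));
	return 0;
}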
Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") Signed-off-by: Tariq Toukan Reviewed-by: Boris Pismenny Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index a3efa29a4629..63116be6b1d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -38,8 +38,8 @@ enum { enum { MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START = 0, - MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1, - MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 2, + MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 1, + MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2, }; struct mlx5e_ktls_offload_context_tx { From 404402abd5f90aa90a134eb9604b1750c1941529 Mon Sep 17 00:00:00 2001 From: Sebastian Hense Date: Thu, 20 Feb 2020 08:11:36 +0100 Subject: [PATCH 009/138] net/mlx5e: Fix endianness handling in pedit mask The mask value is provided as 64 bit and has to be casted in either 32 or 16 bit. On big endian systems the wrong half was casted which resulted in an all zero mask. Fixes: 2b64beba0251 ("net/mlx5e: Support header re-write of partial fields in TC pedit offload") Signed-off-by: Sebastian Hense Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 74091f72c9a8..ec5fc52bf572 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2476,10 +2476,11 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, continue; if (f->field_bsize == 32) { - mask_be32 = *(__be32 *)&mask; + mask_be32 = (__be32)mask; mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32)); } else if (f->field_bsize == 16) { - mask_be16 = *(__be16 *)&mask; + mask_be32 = (__be32)mask; + mask_be16 = *(__be16 *)&mask_be32; mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16)); } From 0b136454741b2f6cb18d55e355d396db9248b2ab Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 19 Feb 2020 09:03:28 +0200 Subject: [PATCH 010/138] net/mlx5: Clear LAG notifier pointer after unregister After returning from unregister_netdevice_notifier_dev_net(), set the notifier_call field to NULL so successive call to mlx5_lag_add() will function as expected. 
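The pedit endianness fix above is easiest to see with a small host-side experiment: reinterpreting the first bytes of a 64-bit value through a pointer picks up the high half on a big-endian machine, while an integer cast always keeps the low 32 bits. The memcpy() below stands in for the old "*(__be32 *)&mask" reinterpretation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t mask = 0x00000000ffff0000ULL;   /* a 32-bit-wide pedit mask */
	uint32_t first_bytes, low_half;

	/* Old behaviour: take whatever sits in the first four bytes of the
	 * 64-bit object. On big-endian that is the (all-zero) high half. */
	memcpy(&first_bytes, &mask, sizeof(first_bytes));

	/* Fixed behaviour: a value cast keeps the low 32 bits on any host. */
	low_half = (uint32_t)mask;

	printf("first bytes: 0x%08x, value cast: 0x%08x\n", first_bytes, low_half);
	return 0;
}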
Fixes: 7907f23adc18 ("net/mlx5: Implement RoCE LAG feature") Signed-off-by: Eli Cohen Reviewed-by: Vlad Buslov Reviewed-by: Raed Salem Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lag.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 8e19f6ab8393..93052b07c76c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -615,8 +615,10 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev) break; if (i == MLX5_MAX_PORTS) { - if (ldev->nb.notifier_call) + if (ldev->nb.notifier_call) { unregister_netdevice_notifier_net(&init_net, &ldev->nb); + ldev->nb.notifier_call = NULL; + } mlx5_lag_mp_cleanup(ldev); cancel_delayed_work_sync(&ldev->bond_work); mlx5_lag_dev_free(ldev); From 80f1f85036355e5581ec0b99913410345ad3491b Mon Sep 17 00:00:00 2001 From: Luke Nelson Date: Thu, 5 Mar 2020 15:44:12 -0800 Subject: [PATCH 011/138] bpf, x32: Fix bug with JMP32 JSET BPF_X checking upper bits The current x32 BPF JIT is incorrect for JMP32 JSET BPF_X when the upper 32 bits of operand registers are non-zero in certain situations. The problem is in the following code: case BPF_JMP | BPF_JSET | BPF_X: case BPF_JMP32 | BPF_JSET | BPF_X: ... /* and dreg_lo,sreg_lo */ EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); /* and dreg_hi,sreg_hi */ EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); /* or dreg_lo,dreg_hi */ EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); This code checks the upper bits of the operand registers regardless if the BPF instruction is BPF_JMP32 or BPF_JMP64. Registers dreg_hi and dreg_lo are not loaded from the stack for BPF_JMP32, however, they can still be polluted with values from previous instructions. The following BPF program demonstrates the bug. The jset64 instruction loads the temporary registers and performs the jump, since ((u64)r7 & (u64)r8) is non-zero. The jset32 should _not_ be taken, as the lower 32 bits are all zero, however, the current JIT will take the branch due the pollution of temporary registers from the earlier jset64. mov64 r0, 0 ld64 r7, 0x8000000000000000 ld64 r8, 0x8000000000000000 jset64 r7, r8, 1 exit jset32 r7, r8, 1 mov64 r0, 2 exit The expected return value of this program is 2; under the buggy x32 JIT it returns 0. The fix is to skip using the upper 32 bits for jset32 and compare the upper 32 bits for jset64 only. All tests in test_bpf.ko and selftests/bpf/test_verifier continue to pass with this change. We found this bug using our automated verification tool, Serval. 
Fixes: 69f827eb6e14 ("x32: bpf: implement jitting of JMP32") Co-developed-by: Xi Wang Signed-off-by: Xi Wang Signed-off-by: Luke Nelson Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200305234416.31597-1-luke.r.nels@gmail.com --- arch/x86/net/bpf_jit_comp32.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 393d251798c0..4d2a7a764602 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -2039,10 +2039,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, } /* and dreg_lo,sreg_lo */ EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); - /* and dreg_hi,sreg_hi */ - EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); - /* or dreg_lo,dreg_hi */ - EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + if (is_jmp64) { + /* and dreg_hi,sreg_hi */ + EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); + /* or dreg_lo,dreg_hi */ + EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + } goto emit_cond_jmp; } case BPF_JMP | BPF_JSET | BPF_K: From 93e5fbb18cec70b3b5c614f67b65388829113bdd Mon Sep 17 00:00:00 2001 From: Luke Nelson Date: Thu, 5 Mar 2020 15:44:13 -0800 Subject: [PATCH 012/138] selftests: bpf: Add test for JMP32 JSET BPF_X with upper bits set The existing tests attempt to check that JMP32 JSET ignores the upper bits in the operand registers. However, the tests missed one such bug in the x32 JIT that is only uncovered when a previous instruction pollutes the upper 32 bits of the registers. This patch adds a new test case that catches the bug by first executing a 64-bit JSET to pollute the upper 32-bits of the temporary registers, followed by a 32-bit JSET which should ignore the upper 32 bits. Co-developed-by: Xi Wang Signed-off-by: Xi Wang Signed-off-by: Luke Nelson Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200305234416.31597-2-luke.r.nels@gmail.com --- tools/testing/selftests/bpf/verifier/jmp32.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c index bf0322eb5346..bd5cae4a7f73 100644 --- a/tools/testing/selftests/bpf/verifier/jmp32.c +++ b/tools/testing/selftests/bpf/verifier/jmp32.c @@ -61,6 +61,21 @@ }, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, +{ + "jset32: ignores upper bits", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_7, 0x8000000000000000), + BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000), + BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, { "jset32: min/max deduction", .insns = { From 089e5016d7eb063712063670e6da7c1a4de1a5c1 Mon Sep 17 00:00:00 2001 From: Avraham Stern Date: Fri, 6 Mar 2020 15:16:21 +0200 Subject: [PATCH 013/138] iwlwifi: mvm: take the required lock when clearing time event data When receiving a session protection end notification, the time event data is cleared without holding the required lock. Fix it. 
Signed-off-by: Avraham Stern Signed-off-by: Luca Coelho Link: https://lore.kernel.org/r/iwlwifi.20200306151128.a49846a634e4.Id1ada7c5a964f5e25f4d0eacc2c4b050015b46a2@changeid --- drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index c0b420fe5e48..1babc4bb5194 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -785,7 +785,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, if (!le32_to_cpu(notif->status)) { iwl_mvm_te_check_disconnect(mvm, vif, "Session protection failure"); + spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); } if (le32_to_cpu(notif->start)) { @@ -801,7 +803,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, */ iwl_mvm_te_check_disconnect(mvm, vif, "No beacon heard and the session protection is over already..."); + spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); } goto out_unlock; From cb377dfda1755b3bc01436755d866c8e5336a762 Mon Sep 17 00:00:00 2001 From: Mordechay Goodstein Date: Fri, 6 Mar 2020 15:16:22 +0200 Subject: [PATCH 014/138] iwlwifi: consider HE capability when setting LDPC The AP may set the LDPC capability only in HE (IEEE80211_HE_PHY_CAP1), but we were checking it only in the HT capabilities. If we don't use this capability when required, the DSP gets the wrong configuration in HE and doesn't work properly. Signed-off-by: Mordechay Goodstein Fixes: befebbb30af0 ("iwlwifi: rs: consider LDPC capability in case of HE") Signed-off-by: Luca Coelho Link: https://lore.kernel.org/r/iwlwifi.20200306151128.492d167c1a25.I1ad1353dbbf6c99ae57814be750f41a1c9f7f4ac@changeid --- drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index e2cf9e015ef8..80ef238a8488 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -147,7 +147,11 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; - /* consider our LDPC support in case of HE */ + /* consider LDPC support in case of HE */ + if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) + flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; + if (sband->iftype_data && sband->iftype_data->he_cap.has_he && !(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) From 71bc0334a6371c7de0e0594439de7d237e4d2d03 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Fri, 6 Mar 2020 15:16:23 +0200 Subject: [PATCH 015/138] iwlwifi: check allocated pointer when allocating conf_tlvs We were erroneously checking the length of the tlv instead of checking the pointer returned by kmemdup() when allocating dbg_conf_tlv[]. This was probably a typo. Fix it by checking the returned pointer instead of the length. 
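The pattern behind this one-line fix is worth spelling out: after duplicating a buffer, the error check must look at the returned pointer, not at the already-known source length. A userspace analogue of the kmemdup() call, with illustrative names:

#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for kmemdup(). */
static void *dup_buf(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	char src[16] = "config tlv";
	char *copy = dup_buf(src, sizeof(src));

	if (!copy)          /* correct: test the allocation result */
		return 1;   /* testing the length here would miss an OOM */

	free(copy);
	return 0;
}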
Reported-by: Markus Elfring Signed-off-by: Luca Coelho Link: https://lore.kernel.org/r/iwlwifi.20200306151128.06e00e6e980f.I9a890ce83493b79892a5f690d12016525317fa7e@changeid --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 2d1cb4647c3b..0481796f75bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1467,7 +1467,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) kmemdup(pieces->dbg_conf_tlv[i], pieces->dbg_conf_tlv_len[i], GFP_KERNEL); - if (!pieces->dbg_conf_tlv_len[i]) + if (!pieces->dbg_conf_tlv[i]) goto out_free_fw; } } From a5688e600e78f9fc68102bf0fe5c797fc2826abe Mon Sep 17 00:00:00 2001 From: Mordechay Goodstein Date: Fri, 6 Mar 2020 15:16:24 +0200 Subject: [PATCH 016/138] iwlwifi: yoyo: don't add TLV offset when reading FIFOs The TLV offset is only used to read registers, while the offset used for the FIFO addresses are hard coded in the driver and not given by the TLV. If we try to apply the TLV offset when reading the FIFOs, we'll read from invalid addresses, causing the driver to hang. Signed-off-by: Mordechay Goodstein Fixes: 8d7dea25ada7 ("iwlwifi: dbg_ini: implement Rx fifos dump") Signed-off-by: Luca Coelho Link: https://lore.kernel.org/r/iwlwifi.20200306151129.fbab869c26fa.I4ddac20d02f9bce41855a816aa6855c89bc3874e@changeid --- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 91df1ee25dd0..e60eb5cc847f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -8,7 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1409,11 +1409,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt, goto out; } - /* - * region register have absolute value so apply rxf offset after - * reading the registers - */ - offs += rxf_data.offset; + offs = rxf_data.offset; /* Lock fence */ iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1); From 699b760bd29edba736590fffef7654cb079c753e Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Fri, 6 Mar 2020 15:16:25 +0200 Subject: [PATCH 017/138] iwlwifi: dbg: don't abort if sending DBGC_SUSPEND_RESUME fails If the firmware is in a bad state or not initialized fully, sending the DBGC_SUSPEND_RESUME command fails but we can still collect logs. Instead of aborting the entire dump process, simply ignore the error. 
By removing the last callpoint that was checking the return value, we can also convert the function to return void. Signed-off-by: Luca Coelho Fixes: 576058330f2d ("iwlwifi: dbg: support debug recording suspend resume command") Signed-off-by: Luca Coelho Link: https://lore.kernel.org/r/iwlwifi.20200306151129.dcec37b2efd4.I8dcd190431d110a6a0e88095ce93591ccfb3d78d@changeid --- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 15 +++++---------- drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 6 +++--- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index e60eb5cc847f..8796ab8f2a5f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -2490,10 +2490,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) goto out; } - if (iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, true)) { - IWL_ERR(fwrt, "Failed to stop DBGC recording, aborting dump\n"); - goto out; - } + iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, true); IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n"); if (iwl_trans_dbg_ini_valid(fwrt->trans)) @@ -2658,14 +2655,14 @@ static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans, return 0; } -int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dbg_params *params, - bool stop) +void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_params *params, + bool stop) { int ret = 0; if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) - return 0; + return; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP)) @@ -2682,7 +2679,5 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, iwl_fw_set_dbg_rec_on(fwrt); } #endif - - return ret; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 179f2905d56b..9d3513213f5f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -239,9 +239,9 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \ iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) -int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dbg_params *params, - bool stop); +void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_params *params, + bool stop); #ifdef CONFIG_IWLWIFI_DEBUGFS static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt) From ce19801ba75a902ab515dda03b57738c708d0781 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Fri, 6 Mar 2020 15:16:26 +0200 Subject: [PATCH 018/138] iwlwifi: mvm: Fix rate scale NSS configuration The TLC configuration did not take into consideration the station's SMPS configuration, and thus configured rates for 2 NSS even if static SMPS was reported by the station. Fix this. 
Signed-off-by: Ilan Peer Signed-off-by: Luca Coelho Link: https://lore.kernel.org/r/iwlwifi.20200306151129.b4f940d13eca.Ieebfa889d08205a3a961ae0138fb5832e8a0f9c1@changeid --- .../net/wireless/intel/iwlwifi/mvm/rs-fw.c | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 80ef238a8488..ca99a9c4f70e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -195,11 +195,13 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, { u16 supp; int i, highest_mcs; + u8 nss = sta->rx_nss; - for (i = 0; i < sta->rx_nss; i++) { - if (i == IWL_TLC_NSS_MAX) - break; + /* the station support only a single receive chain */ + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + nss = 1; + for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1); if (!highest_mcs) continue; @@ -245,8 +247,13 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, u16 tx_mcs_160 = le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160); int i; + u8 nss = sta->rx_nss; - for (i = 0; i < sta->rx_nss && i < IWL_TLC_NSS_MAX; i++) { + /* the station support only a single receive chain */ + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + nss = 1; + + for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3; u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3; u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3; @@ -307,8 +314,14 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, cmd->mode = IWL_TLC_MNG_MODE_HT; cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); - cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = - cpu_to_le16(ht_cap->mcs.rx_mask[1]); + + /* the station support only a single receive chain */ + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = + 0; + else + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = + cpu_to_le16(ht_cap->mcs.rx_mask[1]); } } From 9352ed0165ff4313ab340c979446c3d64c531f7a Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Fri, 6 Mar 2020 15:16:27 +0200 Subject: [PATCH 019/138] iwlwifi: cfg: use antenna diversity with all AX101 devices We were erroneously only setting the tx_with_siso_diversity flag in the Qu B-step configurations for AX101 devices, though we should do it on all configurations. Add the flag to the other two configurations, namely Qu C-step and QuZ. 
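A compact restatement of the rule the rate-scaling (TLC) fix above introduces: a station advertising static SMPS can only receive on a single spatial stream, so the number of streams used for rate configuration must be clamped to 1 regardless of rx_nss. The types and enum values below are simplified stand-ins, not the mac80211 definitions.

#include <stdio.h>

enum smps_mode { SMPS_OFF, SMPS_STATIC, SMPS_DYNAMIC };

struct sta_info {
	unsigned char rx_nss;
	enum smps_mode smps_mode;
};

/* Number of spatial streams the rate tables should actually be built for. */
static unsigned char usable_nss(const struct sta_info *sta)
{
	return sta->smps_mode == SMPS_STATIC ? 1 : sta->rx_nss;
}

int main(void)
{
	struct sta_info s = { .rx_nss = 2, .smps_mode = SMPS_STATIC };

	printf("configure rates for %u NSS\n", usable_nss(&s));   /* 1, not 2 */
	return 0;
}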
Signed-off-by: Luca Coelho Link: https://lore.kernel.org/r/iwlwifi.20200306151129.1cd986ef467c.Idc0b111475ae3d38b68ae062613c080b574e33e1@changeid --- drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index a22a830019c0..355af47c5f73 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -283,6 +283,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = { * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, + .tx_with_siso_diversity = true, .num_rbds = IWL_NUM_RBDS_22000_HE, }; @@ -309,6 +310,7 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = { * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, + .tx_with_siso_diversity = true, .num_rbds = IWL_NUM_RBDS_22000_HE, }; From 62039c30c19dcab96621e074aeeb90da7100def7 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 9 Mar 2020 15:27:55 -0700 Subject: [PATCH 020/138] bpf: Initialize storage pointers to NULL to prevent freeing garbage pointer Local storage array isn't initialized, so if cgroup storage allocation fails for BPF_CGROUP_STORAGE_SHARED, error handling code will attempt to free uninitialized pointer for BPF_CGROUP_STORAGE_PERCPU storage type. Avoid this by always initializing storage pointers to NULLs. Fixes: 8bad74f9840f ("bpf: extend cgroup bpf core to allow multiple cgroup storage types") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200309222756.1018737-1-andriin@fb.com --- kernel/bpf/cgroup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 9a500fadbef5..b2bc4c335bb1 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -302,8 +302,8 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)); struct list_head *progs = &cgrp->bpf.progs[type]; struct bpf_prog *old_prog = NULL; - struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE], - *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL}; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; + struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; struct bpf_prog_list *pl, *replace_pl = NULL; enum bpf_cgroup_storage_type stype; int err; From 1d8006abaab4cb90f81add86e8d1bf9411add05a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 9 Mar 2020 15:40:17 -0700 Subject: [PATCH 021/138] bpf: Fix cgroup ref leak in cgroup_bpf_inherit on out-of-memory There is no compensating cgroup_bpf_put() for each ancestor cgroup in cgroup_bpf_inherit(). If compute_effective_progs returns error, those cgroups won't be freed ever. Fix it by putting them in cleanup code path. 
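A userspace sketch of the reference-counting rule this fix restores: every reference taken on an ancestor before a step that can fail needs a matching put on the error path. The node/get/put names are illustrative, not kernel API; the goto mirrors the cleanup label in cgroup_bpf_inherit().

#include <stddef.h>

struct node {
	struct node *parent;
	int refcnt;
};

static void get(struct node *n) { n->refcnt++; }
static void put(struct node *n) { n->refcnt--; }

static int inherit(struct node *self, int fail)
{
	struct node *p;

	for (p = self->parent; p; p = p->parent)
		get(p);                     /* pin all ancestors */

	if (fail)
		goto cleanup;               /* e.g. compute_effective_progs() -ENOMEM */
	return 0;

cleanup:
	for (p = self->parent; p; p = p->parent)
		put(p);                     /* the compensation this patch adds */
	return -1;
}

int main(void)
{
	struct node root = { NULL, 1 }, child = { &root, 1 };

	inherit(&child, 1);
	return root.refcnt == 1 ? 0 : 1;    /* refcount balanced after the error */
}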
Fixes: e10360f815ca ("bpf: cgroup: prevent out-of-order release of cgroup bpf") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Roman Gushchin Link: https://lore.kernel.org/bpf/20200309224017.1063297-1-andriin@fb.com --- kernel/bpf/cgroup.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index b2bc4c335bb1..4f1472409ef8 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -227,6 +227,9 @@ cleanup: for (i = 0; i < NR; i++) bpf_prog_array_free(arrays[i]); + for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) + cgroup_bpf_put(p); + percpu_ref_exit(&cgrp->bpf.refcnt); return -ENOMEM; From da86cad0e011c0216bf2303bc924fa080e150bc8 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Thu, 5 Mar 2020 14:26:22 +0200 Subject: [PATCH 022/138] MAINTAINERS: update web URL for iwlwifi The current URL mentioned in iwlwifi's W entry is outdated and currently pointing to a dead link. Change it so that it points to the correct Wiki page directly. Signed-off-by: Luca Coelho Signed-off-by: Kalle Valo --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index b1935c2ae118..2e366d8f054f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8684,7 +8684,7 @@ M: Emmanuel Grumbach M: Luca Coelho M: Intel Linux Wireless L: linux-wireless@vger.kernel.org -W: http://intellinuxwireless.org +W: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git S: Supported F: drivers/net/wireless/intel/iwlwifi/ From da6c7faeb103c493e505e87643272f70be586635 Mon Sep 17 00:00:00 2001 From: Yoshiki Komachi Date: Tue, 10 Mar 2020 16:32:29 +0900 Subject: [PATCH 023/138] bpf/btf: Fix BTF verification of enum members in struct/union btf_enum_check_member() was currently sure to recognize the size of "enum" type members in struct/union as the size of "int" even if its size was packed. This patch fixes BTF enum verification to use the correct size of member in BPF programs. Fixes: 179cde8cef7e ("bpf: btf: Check members of struct/union") Signed-off-by: Yoshiki Komachi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/1583825550-18606-2-git-send-email-komachi.yoshiki@gmail.com --- kernel/bpf/btf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 787140095e58..32ab9225026e 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -2418,7 +2418,7 @@ static int btf_enum_check_member(struct btf_verifier_env *env, struct_size = struct_type->size; bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); - if (struct_size - bytes_offset < sizeof(int)) { + if (struct_size - bytes_offset < member_type->size) { btf_verifier_log_member(env, struct_type, member, "Member exceeds struct_size"); return -EINVAL; From 6ffe559a77d1c963a3567f7a39a5419bdcdc4f1c Mon Sep 17 00:00:00 2001 From: Yoshiki Komachi Date: Tue, 10 Mar 2020 16:32:30 +0900 Subject: [PATCH 024/138] selftests/bpf: Add test for the packed enum member in struct/union Add a simple test to the existing selftest program in order to make sure that a packed enum member in struct unexceeds the struct_size. 
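The failing case the new test encodes can be reproduced directly in C with the GCC/clang packed-enum extension: the enum member occupies a single byte, so the old BTF check, which assumed every enum member needs sizeof(int) bytes, would wrongly report that it exceeds the 2-byte struct. This is a host-side illustration of the layout, not part of the selftest.

#include <stdio.h>

enum E { E0, E1 } __attribute__((packed));  /* smallest type that fits: 1 byte */

struct A {
	char m;
	enum E n;     /* 1 byte, not sizeof(int) */
};

int main(void)
{
	printf("sizeof(enum E)=%zu sizeof(struct A)=%zu\n",
	       sizeof(enum E), sizeof(struct A));
	/* Typically prints 1 and 2, matching the value_size of 2 used by the
	 * "size check test #5" BTF raw test. */
	return 0;
}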
Signed-off-by: Yoshiki Komachi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/1583825550-18606-3-git-send-email-komachi.yoshiki@gmail.com --- tools/testing/selftests/bpf/test_btf.c | 42 ++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 93040ca83e60..8da77cda5f4a 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -1062,6 +1062,48 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Member exceeds struct_size", }, +/* Test member unexceeds the size of struct + * + * enum E { + * E0, + * E1, + * }; + * + * struct A { + * char m; + * enum E __attribute__((packed)) n; + * }; + */ +{ + .descr = "size check test #5", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)), + /* char */ /* [2] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), + /* enum E { */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 1), + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_ENUM_ENC(NAME_TBD, 1), + /* } */ + /* struct A { */ /* [4] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 2), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* char m; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 8),/* enum E __attribute__((packed)) n; */ + /* } */ + BTF_END_RAW, + }, + .str_sec = "\0E\0E0\0E1\0A\0m\0n", + .str_sec_size = sizeof("\0E\0E0\0E1\0A\0m\0n"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "size_check5_map", + .key_size = sizeof(int), + .value_size = 2, + .key_type_id = 1, + .value_type_id = 4, + .max_entries = 4, +}, + /* typedef const void * const_void_ptr; * struct A { * const_void_ptr m; From 90db6d772f749e38171d04619a5e3cd8804a6d02 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 10 Mar 2020 09:41:48 -0700 Subject: [PATCH 025/138] bpf, sockmap: Remove bucket->lock from sock_{hash|map}_free The bucket->lock is not needed in the sock_hash_free and sock_map_free calls, in fact it is causing a splat due to being inside rcu block. | BUG: sleeping function called from invalid context at net/core/sock.c:2935 | in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 62, name: kworker/0:1 | 3 locks held by kworker/0:1/62: | #0: ffff88813b019748 ((wq_completion)events){+.+.}, at: process_one_work+0x1d7/0x5e0 | #1: ffffc900000abe50 ((work_completion)(&map->work)){+.+.}, at: process_one_work+0x1d7/0x5e0 | #2: ffff8881381f6df8 (&stab->lock){+...}, at: sock_map_free+0x26/0x180 | CPU: 0 PID: 62 Comm: kworker/0:1 Not tainted 5.5.0-04008-g7b083332376e #454 | Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20190727_073836-buildvm-ppc64le-16.ppc.fedoraproject.org-3.fc31 04/01/2014 | Workqueue: events bpf_map_free_deferred | Call Trace: | dump_stack+0x71/0xa0 | ___might_sleep.cold+0xa6/0xb6 | lock_sock_nested+0x28/0x90 | sock_map_free+0x5f/0x180 | bpf_map_free_deferred+0x58/0x80 | process_one_work+0x260/0x5e0 | worker_thread+0x4d/0x3e0 | kthread+0x108/0x140 | ? process_one_work+0x5e0/0x5e0 | ? kthread_park+0x90/0x90 | ret_from_fork+0x3a/0x50 The reason we have stab->lock and bucket->locks in sockmap code is to handle checking EEXIST in update/delete cases. We need to be careful during an update operation that we check for EEXIST and we need to ensure that the psock object is not in some partial state of removal/insertion while we do this. So both map_update_common and sock_map_delete need to guard from being run together potentially deleting an entry we are checking, etc. 
But by the time we get to the tear-down code in sock_{ma[|hash}_free we have already disconnected the map and we just did synchronize_rcu() in the line above so no updates/deletes should be in flight. Because of this we can drop the bucket locks from the map free'ing code, noting no update/deletes can be in-flight. Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface") Reported-by: Jakub Sitnicki Suggested-by: Jakub Sitnicki Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/158385850787.30597.8346421465837046618.stgit@john-Precision-5820-Tower --- net/core/sock_map.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 085cef5857bb..b70c844a88ec 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -233,8 +233,11 @@ static void sock_map_free(struct bpf_map *map) struct bpf_stab *stab = container_of(map, struct bpf_stab, map); int i; + /* After the sync no updates or deletes will be in-flight so it + * is safe to walk map and remove entries without risking a race + * in EEXIST update case. + */ synchronize_rcu(); - raw_spin_lock_bh(&stab->lock); for (i = 0; i < stab->map.max_entries; i++) { struct sock **psk = &stab->sks[i]; struct sock *sk; @@ -248,7 +251,6 @@ static void sock_map_free(struct bpf_map *map) release_sock(sk); } } - raw_spin_unlock_bh(&stab->lock); /* wait for psock readers accessing its map link */ synchronize_rcu(); @@ -863,10 +865,13 @@ static void sock_hash_free(struct bpf_map *map) struct hlist_node *node; int i; + /* After the sync no updates or deletes will be in-flight so it + * is safe to walk map and remove entries without risking a race + * in EEXIST update case. + */ synchronize_rcu(); for (i = 0; i < htab->buckets_num; i++) { bucket = sock_hash_select_bucket(htab, i); - raw_spin_lock_bh(&bucket->lock); hlist_for_each_entry_safe(elem, node, &bucket->head, node) { hlist_del_rcu(&elem->node); lock_sock(elem->sk); @@ -875,7 +880,6 @@ static void sock_hash_free(struct bpf_map *map) rcu_read_unlock(); release_sock(elem->sk); } - raw_spin_unlock_bh(&bucket->lock); } /* wait for psock readers accessing its map link */ From c80b18cbb04b7b101af9bd14550f13d9866c646a Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Wed, 19 Feb 2020 14:00:41 -0600 Subject: [PATCH 026/138] rtlwifi: rtl8188ee: Fix regression due to commit d1d1a96bdb44 For some unexplained reason, commit d1d1a96bdb44 ("rtlwifi: rtl8188ee: Remove local configuration variable") broke at least one system. As the only net effect of the change was to remove 2 bytes from the start of struct phy_status_rpt, this patch adds 2 bytes of padding at the beginning of the struct. 
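A quick way to see why two bytes of leading padding restore the old behaviour: the hardware fills the status report at fixed byte offsets, so every later field must sit at the offset it had before the earlier 2-byte field was removed. The structs below are heavily abridged stand-ins for phy_status_rpt, for illustration only.

#include <stddef.h>
#include <stdio.h>

struct rpt_without_pad {         /* layout after the 2-byte field was dropped */
	unsigned char ch_corr[2];
	unsigned char cck_sig_qual;
};

struct rpt_with_pad {            /* layout with the compensating padding */
	unsigned char padding[2];
	unsigned char ch_corr[2];
	unsigned char cck_sig_qual;
};

int main(void)
{
	printf("ch_corr: %zu -> %zu, cck_sig_qual: %zu -> %zu\n",
	       offsetof(struct rpt_without_pad, ch_corr),
	       offsetof(struct rpt_with_pad, ch_corr),
	       offsetof(struct rpt_without_pad, cck_sig_qual),
	       offsetof(struct rpt_with_pad, cck_sig_qual));
	/* 0 -> 2 and 2 -> 4: the padded layout matches what the device writes. */
	return 0;
}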
Fixes: d1d1a96bdb44 ("rtlwifi: rtl8188ee: Remove local configuration variable") Cc: Stable # V5.4+ Reported-by: Ashish Tested-by: Ashish Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index 917729807514..e17f70b4d199 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h @@ -561,6 +561,7 @@ static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size) rxmcs == DESC92C_RATE11M) struct phy_status_rpt { + u8 padding[2]; u8 ch_corr[2]; u8 cck_sig_qual_ofdm_pwdb_all; u8 cck_agc_rpt_ofdm_cfosho_a; From e2e57291097b289f84e05fa58ab5e6c6f30cc6e2 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 12 Mar 2020 09:04:08 +0100 Subject: [PATCH 027/138] wlcore: remove stray plus sign The commit mentioned below added a stray plus sign, likely due to some conflict resolution (i.e. as a leftover from a unified diff), which was harmless since it was just used as an integer constant modifier. Remove it anyway, now that I stumbled across it. Fixes: cf33a7728bf2 ("wlcore: mesh: Add support for RX Broadcast Key") Signed-off-by: Johannes Berg Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index ed049c9f7e29..f140f7d7f553 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6274,7 +6274,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_HAS_CHANNEL_SWITCH | -+ WIPHY_FLAG_IBSS_RSN; + WIPHY_FLAG_IBSS_RSN; wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; From 158fe6665389964a1de212818b4a5c52b7f7aff4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 13 Mar 2020 09:05:38 +0000 Subject: [PATCH 028/138] rxrpc: Abstract out the calculation of whether there's Tx space Abstract out the calculation of there being sufficient Tx buffer space. This is reproduced several times in the rxrpc sendmsg code. Signed-off-by: David Howells --- net/rxrpc/sendmsg.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 813fd6888142..a13051d38097 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -17,6 +17,21 @@ #include #include "ar-internal.h" +/* + * Return true if there's sufficient Tx queue space. + */ +static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win) +{ + unsigned int win_size = + min_t(unsigned int, call->tx_winsize, + call->cong_cwnd + call->cong_extra); + rxrpc_seq_t tx_win = READ_ONCE(call->tx_hard_ack); + + if (_tx_win) + *_tx_win = tx_win; + return call->tx_top - tx_win < win_size; +} + /* * Wait for space to appear in the Tx queue or a signal to occur. 
*/ @@ -26,9 +41,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx, { for (;;) { set_current_state(TASK_INTERRUPTIBLE); - if (call->tx_top - call->tx_hard_ack < - min_t(unsigned int, call->tx_winsize, - call->cong_cwnd + call->cong_extra)) + if (rxrpc_check_tx_space(call, NULL)) return 0; if (call->state >= RXRPC_CALL_COMPLETE) @@ -68,9 +81,7 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, set_current_state(TASK_UNINTERRUPTIBLE); tx_win = READ_ONCE(call->tx_hard_ack); - if (call->tx_top - tx_win < - min_t(unsigned int, call->tx_winsize, - call->cong_cwnd + call->cong_extra)) + if (rxrpc_check_tx_space(call, &tx_win)) return 0; if (call->state >= RXRPC_CALL_COMPLETE) @@ -302,9 +313,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, _debug("alloc"); - if (call->tx_top - call->tx_hard_ack >= - min_t(unsigned int, call->tx_winsize, - call->cong_cwnd + call->cong_extra)) { + if (!rxrpc_check_tx_space(call, NULL)) { ret = -EAGAIN; if (msg->msg_flags & MSG_DONTWAIT) goto maybe_error; From e138aa7d3271ac1b0690ae2c9b04d51468dce1d6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 13 Mar 2020 09:22:09 +0000 Subject: [PATCH 029/138] rxrpc: Fix call interruptibility handling Fix the interruptibility of kernel-initiated client calls so that they're either only interruptible when they're waiting for a call slot to come available or they're not interruptible at all. Either way, they're not interruptible during transmission. This should help prevent StoreData calls from being interrupted when writeback is in progress. It doesn't, however, handle interruption during the receive phase. Userspace-initiated calls are still interruptable. After the signal has been handled, sendmsg() will return the amount of data copied out of the buffer and userspace can perform another sendmsg() call to continue transmission. Fixes: bc5e3a546d55 ("rxrpc: Use MSG_WAITALL to tell sendmsg() to temporarily ignore signals") Signed-off-by: David Howells --- fs/afs/rxrpc.c | 3 ++- include/net/af_rxrpc.h | 8 +++++++- net/rxrpc/af_rxrpc.c | 4 ++-- net/rxrpc/ar-internal.h | 4 ++-- net/rxrpc/call_object.c | 3 +-- net/rxrpc/conn_client.c | 13 +++++++++--- net/rxrpc/sendmsg.c | 44 +++++++++++++++++++++++++++++++++-------- 7 files changed, 60 insertions(+), 19 deletions(-) diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 58d396592250..4c28712bb7f6 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -413,7 +413,8 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) afs_wake_up_async_call : afs_wake_up_call_waiter), call->upgrade, - call->intr, + (call->intr ? RXRPC_PREINTERRUPTIBLE : + RXRPC_UNINTERRUPTIBLE), call->debug_id); if (IS_ERR(rxcall)) { ret = PTR_ERR(rxcall); diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 1abae3c340a5..8e547b4d88c8 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -16,6 +16,12 @@ struct sock; struct socket; struct rxrpc_call; +enum rxrpc_interruptibility { + RXRPC_INTERRUPTIBLE, /* Call is interruptible */ + RXRPC_PREINTERRUPTIBLE, /* Call can be cancelled whilst waiting for a slot */ + RXRPC_UNINTERRUPTIBLE, /* Call should not be interruptible at all */ +}; + /* * Debug ID counter for tracing. 
*/ @@ -41,7 +47,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, gfp_t, rxrpc_notify_rx_t, bool, - bool, + enum rxrpc_interruptibility, unsigned int); int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *, struct msghdr *, size_t, diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index fe42f986cd94..7603cf811f75 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -285,7 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, gfp_t gfp, rxrpc_notify_rx_t notify_rx, bool upgrade, - bool intr, + enum rxrpc_interruptibility interruptibility, unsigned int debug_id) { struct rxrpc_conn_parameters cp; @@ -310,7 +310,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, memset(&p, 0, sizeof(p)); p.user_call_ID = user_call_ID; p.tx_total_len = tx_total_len; - p.intr = intr; + p.interruptibility = interruptibility; memset(&cp, 0, sizeof(cp)); cp.local = rx->local; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 7d730c438404..1f72f43b082d 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -489,7 +489,6 @@ enum rxrpc_call_flag { RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */ RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */ RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */ - RXRPC_CALL_IS_INTR, /* The call is interruptible */ RXRPC_CALL_DISCONNECTED, /* The call has been disconnected */ }; @@ -598,6 +597,7 @@ struct rxrpc_call { atomic_t usage; u16 service_id; /* service ID */ u8 security_ix; /* Security type */ + enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */ u32 call_id; /* call ID on connection */ u32 cid; /* connection ID plus channel index */ int debug_id; /* debug ID for printks */ @@ -721,7 +721,7 @@ struct rxrpc_call_params { u32 normal; /* Max time since last call packet (msec) */ } timeouts; u8 nr_timeouts; /* Number of timeouts specified */ - bool intr; /* The call is interruptible */ + enum rxrpc_interruptibility interruptibility; /* How is interruptible is the call? 
*/ }; struct rxrpc_send_params { diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index c9f34b0a11df..f07970207b54 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -237,8 +237,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, return call; } - if (p->intr) - __set_bit(RXRPC_CALL_IS_INTR, &call->flags); + call->interruptibility = p->interruptibility; call->tx_total_len = p->tx_total_len; trace_rxrpc_call(call->debug_id, rxrpc_call_new_client, atomic_read(&call->usage), diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index ea7d4c21f889..f2a1a5dbb5a7 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -655,13 +655,20 @@ static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp) add_wait_queue_exclusive(&call->waitq, &myself); for (;;) { - if (test_bit(RXRPC_CALL_IS_INTR, &call->flags)) + switch (call->interruptibility) { + case RXRPC_INTERRUPTIBLE: + case RXRPC_PREINTERRUPTIBLE: set_current_state(TASK_INTERRUPTIBLE); - else + break; + case RXRPC_UNINTERRUPTIBLE: + default: set_current_state(TASK_UNINTERRUPTIBLE); + break; + } if (call->call_id) break; - if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) && + if ((call->interruptibility == RXRPC_INTERRUPTIBLE || + call->interruptibility == RXRPC_PREINTERRUPTIBLE) && signal_pending(current)) { ret = -ERESTARTSYS; break; diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index a13051d38097..1eccfb92c9e1 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -62,7 +62,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx, * Wait for space to appear in the Tx queue uninterruptibly, but with * a timeout of 2*RTT if no progress was made and a signal occurred. */ -static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, +static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx, struct rxrpc_call *call) { rxrpc_seq_t tx_start, tx_win; @@ -87,8 +87,7 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, if (call->state >= RXRPC_CALL_COMPLETE) return call->error; - if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) && - timeout == 0 && + if (timeout == 0 && tx_win == tx_start && signal_pending(current)) return -EINTR; @@ -102,6 +101,26 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, } } +/* + * Wait for space to appear in the Tx queue uninterruptibly. 
+ */ +static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, + struct rxrpc_call *call, + long *timeo) +{ + for (;;) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (rxrpc_check_tx_space(call, NULL)) + return 0; + + if (call->state >= RXRPC_CALL_COMPLETE) + return call->error; + + trace_rxrpc_transmit(call, rxrpc_transmit_wait); + *timeo = schedule_timeout(*timeo); + } +} + /* * wait for space to appear in the transmit/ACK window * - caller holds the socket locked @@ -119,10 +138,19 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx, add_wait_queue(&call->waitq, &myself); - if (waitall) - ret = rxrpc_wait_for_tx_window_nonintr(rx, call); - else - ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo); + switch (call->interruptibility) { + case RXRPC_INTERRUPTIBLE: + if (waitall) + ret = rxrpc_wait_for_tx_window_waitall(rx, call); + else + ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo); + break; + case RXRPC_PREINTERRUPTIBLE: + case RXRPC_UNINTERRUPTIBLE: + default: + ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo); + break; + } remove_wait_queue(&call->waitq, &myself); set_current_state(TASK_RUNNING); @@ -628,7 +656,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) .call.tx_total_len = -1, .call.user_call_ID = 0, .call.nr_timeouts = 0, - .call.intr = true, + .call.interruptibility = RXRPC_INTERRUPTIBLE, .abort_code = 0, .command = RXRPC_CMD_SEND_DATA, .exclusive = false, From 498b577660f08cef5d9e78e0ed6dcd4c0939e98c Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 13 Mar 2020 17:30:27 +0000 Subject: [PATCH 030/138] rxrpc: Fix sendmsg(MSG_WAITALL) handling Fix the handling of sendmsg() with MSG_WAITALL for userspace to round the timeout for when a signal occurs up to at least two jiffies as a 1 jiffy timeout may end up being effectively 0 if jiffies wraps at the wrong time. Fixes: bc5e3a546d55 ("rxrpc: Use MSG_WAITALL to tell sendmsg() to temporarily ignore signals") Signed-off-by: David Howells --- net/rxrpc/sendmsg.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 1eccfb92c9e1..0fcf157aa09f 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -71,8 +71,8 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx, rtt = READ_ONCE(call->peer->rtt); rtt2 = nsecs_to_jiffies64(rtt) * 2; - if (rtt2 < 1) - rtt2 = 1; + if (rtt2 < 2) + rtt2 = 2; timeout = rtt2; tx_start = READ_ONCE(call->tx_hard_ack); From 4636cf184d6d9a92a56c2554681ea520dd4fe49a Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 13 Mar 2020 13:36:01 +0000 Subject: [PATCH 031/138] afs: Fix some tracing details Fix a couple of tracelines to indicate the usage count after the atomic op, not the usage count before it to be consistent with other afs and rxrpc trace lines. Change the wording of the afs_call_trace_work trace ID label from "WORK" to "QUEUE" to reflect the fact that it's queueing work, not doing work. 
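The distinction driving this fix is which value the atomic helpers hand back: atomic_dec_return() reports the count after the decrement, while atomic_fetch_add_unless() reports the count before the increment, so a post-op traceline needs the raw return value in the first case and the return value plus one in the second. A minimal userspace analogue using the GCC __atomic builtins (illustrative only; it also drops the "unless" condition of the kernel helper):

/* refcount_trace_demo.c - userspace sketch of reporting post-op refcounts */
#include <stdio.h>

static int usage = 3;

int main(void)
{
	/* Like atomic_dec_return(): returns the value AFTER the decrement. */
	int n = __atomic_sub_fetch(&usage, 1, __ATOMIC_SEQ_CST);
	printf("PUT  count-after-op=%d\n", n);	/* trace n, not n + 1 */

	/* Like atomic_fetch_add_unless(): returns the value BEFORE the add. */
	int u = __atomic_fetch_add(&usage, 1, __ATOMIC_SEQ_CST);
	printf("WAKE count-after-op=%d\n", u + 1);	/* trace u + 1, not u */

	return 0;
}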
Fixes: 341f741f04be ("afs: Refcount the afs_call struct") Signed-off-by: David Howells --- fs/afs/rxrpc.c | 4 ++-- include/trace/events/afs.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 4c28712bb7f6..907d5948564a 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -169,7 +169,7 @@ void afs_put_call(struct afs_call *call) int n = atomic_dec_return(&call->usage); int o = atomic_read(&net->nr_outstanding_calls); - trace_afs_call(call, afs_call_trace_put, n + 1, o, + trace_afs_call(call, afs_call_trace_put, n, o, __builtin_return_address(0)); ASSERTCMP(n, >=, 0); @@ -736,7 +736,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall, u = atomic_fetch_add_unless(&call->usage, 1, 0); if (u != 0) { - trace_afs_call(call, afs_call_trace_wake, u, + trace_afs_call(call, afs_call_trace_wake, u + 1, atomic_read(&call->net->nr_outstanding_calls), __builtin_return_address(0)); diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 564ba1b5cf57..c612cabbc378 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -233,7 +233,7 @@ enum afs_cb_break_reason { EM(afs_call_trace_get, "GET ") \ EM(afs_call_trace_put, "PUT ") \ EM(afs_call_trace_wake, "WAKE ") \ - E_(afs_call_trace_work, "WORK ") + E_(afs_call_trace_work, "QUEUE") #define afs_server_traces \ EM(afs_server_trace_alloc, "ALLOC ") \ From dde9f095583b3f375ba23979045ee10dfcebec2f Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 13 Mar 2020 13:46:08 +0000 Subject: [PATCH 032/138] afs: Fix handling of an abort from a service handler When an AFS service handler function aborts a call, AF_RXRPC marks the call as complete - which means that it's not going to get any more packets from the receiver. This is a problem because reception of the final ACK is what triggers afs_deliver_to_call() to drop the final ref on the afs_call object. Instead, aborted AFS service calls may then just sit around waiting for ever or until they're displaced by a new call on the same connection channel or a connection-level abort. Fix this by calling afs_set_call_complete() to finalise the afs_call struct representing the call. However, we then need to drop the ref that stops the call from being deallocated. We can do this in afs_set_call_complete(), as the work queue is holding a separate ref of its own, but then we shouldn't do it in afs_process_async_call() and afs_delete_async_call(). call->drop_ref is set to indicate that a ref needs dropping for a call and this is dealt with when we transition a call to AFS_CALL_COMPLETE. But then we also need to get rid of the ref that pins an asynchronous client call. We can do this by the same mechanism, setting call->drop_ref for an async client call too. We can also get rid of call->incoming since nothing ever sets it and only one thing ever checks it (futilely). 
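Put together, the scheme is: any call that carries an extra self-reference (asynchronous client calls and preallocated incoming calls) sets drop_ref, and the first transition to the complete state releases that reference, so neither the async work item nor the service handler has to reason about which ref it still holds. A simplified, single-threaded userspace sketch of that idea (hypothetical names, no locking or tracing shown):

/* complete_drop_ref_demo.c - sketch of dropping the self-ref on completion */
#include <stdbool.h>
#include <stdio.h>

struct call {
	int  usage;	/* simplified refcount */
	bool drop_ref;	/* extra self-ref must be dropped on completion */
	bool complete;
};

static void put_call(struct call *c)
{
	if (--c->usage == 0)
		printf("call freed\n");
}

static void set_call_complete(struct call *c)
{
	if (c->complete)
		return;		/* only the first completion drops the ref */
	c->complete = true;
	if (c->drop_ref)
		put_call(c);	/* release the self-ref taken at setup */
}

int main(void)
{
	struct call c = { .usage = 2, .drop_ref = true };

	set_call_complete(&c);	/* drops the self-ref: usage 2 -> 1 */
	set_call_complete(&c);	/* idempotent: no double drop */
	put_call(&c);		/* caller's ref: usage 1 -> 0, freed */
	return 0;
}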
A trace of the rxrpc_call and afs_call struct ref counting looks like: -0 [001] ..s5 164.764892: rxrpc_call: c=00000002 SEE u=3 sp=rxrpc_new_incoming_call+0x473/0xb34 a=00000000442095b5 -0 [001] .Ns5 164.766001: rxrpc_call: c=00000002 QUE u=4 sp=rxrpc_propose_ACK+0xbe/0x551 a=00000000442095b5 -0 [001] .Ns4 164.766005: rxrpc_call: c=00000002 PUT u=3 sp=rxrpc_new_incoming_call+0xa3f/0xb34 a=00000000442095b5 -0 [001] .Ns7 164.766433: afs_call: c=00000002 WAKE u=2 o=11 sp=rxrpc_notify_socket+0x196/0x33c kworker/1:2-1810 [001] ...1 164.768409: rxrpc_call: c=00000002 SEE u=3 sp=rxrpc_process_call+0x25/0x7ae a=00000000442095b5 kworker/1:2-1810 [001] ...1 164.769439: rxrpc_tx_packet: c=00000002 e9f1a7a8:95786a88:00000008:09c5 00000001 00000000 02 22 ACK CallAck kworker/1:2-1810 [001] ...1 164.769459: rxrpc_call: c=00000002 PUT u=2 sp=rxrpc_process_call+0x74f/0x7ae a=00000000442095b5 kworker/1:2-1810 [001] ...1 164.770794: afs_call: c=00000002 QUEUE u=3 o=12 sp=afs_deliver_to_call+0x449/0x72c kworker/1:2-1810 [001] ...1 164.770829: afs_call: c=00000002 PUT u=2 o=12 sp=afs_process_async_call+0xdb/0x11e kworker/1:2-1810 [001] ...2 164.771084: rxrpc_abort: c=00000002 95786a88:00000008 s=0 a=1 e=1 K-1 kworker/1:2-1810 [001] ...1 164.771461: rxrpc_tx_packet: c=00000002 e9f1a7a8:95786a88:00000008:09c5 00000002 00000000 04 00 ABORT CallAbort kworker/1:2-1810 [001] ...1 164.771466: afs_call: c=00000002 PUT u=1 o=12 sp=SRXAFSCB_ProbeUuid+0xc1/0x106 The abort generated in SRXAFSCB_ProbeUuid(), labelled "K-1", indicates that the local filesystem/cache manager didn't recognise the UUID as its own. Fixes: 2067b2b3f484 ("afs: Fix the CB.ProbeUuid service handler to reply correctly") Signed-off-by: David Howells --- fs/afs/cmservice.c | 14 ++++++++++++-- fs/afs/internal.h | 12 ++++++++++-- fs/afs/rxrpc.c | 33 ++++----------------------------- 3 files changed, 26 insertions(+), 33 deletions(-) diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index ff3994a6be23..6765949b3aab 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -243,6 +243,17 @@ static void afs_cm_destructor(struct afs_call *call) call->buffer = NULL; } +/* + * Abort a service call from within an action function. + */ +static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error, + const char *why) +{ + rxrpc_kernel_abort_call(call->net->socket, call->rxcall, + abort_code, error, why); + afs_set_call_complete(call, error, 0); +} + /* * The server supplied a list of callbacks that it wanted to break. 
*/ @@ -510,8 +521,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work) if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0) afs_send_empty_reply(call); else - rxrpc_kernel_abort_call(call->net->socket, call->rxcall, - 1, 1, "K-1"); + afs_abort_service_call(call, 1, 1, "K-1"); afs_put_call(call); _leave(""); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 1d81fc4c3058..52de2112e1b1 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -154,7 +154,7 @@ struct afs_call { }; unsigned char unmarshall; /* unmarshalling phase */ unsigned char addr_ix; /* Address in ->alist */ - bool incoming; /* T if incoming call */ + bool drop_ref; /* T if need to drop ref for incoming call */ bool send_pages; /* T if data from mapping should be sent */ bool need_attention; /* T if RxRPC poked us */ bool async; /* T if asynchronous */ @@ -1209,8 +1209,16 @@ static inline void afs_set_call_complete(struct afs_call *call, ok = true; } spin_unlock_bh(&call->state_lock); - if (ok) + if (ok) { trace_afs_call_done(call); + + /* Asynchronous calls have two refs to release - one from the alloc and + * one queued with the work item - and we can't just deallocate the + * call because the work item may be queued again. + */ + if (call->drop_ref) + afs_put_call(call); + } } /* diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 907d5948564a..972e3aafa361 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -18,7 +18,6 @@ struct workqueue_struct *afs_async_calls; static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); -static void afs_delete_async_call(struct work_struct *); static void afs_process_async_call(struct work_struct *); static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); @@ -402,8 +401,10 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) /* If the call is going to be asynchronous, we need an extra ref for * the call to hold itself so the caller need not hang on to its ref. */ - if (call->async) + if (call->async) { afs_get_call(call, afs_call_trace_get); + call->drop_ref = true; + } /* create a call */ rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, @@ -585,8 +586,6 @@ static void afs_deliver_to_call(struct afs_call *call) done: if (call->type->done) call->type->done(call); - if (state == AFS_CALL_COMPLETE && call->incoming) - afs_put_call(call); out: _leave(""); return; @@ -745,21 +744,6 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall, } } -/* - * Delete an asynchronous call. The work item carries a ref to the call struct - * that we need to release. - */ -static void afs_delete_async_call(struct work_struct *work) -{ - struct afs_call *call = container_of(work, struct afs_call, async_work); - - _enter(""); - - afs_put_call(call); - - _leave(""); -} - /* * Perform I/O processing on an asynchronous call. The work item carries a ref * to the call struct that we either need to release or to pass on. @@ -775,16 +759,6 @@ static void afs_process_async_call(struct work_struct *work) afs_deliver_to_call(call); } - if (call->state == AFS_CALL_COMPLETE) { - /* We have two refs to release - one from the alloc and one - * queued with the work item - and we can't just deallocate the - * call because the work item may be queued again. 
- */ - call->async_work.func = afs_delete_async_call; - if (!queue_work(afs_async_calls, &call->async_work)) - afs_put_call(call); - } - afs_put_call(call); _leave(""); } @@ -811,6 +785,7 @@ void afs_charge_preallocation(struct work_struct *work) if (!call) break; + call->drop_ref = true; call->async = true; call->state = AFS_CALL_SV_AWAIT_OP_ID; init_waitqueue_head(&call->waitq); From 7d7587db0d7fd1138f2afcffdc46a8e15630b944 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 12 Mar 2020 21:40:06 +0000 Subject: [PATCH 033/138] afs: Fix client call Rx-phase signal handling Fix the handling of signals in client rxrpc calls made by the afs filesystem. Ignore signals completely, leaving call abandonment or connection loss to be detected by timeouts inside AF_RXRPC. Allowing a filesystem call to be interrupted after the entire request has been transmitted and an abort sent means that the server may or may not have done the action - and we don't know. It may even be worse than that for older servers. Fixes: bc5e3a546d55 ("rxrpc: Use MSG_WAITALL to tell sendmsg() to temporarily ignore signals") Signed-off-by: David Howells --- fs/afs/rxrpc.c | 34 ++-------------------------------- include/net/af_rxrpc.h | 4 +--- net/rxrpc/af_rxrpc.c | 33 +++------------------------------ net/rxrpc/ar-internal.h | 1 - net/rxrpc/input.c | 1 - 5 files changed, 6 insertions(+), 67 deletions(-) diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 972e3aafa361..1ecc67da6c1a 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -604,11 +604,7 @@ call_complete: long afs_wait_for_call_to_complete(struct afs_call *call, struct afs_addr_cursor *ac) { - signed long rtt2, timeout; long ret; - bool stalled = false; - u64 rtt; - u32 life, last_life; bool rxrpc_complete = false; DECLARE_WAITQUEUE(myself, current); @@ -619,14 +615,6 @@ long afs_wait_for_call_to_complete(struct afs_call *call, if (ret < 0) goto out; - rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); - rtt2 = nsecs_to_jiffies64(rtt) * 2; - if (rtt2 < 2) - rtt2 = 2; - - timeout = rtt2; - rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life); - add_wait_queue(&call->waitq, &myself); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); @@ -637,37 +625,19 @@ long afs_wait_for_call_to_complete(struct afs_call *call, call->need_attention = false; __set_current_state(TASK_RUNNING); afs_deliver_to_call(call); - timeout = rtt2; continue; } if (afs_check_call_state(call, AFS_CALL_COMPLETE)) break; - if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) { + if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) { /* rxrpc terminated the call. 
*/ rxrpc_complete = true; break; } - if (call->intr && timeout == 0 && - life == last_life && signal_pending(current)) { - if (stalled) - break; - __set_current_state(TASK_RUNNING); - rxrpc_kernel_probe_life(call->net->socket, call->rxcall); - timeout = rtt2; - stalled = true; - continue; - } - - if (life != last_life) { - timeout = rtt2; - last_life = life; - stalled = false; - } - - timeout = schedule_timeout(timeout); + schedule(); } remove_wait_queue(&call->waitq, &myself); diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 8e547b4d88c8..04e97bab6f28 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -64,9 +64,7 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); -bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *, - u32 *); -void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); +bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, ktime_t *); diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 7603cf811f75..15ee92d79581 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -371,44 +371,17 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call); * rxrpc_kernel_check_life - Check to see whether a call is still alive * @sock: The socket the call is on * @call: The call to check - * @_life: Where to store the life value * - * Allow a kernel service to find out whether a call is still alive - ie. we're - * getting ACKs from the server. Passes back in *_life a number representing - * the life state which can be compared to that returned by a previous call and - * return true if the call is still alive. - * - * If the life state stalls, rxrpc_kernel_probe_life() should be called and - * then 2RTT waited. + * Allow a kernel service to find out whether a call is still alive - + * ie. whether it has completed. */ bool rxrpc_kernel_check_life(const struct socket *sock, - const struct rxrpc_call *call, - u32 *_life) + const struct rxrpc_call *call) { - *_life = call->acks_latest; return call->state != RXRPC_CALL_COMPLETE; } EXPORT_SYMBOL(rxrpc_kernel_check_life); -/** - * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive - * @sock: The socket the call is on - * @call: The call to check - * - * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to - * find out whether a call is still alive by pinging it. This should cause the - * life state to be bumped in about 2*RTT. - * - * The must be called in TASK_RUNNING state on pain of might_sleep() objecting. - */ -void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call) -{ - rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false, - rxrpc_propose_ack_ping_for_check_life); - rxrpc_send_ack_packet(call, true, NULL); -} -EXPORT_SYMBOL(rxrpc_kernel_probe_life); - /** * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call. 
* @sock: The socket the call is on diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 1f72f43b082d..3eb1ab40ca5c 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -675,7 +675,6 @@ struct rxrpc_call { /* transmission-phase ACK management */ ktime_t acks_latest_ts; /* Timestamp of latest ACK received */ - rxrpc_serial_t acks_latest; /* serial number of latest ACK received */ rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */ rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */ rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */ diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index ef10fbf71b15..69e09d69c896 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -882,7 +882,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) before(prev_pkt, call->ackr_prev_seq)) goto out; call->acks_latest_ts = skb->tstamp; - call->acks_latest = sp->hdr.serial; call->ackr_first_seq = first_soft_ack; call->ackr_prev_seq = prev_pkt; From b1be2e8cd290f620777bfdb8aa00890cd2fa02b5 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Wed, 11 Mar 2020 22:42:27 -0700 Subject: [PATCH 034/138] net_sched: hold rtnl lock in tcindex_partial_destroy_work() syzbot reported a use-after-free in tcindex_dump(). This is due to the lack of RTNL in the deferred rcu work. We queue this work with RTNL in tcindex_change(), later, tcindex_dump() is called: fh = tp->ops->get(tp, t->tcm_handle); ... err = tp->ops->change(..., &fh, ...); tfilter_notify(..., fh, ...); but there is nothing to serialize the pending tcindex_partial_destroy_work() with tcindex_dump(). Fix this by simply holding RTNL in tcindex_partial_destroy_work(), so that it won't be called until RTNL is released after tc_new_tfilter() is completed. Reported-and-tested-by: syzbot+653090db2562495901dc@syzkaller.appspotmail.com Fixes: 3d210534cc93 ("net_sched: fix a race condition in tcindex_destroy()") Cc: Jamal Hadi Salim Cc: Jiri Pirko Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/cls_tcindex.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 09b7dc5fe7e0..f2cb24b6f0cf 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -261,8 +261,10 @@ static void tcindex_partial_destroy_work(struct work_struct *work) struct tcindex_data, rwork); + rtnl_lock(); kfree(p->perfect); kfree(p); + rtnl_unlock(); } static void tcindex_free_perfect_hash(struct tcindex_data *cp) From 0d1c3530e1bd38382edef72591b78e877e0edcd3 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Wed, 11 Mar 2020 22:42:28 -0700 Subject: [PATCH 035/138] net_sched: keep alloc_hash updated after hash allocation In commit 599be01ee567 ("net_sched: fix an OOB access in cls_tcindex") I moved cp->hash calculation before the first tcindex_alloc_perfect_hash(), but cp->alloc_hash is left untouched. This difference could lead to another out of bound access. cp->alloc_hash should always be the size allocated, we should update it after this tcindex_alloc_perfect_hash(). Reported-and-tested-by: syzbot+dcc34d54d68ef7d2d53d@syzkaller.appspotmail.com Reported-and-tested-by: syzbot+c72da7b9ed57cde6fca2@syzkaller.appspotmail.com Fixes: 599be01ee567 ("net_sched: fix an OOB access in cls_tcindex") Cc: Jamal Hadi Salim Cc: Jiri Pirko Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller --- net/sched/cls_tcindex.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index f2cb24b6f0cf..9904299424a1 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -359,6 +359,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, if (tcindex_alloc_perfect_hash(net, cp) < 0) goto errout; + cp->alloc_hash = cp->hash; for (i = 0; i < min(cp->hash, p->hash); i++) cp->perfect[i].res = p->perfect[i].res; balloc = 1; From 13d0f7b814d9b4c67e60d8c2820c86ea181e7d99 Mon Sep 17 00:00:00 2001 From: Bruno Meneguele Date: Thu, 12 Mar 2020 20:08:20 -0300 Subject: [PATCH 036/138] net/bpfilter: fix dprintf usage for /dev/kmsg The bpfilter UMH code was recently changed to log its informative messages to /dev/kmsg, however this interface doesn't support SEEK_CUR yet, used by dprintf(). As result dprintf() returns -EINVAL and doesn't log anything. However there already had some discussions about supporting SEEK_CUR into /dev/kmsg interface in the past it wasn't concluded. Since the only user of that from userspace perspective inside the kernel is the bpfilter UMH (userspace) module it's better to correct it here instead waiting a conclusion on the interface. Fixes: 36c4357c63f3 ("net: bpfilter: print umh messages to /dev/kmsg") Signed-off-by: Bruno Meneguele Signed-off-by: David S. Miller --- net/bpfilter/main.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c index 77396a098fbe..efea4874743e 100644 --- a/net/bpfilter/main.c +++ b/net/bpfilter/main.c @@ -10,7 +10,7 @@ #include #include "msgfmt.h" -int debug_fd; +FILE *debug_f; static int handle_get_cmd(struct mbox_request *cmd) { @@ -35,9 +35,10 @@ static void loop(void) struct mbox_reply reply; int n; + fprintf(debug_f, "testing the buffer\n"); n = read(0, &req, sizeof(req)); if (n != sizeof(req)) { - dprintf(debug_fd, "invalid request %d\n", n); + fprintf(debug_f, "invalid request %d\n", n); return; } @@ -47,7 +48,7 @@ static void loop(void) n = write(1, &reply, sizeof(reply)); if (n != sizeof(reply)) { - dprintf(debug_fd, "reply failed %d\n", n); + fprintf(debug_f, "reply failed %d\n", n); return; } } @@ -55,9 +56,10 @@ static void loop(void) int main(void) { - debug_fd = open("/dev/kmsg", 00000002); - dprintf(debug_fd, "Started bpfilter\n"); + debug_f = fopen("/dev/kmsg", "w"); + setvbuf(debug_f, 0, _IOLBF, 0); + fprintf(debug_f, "Started bpfilter\n"); loop(); - close(debug_fd); + fclose(debug_f); return 0; } From fc191af1bb0d069dc7e981076e8b80af21f1e61d Mon Sep 17 00:00:00 2001 From: Markus Fuchs Date: Fri, 6 Mar 2020 17:38:48 +0100 Subject: [PATCH 037/138] net: stmmac: platform: Fix misleading interrupt error msg Not every stmmac based platform makes use of the eth_wake_irq or eth_lpi interrupts. Use the platform_get_irq_byname_optional variant for these interrupts, so no error message is displayed, if they can't be found. Rather print an information to hint something might be wrong to assist debugging on platforms which use these interrupts. Signed-off-by: Markus Fuchs Signed-off-by: David S. 
Miller --- .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index d10ac54bf385..13fafd905db8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -663,16 +663,22 @@ int stmmac_get_platform_resources(struct platform_device *pdev, * In case the wake up interrupt is not passed from the platform * so the driver will continue to use the mac irq (ndev->irq) */ - stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + stmmac_res->wol_irq = + platform_get_irq_byname_optional(pdev, "eth_wake_irq"); if (stmmac_res->wol_irq < 0) { if (stmmac_res->wol_irq == -EPROBE_DEFER) return -EPROBE_DEFER; + dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n"); stmmac_res->wol_irq = stmmac_res->irq; } - stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (stmmac_res->lpi_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; + stmmac_res->lpi_irq = + platform_get_irq_byname_optional(pdev, "eth_lpi"); + if (stmmac_res->lpi_irq < 0) { + if (stmmac_res->lpi_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(&pdev->dev, "IRQ eth_lpi not found\n"); + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res); From 46ea929b2b3f66e6a9bc91adbb9ca2157065f9b2 Mon Sep 17 00:00:00 2001 From: Shahjada Abul Husain Date: Fri, 13 Mar 2020 14:32:57 +0530 Subject: [PATCH 038/138] cxgb4: fix delete filter entry fail in unload path Currently, the hardware TID index is assumed to start from index 0. However, with the following changeset, commit c21939998802 ("cxgb4: add support for high priority filters") hardware TID index can start after the high priority region, which has introduced a regression resulting in remove filters entry failure for cxgb4 unload path. This patch fix that. Fixes: c21939998802 ("cxgb4: add support for high priority filters") Signed-off-by: Shahjada Abul Husain Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 2a2938bbb93a..fc05248984fc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -902,7 +902,7 @@ void clear_all_filters(struct adapter *adapter) adapter->tids.tid_tab[i]; if (f && (f->valid || f->pending)) - cxgb4_del_filter(dev, i, &f->fs); + cxgb4_del_filter(dev, f->tid, &f->fs); } sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A); @@ -910,7 +910,7 @@ void clear_all_filters(struct adapter *adapter) f = (struct filter_entry *)adapter->tids.tid_tab[i]; if (f && (f->valid || f->pending)) - cxgb4_del_filter(dev, i, &f->fs); + cxgb4_del_filter(dev, f->tid, &f->fs); } } } From e1f8f78ffe9854308b9e12a73ebe4e909074fc33 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 13 Mar 2020 13:39:36 +0200 Subject: [PATCH 039/138] net: ip_gre: Separate ERSPAN newlink / changelink callbacks ERSPAN shares most of the code path with GRE and gretap code. While that helps keep the code compact, it is also error prone. Currently a broken userspace can turn a gretap tunnel into a de facto ERSPAN one by passing IFLA_GRE_ERSPAN_VER. There has been a similar issue in ip6gretap in the past. 
To prevent these problems in future, split the newlink and changelink code paths. Split the ERSPAN code out of ipgre_netlink_parms() into a new function erspan_netlink_parms(). Extract a piece of common logic from ipgre_newlink() and ipgre_changelink() into ipgre_newlink_encap_setup(). Add erspan_newlink() and erspan_changelink(). Fixes: 84e54fe0a5ea ("gre: introduce native tunnel support for ERSPAN") Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- net/ipv4/ip_gre.c | 103 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 85 insertions(+), 18 deletions(-) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 8274f98c511c..7765c65fc7d2 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1153,6 +1153,22 @@ static int ipgre_netlink_parms(struct net_device *dev, if (data[IFLA_GRE_FWMARK]) *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]); + return 0; +} + +static int erspan_netlink_parms(struct net_device *dev, + struct nlattr *data[], + struct nlattr *tb[], + struct ip_tunnel_parm *parms, + __u32 *fwmark) +{ + struct ip_tunnel *t = netdev_priv(dev); + int err; + + err = ipgre_netlink_parms(dev, data, tb, parms, fwmark); + if (err) + return err; + if (data[IFLA_GRE_ERSPAN_VER]) { t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); @@ -1276,45 +1292,70 @@ static void ipgre_tap_setup(struct net_device *dev) ip_tunnel_setup(dev, gre_tap_net_id); } -static int ipgre_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) +static int +ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[]) { - struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; - __u32 fwmark = 0; - int err; if (ipgre_netlink_encap_parms(data, &ipencap)) { struct ip_tunnel *t = netdev_priv(dev); - err = ip_tunnel_encap_setup(t, &ipencap); + int err = ip_tunnel_encap_setup(t, &ipencap); if (err < 0) return err; } + return 0; +} + +static int ipgre_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct ip_tunnel_parm p; + __u32 fwmark = 0; + int err; + + err = ipgre_newlink_encap_setup(dev, data); + if (err) + return err; + err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark); if (err < 0) return err; return ip_tunnel_newlink(dev, tb, &p, fwmark); } +static int erspan_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct ip_tunnel_parm p; + __u32 fwmark = 0; + int err; + + err = ipgre_newlink_encap_setup(dev, data); + if (err) + return err; + + err = erspan_netlink_parms(dev, data, tb, &p, &fwmark); + if (err) + return err; + return ip_tunnel_newlink(dev, tb, &p, fwmark); +} + static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct ip_tunnel *t = netdev_priv(dev); - struct ip_tunnel_encap ipencap; __u32 fwmark = t->fwmark; struct ip_tunnel_parm p; int err; - if (ipgre_netlink_encap_parms(data, &ipencap)) { - err = ip_tunnel_encap_setup(t, &ipencap); - - if (err < 0) - return err; - } + err = ipgre_newlink_encap_setup(dev, data); + if (err) + return err; err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark); if (err < 0) @@ -1327,8 +1368,34 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], t->parms.i_flags = p.i_flags; t->parms.o_flags = p.o_flags; - if (strcmp(dev->rtnl_link_ops->kind, "erspan")) - 
ipgre_link_update(dev, !tb[IFLA_MTU]); + ipgre_link_update(dev, !tb[IFLA_MTU]); + + return 0; +} + +static int erspan_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct ip_tunnel *t = netdev_priv(dev); + __u32 fwmark = t->fwmark; + struct ip_tunnel_parm p; + int err; + + err = ipgre_newlink_encap_setup(dev, data); + if (err) + return err; + + err = erspan_netlink_parms(dev, data, tb, &p, &fwmark); + if (err < 0) + return err; + + err = ip_tunnel_changelink(dev, tb, &p, fwmark); + if (err < 0) + return err; + + t->parms.i_flags = p.i_flags; + t->parms.o_flags = p.o_flags; return 0; } @@ -1519,8 +1586,8 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = { .priv_size = sizeof(struct ip_tunnel), .setup = erspan_setup, .validate = erspan_validate, - .newlink = ipgre_newlink, - .changelink = ipgre_changelink, + .newlink = erspan_newlink, + .changelink = erspan_changelink, .dellink = ip_tunnel_dellink, .get_size = ipgre_get_size, .fill_info = ipgre_fill_info, From 61fad6816fc10fb8793a925d5c1256d1c3db0cd2 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 13 Mar 2020 12:18:09 -0400 Subject: [PATCH 040/138] net/packet: tpacket_rcv: avoid a producer race condition PACKET_RX_RING can cause multiple writers to access the same slot if a fast writer wraps the ring while a slow writer is still copying. This is particularly likely with few, large, slots (e.g., GSO packets). Synchronize kernel thread ownership of rx ring slots with a bitmap. Writers acquire a slot race-free by testing tp_status TP_STATUS_KERNEL while holding the sk receive queue lock. They release this lock before copying and set tp_status to TP_STATUS_USER to release to userspace when done. During copying, another writer may take the lock, also see TP_STATUS_KERNEL, and start writing to the same slot. Introduce a new rx_owner_map bitmap with a bit per slot. To acquire a slot, test and set with the lock held. To release race-free, update tp_status and owner bit as a transaction, so take the lock again. This is the one of a variety of discussed options (see Link below): * instead of a shadow ring, embed the data in the slot itself, such as in tp_padding. But any test for this field may match a value left by userspace, causing deadlock. * avoid the lock on release. This leaves a small race if releasing the shadow slot before setting TP_STATUS_USER. The below reproducer showed that this race is not academic. If releasing the slot after tp_status, the race is more subtle. See the first link for details. * add a new tp_status TP_KERNEL_OWNED to avoid the transactional store of two fields. But, legacy applications may interpret all non-zero tp_status as owned by the user. As libpcap does. So this is possible only opt-in by newer processes. It can be added as an optional mode. * embed the struct at the tail of pg_vec to avoid extra allocation. The implementation proved no less complex than a separate field. The additional locking cost on release adds contention, no different than scaling on multicore or multiqueue h/w. In practice, below reproducer nor small packet tcpdump showed a noticeable change in perf report in cycles spent in spinlock. Where contention is problematic, packet sockets support mitigation through PACKET_FANOUT. And we can consider adding opt-in state TP_KERNEL_OWNED. Easy to reproduce by running multiple netperf or similar TCP_STREAM flows concurrently with `tcpdump -B 129 -n greater 60000`. 
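The acquire/release protocol can be condensed: claim a free slot by setting its ownership bit while holding the receive-queue lock, copy the frame with the lock dropped, then retake the lock to flip the slot status and clear the ownership bit as one step. A userspace sketch with a mutex standing in for the sk_receive_queue spinlock (sizes and names are illustrative only, not the af_packet code):

/* slot_owner_demo.c - userspace sketch of per-slot ownership bits */
#include <pthread.h>
#include <stdbool.h>
#include <string.h>

#define NR_SLOTS      8
#define STATUS_KERNEL 0
#define STATUS_USER   1

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long owner_map;		/* one bit per slot */
static int status[NR_SLOTS];		/* stands in for tp_status */
static char slot_data[NR_SLOTS][64];

/* Acquire: only one writer may claim a STATUS_KERNEL slot. */
static bool acquire_slot(int slot)
{
	bool ok = false;

	pthread_mutex_lock(&queue_lock);
	if (status[slot] == STATUS_KERNEL && !(owner_map & (1UL << slot))) {
		owner_map |= 1UL << slot;
		ok = true;
	}
	pthread_mutex_unlock(&queue_lock);
	return ok;
}

/* Copy outside the lock, then publish status and ownership together. */
static void fill_and_release(int slot, const char *payload)
{
	strncpy(slot_data[slot], payload, sizeof(slot_data[slot]) - 1);

	pthread_mutex_lock(&queue_lock);
	status[slot] = STATUS_USER;
	owner_map &= ~(1UL << slot);
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	if (acquire_slot(0))
		fill_and_release(0, "packet");
	return 0;
}

With the bit in place, a second writer that wraps the ring and reaches the same slot fails acquire_slot() and drops the packet instead of overwriting a frame that is still being copied.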
Based on an earlier patchset by Jon Rosen. See links below. I believe this issue goes back to the introduction of tpacket_rcv, which predates git history. Link: https://www.mail-archive.com/netdev@vger.kernel.org/msg237222.html Suggested-by: Jon Rosen Signed-off-by: Willem de Bruijn Signed-off-by: Jon Rosen Signed-off-by: David S. Miller --- net/packet/af_packet.c | 21 +++++++++++++++++++++ net/packet/internal.h | 5 ++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index e5b0986215d2..29bd405adbbd 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2173,6 +2173,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct timespec64 ts; __u32 ts_status; bool is_drop_n_account = false; + unsigned int slot_id = 0; bool do_vnet = false; /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. @@ -2275,6 +2276,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (!h.raw) goto drop_n_account; + if (po->tp_version <= TPACKET_V2) { + slot_id = po->rx_ring.head; + if (test_bit(slot_id, po->rx_ring.rx_owner_map)) + goto drop_n_account; + __set_bit(slot_id, po->rx_ring.rx_owner_map); + } + if (do_vnet && virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), @@ -2380,7 +2388,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, #endif if (po->tp_version <= TPACKET_V2) { + spin_lock(&sk->sk_receive_queue.lock); __packet_set_status(po, h.raw, status); + __clear_bit(slot_id, po->rx_ring.rx_owner_map); + spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); } else { prb_clear_blk_fill_status(&po->rx_ring); @@ -4277,6 +4288,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, { struct pgv *pg_vec = NULL; struct packet_sock *po = pkt_sk(sk); + unsigned long *rx_owner_map = NULL; int was_running, order = 0; struct packet_ring_buffer *rb; struct sk_buff_head *rb_queue; @@ -4362,6 +4374,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, } break; default: + if (!tx_ring) { + rx_owner_map = bitmap_alloc(req->tp_frame_nr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (!rx_owner_map) + goto out_free_pg_vec; + } break; } } @@ -4391,6 +4409,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, err = 0; spin_lock_bh(&rb_queue->lock); swap(rb->pg_vec, pg_vec); + if (po->tp_version <= TPACKET_V2) + swap(rb->rx_owner_map, rx_owner_map); rb->frame_max = (req->tp_frame_nr - 1); rb->head = 0; rb->frame_size = req->tp_frame_size; @@ -4422,6 +4442,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, } out_free_pg_vec: + bitmap_free(rx_owner_map); if (pg_vec) free_pg_vec(pg_vec, order, req->tp_block_nr); out: diff --git a/net/packet/internal.h b/net/packet/internal.h index 82fb2b10f790..907f4cd2a718 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -70,7 +70,10 @@ struct packet_ring_buffer { unsigned int __percpu *pending_refcnt; - struct tpacket_kbdq_core prb_bdqc; + union { + unsigned long *rx_owner_map; + struct tpacket_kbdq_core prb_bdqc; + }; }; extern struct mutex fanout_mutex; From 0fda7600c2e174fe27e9cf02e78e345226e441fa Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 14 Mar 2020 08:18:42 +0100 Subject: [PATCH 041/138] geneve: move debug check after netdev unregister The debug check must be done after unregister_netdevice_many() call -- the list_del() for this is done inside .ndo_stop. 
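The constraint is purely one of ordering: the per-namespace socket list is only emptied from .ndo_stop, which runs inside unregister_netdevice_many(), so the emptiness check is meaningful only afterwards. A tiny userspace analogue of that ordering (hypothetical names):

/* exit_order_demo.c - check emptiness only after teardown has run */
#include <assert.h>
#include <stdio.h>

static int sock_list_len = 1;		/* stands in for gn->sock_list */

static void ndo_stop(void)		/* the list_del() happens in here */
{
	sock_list_len = 0;
}

static void unregister_netdevice_many(void)
{
	ndo_stop();			/* teardown empties the list */
}

int main(void)
{
	/* Checking before teardown would always warn; check afterwards. */
	unregister_netdevice_many();
	assert(sock_list_len == 0);	/* the WARN_ON_ONCE analogue */
	printf("sock_list empty after unregister\n");
	return 0;
}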
Fixes: 2843a25348f8 ("geneve: speedup geneve tunnels dismantle") Reported-and-tested-by: Cc: Haishuang Yan Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- drivers/net/geneve.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 75757e9954ba..09f279c0182b 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1845,8 +1845,6 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head) if (!net_eq(dev_net(geneve->dev), net)) unregister_netdevice_queue(geneve->dev, head); } - - WARN_ON_ONCE(!list_empty(&gn->sock_list)); } static void __net_exit geneve_exit_batch_net(struct list_head *net_list) @@ -1861,6 +1859,12 @@ static void __net_exit geneve_exit_batch_net(struct list_head *net_list) /* unregister the devices gathered above */ unregister_netdevice_many(&list); rtnl_unlock(); + + list_for_each_entry(net, net_list, exit_list) { + const struct geneve_net *gn = net_generic(net, geneve_net_id); + + WARN_ON_ONCE(!list_empty(&gn->sock_list)); + } } static struct pernet_operations geneve_net_ops = { From cb851c01b51bb610a9093d01624565ce1d4e38fa Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 15 Mar 2020 10:07:35 +0200 Subject: [PATCH 042/138] mlxsw: reg: Increase register field length to 31 bits The cited commit set a value of 2^31-1 in order to "disable" the shaper on a given a port. However, the length of the maximum shaper rate field was not updated from 28 bits to 31 bits, which means ports are still limited to ~268Gbps despite supporting speeds of 400Gbps. Fix this by increasing the field's length. Fixes: 92afbfedb77d ("mlxsw: reg: Increase MLXSW_REG_QEEC_MAS_DIS") Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index dd6685156396..e05d1d1be2fd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3572,7 +3572,7 @@ MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1); * When in bytes mode, value is specified in units of 1000bps. * Access: RW */ -MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28); +MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 31); /* reg_qeec_de * DWRR configuration enable. Enables configuration of the dwrr and From 13bde56c5b7cc6489eb50c40449d461c7ca4da2d Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 15 Mar 2020 10:34:58 +0100 Subject: [PATCH 043/138] net: caif: Use scnprintf() for avoiding potential buffer overflow Since snprintf() returns the would-be-output size instead of the actual output size, the succeeding calls may go beyond the given buffer limit. Fix it by replacing with scnprintf(). Cc: "David S . Miller" Cc: netdev@vger.kernel.org Signed-off-by: Takashi Iwai Signed-off-by: David S. Miller --- drivers/net/caif/caif_spi.c | 68 ++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 8e81bdf98ac6..63f2548f5b1b 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -141,29 +141,29 @@ static ssize_t dbgfs_state(struct file *file, char __user *user_buf, return 0; /* Print out debug information. 
*/ - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "CAIF SPI debug information:\n"); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "CAIF SPI debug information:\n"); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "STATE: %d\n", cfspi->dbg_state); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Previous CMD: 0x%x\n", cfspi->pcmd); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Current CMD: 0x%x\n", cfspi->cmd); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Previous TX len: %d\n", cfspi->tx_ppck_len); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Previous RX len: %d\n", cfspi->rx_ppck_len); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Current TX len: %d\n", cfspi->tx_cpck_len); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Current RX len: %d\n", cfspi->rx_cpck_len); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Next TX len: %d\n", cfspi->tx_npck_len); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Next RX len: %d\n", cfspi->rx_npck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "STATE: %d\n", cfspi->dbg_state); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Previous CMD: 0x%x\n", cfspi->pcmd); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current CMD: 0x%x\n", cfspi->cmd); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Previous TX len: %d\n", cfspi->tx_ppck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Previous RX len: %d\n", cfspi->rx_ppck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current TX len: %d\n", cfspi->tx_cpck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current RX len: %d\n", cfspi->rx_cpck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Next TX len: %d\n", cfspi->tx_npck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Next RX len: %d\n", cfspi->rx_npck_len); if (len > DEBUGFS_BUF_SIZE) len = DEBUGFS_BUF_SIZE; @@ -180,23 +180,23 @@ static ssize_t print_frame(char *buf, size_t size, char *frm, int len = 0; int i; for (i = 0; i < count; i++) { - len += snprintf((buf + len), (size - len), + len += scnprintf((buf + len), (size - len), "[0x" BYTE_HEX_FMT "]", frm[i]); if ((i == cut) && (count > (cut * 2))) { /* Fast forward. */ i = count - cut; - len += snprintf((buf + len), (size - len), - "--- %zu bytes skipped ---\n", - count - (cut * 2)); + len += scnprintf((buf + len), (size - len), + "--- %zu bytes skipped ---\n", + count - (cut * 2)); } if ((!(i % 10)) && i) { - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "\n"); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "\n"); } } - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n"); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n"); return len; } @@ -214,18 +214,18 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf, return 0; /* Print out debug information. 
*/ - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Current frame:\n"); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current frame:\n"); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Tx data (Len: %d):\n", cfspi->tx_cpck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Tx data (Len: %d):\n", cfspi->tx_cpck_len); len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len), cfspi->xfer.va_tx[0], (cfspi->tx_cpck_len + SPI_CMD_SZ), 100); - len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), - "Rx data (Len: %d):\n", cfspi->rx_cpck_len); + len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Rx data (Len: %d):\n", cfspi->rx_cpck_len); len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len), cfspi->xfer.va_rx, From 4a348601eb9131893c22b6ed2d3b6ba2bafc2391 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 15 Mar 2020 10:34:59 +0100 Subject: [PATCH 044/138] net: mlx4: Use scnprintf() for avoiding potential buffer overflow Since snprintf() returns the would-be-output size instead of the actual output size, the succeeding calls may go beyond the given buffer limit. Fix it by replacing with scnprintf(). Cc: "David S . Miller" Cc: Tariq Toukan To: netdev@vger.kernel.org Signed-off-by: Takashi Iwai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/mcg.c | 62 ++++++++++++------------ 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 9c481823b3e8..9486caecfbdc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -906,59 +906,59 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str, int len = 0; mlx4_err(dev, "%s", str); - len += snprintf(buf + len, BUF_SIZE - len, - "port = %d prio = 0x%x qp = 0x%x ", - rule->port, rule->priority, rule->qpn); + len += scnprintf(buf + len, BUF_SIZE - len, + "port = %d prio = 0x%x qp = 0x%x ", + rule->port, rule->priority, rule->qpn); list_for_each_entry(cur, &rule->list, list) { switch (cur->id) { case MLX4_NET_TRANS_RULE_ID_ETH: - len += snprintf(buf + len, BUF_SIZE - len, - "dmac = %pM ", &cur->eth.dst_mac); + len += scnprintf(buf + len, BUF_SIZE - len, + "dmac = %pM ", &cur->eth.dst_mac); if (cur->eth.ether_type) - len += snprintf(buf + len, BUF_SIZE - len, - "ethertype = 0x%x ", - be16_to_cpu(cur->eth.ether_type)); + len += scnprintf(buf + len, BUF_SIZE - len, + "ethertype = 0x%x ", + be16_to_cpu(cur->eth.ether_type)); if (cur->eth.vlan_id) - len += snprintf(buf + len, BUF_SIZE - len, - "vlan-id = %d ", - be16_to_cpu(cur->eth.vlan_id)); + len += scnprintf(buf + len, BUF_SIZE - len, + "vlan-id = %d ", + be16_to_cpu(cur->eth.vlan_id)); break; case MLX4_NET_TRANS_RULE_ID_IPV4: if (cur->ipv4.src_ip) - len += snprintf(buf + len, BUF_SIZE - len, - "src-ip = %pI4 ", - &cur->ipv4.src_ip); + len += scnprintf(buf + len, BUF_SIZE - len, + "src-ip = %pI4 ", + &cur->ipv4.src_ip); if (cur->ipv4.dst_ip) - len += snprintf(buf + len, BUF_SIZE - len, - "dst-ip = %pI4 ", - &cur->ipv4.dst_ip); + len += scnprintf(buf + len, BUF_SIZE - len, + "dst-ip = %pI4 ", + &cur->ipv4.dst_ip); break; case MLX4_NET_TRANS_RULE_ID_TCP: case MLX4_NET_TRANS_RULE_ID_UDP: if (cur->tcp_udp.src_port) - len += snprintf(buf + len, BUF_SIZE - len, - "src-port = %d ", - be16_to_cpu(cur->tcp_udp.src_port)); + len += scnprintf(buf + len, BUF_SIZE - len, + "src-port = %d ", + be16_to_cpu(cur->tcp_udp.src_port)); if (cur->tcp_udp.dst_port) - len += snprintf(buf + len, 
BUF_SIZE - len, - "dst-port = %d ", - be16_to_cpu(cur->tcp_udp.dst_port)); + len += scnprintf(buf + len, BUF_SIZE - len, + "dst-port = %d ", + be16_to_cpu(cur->tcp_udp.dst_port)); break; case MLX4_NET_TRANS_RULE_ID_IB: - len += snprintf(buf + len, BUF_SIZE - len, - "dst-gid = %pI6\n", cur->ib.dst_gid); - len += snprintf(buf + len, BUF_SIZE - len, - "dst-gid-mask = %pI6\n", - cur->ib.dst_gid_msk); + len += scnprintf(buf + len, BUF_SIZE - len, + "dst-gid = %pI6\n", cur->ib.dst_gid); + len += scnprintf(buf + len, BUF_SIZE - len, + "dst-gid-mask = %pI6\n", + cur->ib.dst_gid_msk); break; case MLX4_NET_TRANS_RULE_ID_VXLAN: - len += snprintf(buf + len, BUF_SIZE - len, - "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); + len += scnprintf(buf + len, BUF_SIZE - len, + "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); break; case MLX4_NET_TRANS_RULE_ID_IPV6: break; @@ -967,7 +967,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str, break; } } - len += snprintf(buf + len, BUF_SIZE - len, "\n"); + len += scnprintf(buf + len, BUF_SIZE - len, "\n"); mlx4_err(dev, "%s", buf); if (len >= BUF_SIZE) From 413ae546f8726ac0652e59fcf97561fc21f52653 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 15 Mar 2020 10:35:00 +0100 Subject: [PATCH 045/138] net: nfp: Use scnprintf() for avoiding potential buffer overflow Since snprintf() returns the would-be-output size instead of the actual output size, the succeeding calls may go beyond the given buffer limit. Fix it by replacing with scnprintf(). Reviewed-by: Simon Horman Cc: "David S . Miller" Cc: Jakub Kicinski Cc: oss-drivers@netronome.com To: netdev@vger.kernel.org Signed-off-by: Takashi Iwai Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index b454db283aef..684e4e036c55 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -616,7 +616,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) if (bar->iomem) { int pf; - msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); + msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); atomic_inc(&bar->refcnt); bars_free--; @@ -661,7 +661,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */ bar = &nfp->bar[1]; - msg += snprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, "); + msg += scnprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, "); atomic_inc(&bar->refcnt); bars_free--; @@ -680,8 +680,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { - msg += snprintf(msg, end - msg, - "0.%d: Explicit%d, ", 4 + i, i); + msg += scnprintf(msg, end - msg, + "0.%d: Explicit%d, ", 4 + i, i); atomic_inc(&bar->refcnt); bars_free--; From 38e0f746c456b2f7d64b37aef3b6bd9ad73508c0 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 15 Mar 2020 10:35:01 +0100 Subject: [PATCH 046/138] net: ionic: Use scnprintf() for avoiding potential buffer overflow Since snprintf() returns the would-be-output size instead of the actual output size, the succeeding calls may go beyond the given buffer limit. Fix it by replacing with scnprintf(). Reviewed-by: Simon Horman Acked-by: Shannon Nelson Cc: "David S . 
Miller" Cc: Jakub Kicinski Cc: oss-drivers@netronome.com Cc: netdev@vger.kernel.org Signed-off-by: Takashi Iwai Signed-off-by: David S. Miller --- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index c2f5b691e0fa..938e19ee0bcd 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -948,18 +948,18 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) int i; #define REMAIN(__x) (sizeof(buf) - (__x)) - i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", - lif->rx_mode, rx_mode); + i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", + lif->rx_mode, rx_mode); if (rx_mode & IONIC_RX_MODE_F_UNICAST) - i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); if (rx_mode & IONIC_RX_MODE_F_MULTICAST) - i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); if (rx_mode & IONIC_RX_MODE_F_BROADCAST) - i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); if (rx_mode & IONIC_RX_MODE_F_PROMISC) - i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) - i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); + i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf); err = ionic_adminq_post_wait(lif, &ctx); From 5e892880e14fbc1944f49aa42a67496516fe5dac Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 15 Mar 2020 10:35:02 +0100 Subject: [PATCH 047/138] net: sfc: Use scnprintf() for avoiding potential buffer overflow Since snprintf() returns the would-be-output size instead of the actual output size, the succeeding calls may go beyond the given buffer limit. Fix it by replacing with scnprintf(). Cc: "David S . Miller" Cc: Edward Cree Cc: Martin Habets Cc: Solarflare linux maintainers Cc: netdev@vger.kernel.org Signed-off-by: Takashi Iwai Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/mcdi.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 2713300343c7..15c731d04065 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -212,12 +212,14 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, * progress on a NIC at any one time. So no need for locking. 
*/ for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++) - bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, - " %08x", le32_to_cpu(hdr[i].u32[0])); + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", + le32_to_cpu(hdr[i].u32[0])); for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++) - bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, - " %08x", le32_to_cpu(inbuf[i].u32[0])); + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", + le32_to_cpu(inbuf[i].u32[0])); netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf); } @@ -302,15 +304,15 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) */ for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) { efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4); - bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, - " %08x", le32_to_cpu(hdr.u32[0])); + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); } for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) { efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len + (i * 4), 4); - bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, - " %08x", le32_to_cpu(hdr.u32[0])); + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); } netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf); @@ -1417,9 +1419,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) } ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); - offset = snprintf(buf, len, "%u.%u.%u.%u", - le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), - le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); + offset = scnprintf(buf, len, "%u.%u.%u.%u", + le16_to_cpu(ver_words[0]), + le16_to_cpu(ver_words[1]), + le16_to_cpu(ver_words[2]), + le16_to_cpu(ver_words[3])); /* EF10 may have multiple datapath firmware variants within a * single version. Report which variants are running. @@ -1427,9 +1431,9 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { struct efx_ef10_nic_data *nic_data = efx->nic_data; - offset += snprintf(buf + offset, len - offset, " rx%x tx%x", - nic_data->rx_dpcpu_fw_id, - nic_data->tx_dpcpu_fw_id); + offset += scnprintf(buf + offset, len - offset, " rx%x tx%x", + nic_data->rx_dpcpu_fw_id, + nic_data->tx_dpcpu_fw_id); /* It's theoretically possible for the string to exceed 31 * characters, though in practice the first three version From 2da222f612b58b6e6b41972f46005b58aac1699e Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 15 Mar 2020 10:35:03 +0100 Subject: [PATCH 048/138] net: netdevsim: Use scnprintf() for avoiding potential buffer overflow Since snprintf() returns the would-be-output size instead of the actual output size, the succeeding calls may go beyond the given buffer limit. Fix it by replacing with scnprintf(). Cc: "David S . Miller" Cc: Jakub Kicinski Cc: netdev@vger.kernel.org Signed-off-by: Takashi Iwai Signed-off-by: David S. 
Miller --- drivers/net/netdevsim/ipsec.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index e27fc1a4516d..3811f1bde84e 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -29,9 +29,9 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp, return -ENOMEM; p = buf; - p += snprintf(p, bufsize - (p - buf), - "SA count=%u tx=%u\n", - ipsec->count, ipsec->tx); + p += scnprintf(p, bufsize - (p - buf), + "SA count=%u tx=%u\n", + ipsec->count, ipsec->tx); for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) { struct nsim_sa *sap = &ipsec->sa[i]; @@ -39,18 +39,18 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp, if (!sap->used) continue; - p += snprintf(p, bufsize - (p - buf), - "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n", - i, (sap->rx ? 'r' : 't'), sap->ipaddr[0], - sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]); - p += snprintf(p, bufsize - (p - buf), - "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n", - i, be32_to_cpu(sap->xs->id.spi), - sap->xs->id.proto, sap->salt, sap->crypt); - p += snprintf(p, bufsize - (p - buf), - "sa[%i] key=0x%08x %08x %08x %08x\n", - i, sap->key[0], sap->key[1], - sap->key[2], sap->key[3]); + p += scnprintf(p, bufsize - (p - buf), + "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n", + i, (sap->rx ? 'r' : 't'), sap->ipaddr[0], + sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]); + p += scnprintf(p, bufsize - (p - buf), + "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n", + i, be32_to_cpu(sap->xs->id.spi), + sap->xs->id.proto, sap->salt, sap->crypt); + p += scnprintf(p, bufsize - (p - buf), + "sa[%i] key=0x%08x %08x %08x %08x\n", + i, sap->key[0], sap->key[1], + sap->key[2], sap->key[3]); } len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf); From 173756b86803655d70af7732079b3aa935e6ab68 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Fri, 13 Mar 2020 06:50:14 +0000 Subject: [PATCH 049/138] hsr: use rcu_read_lock() in hsr_get_node_{list/status}() hsr_get_node_{list/status}() are not under rtnl_lock() because they are callback functions of generic netlink. But they use __dev_get_by_index() without rtnl_lock(). So, it would use unsafe data. In order to fix it, rcu_read_lock() and dev_get_by_index_rcu() are used instead of __dev_get_by_index(). Fixes: f421436a591d ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)") Signed-off-by: Taehee Yoo Signed-off-by: David S. 
Miller --- net/hsr/hsr_framereg.c | 9 ++------- net/hsr/hsr_netlink.c | 39 +++++++++++++++++++++------------------ 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 3ba7f61be107..a64bb64935a6 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -482,12 +482,9 @@ int hsr_get_node_data(struct hsr_priv *hsr, struct hsr_port *port; unsigned long tdiff; - rcu_read_lock(); node = find_node_by_addr_A(&hsr->node_db, addr); - if (!node) { - rcu_read_unlock(); - return -ENOENT; /* No such entry */ - } + if (!node) + return -ENOENT; ether_addr_copy(addr_b, node->macaddress_B); @@ -522,7 +519,5 @@ int hsr_get_node_data(struct hsr_priv *hsr, *addr_b_ifindex = -1; } - rcu_read_unlock(); - return 0; } diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 8dc0547f01d0..d6760df2ad1f 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -251,15 +251,16 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) if (!na) goto invalid; - hsr_dev = __dev_get_by_index(genl_info_net(info), - nla_get_u32(info->attrs[HSR_A_IFINDEX])); + rcu_read_lock(); + hsr_dev = dev_get_by_index_rcu(genl_info_net(info), + nla_get_u32(info->attrs[HSR_A_IFINDEX])); if (!hsr_dev) - goto invalid; + goto rcu_unlock; if (!is_hsr_master(hsr_dev)) - goto invalid; + goto rcu_unlock; /* Send reply */ - skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (!skb_out) { res = -ENOMEM; goto fail; @@ -313,12 +314,10 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq); if (res < 0) goto nla_put_failure; - rcu_read_lock(); port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); if (port) res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX, port->dev->ifindex); - rcu_read_unlock(); if (res < 0) goto nla_put_failure; @@ -328,20 +327,22 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq); if (res < 0) goto nla_put_failure; - rcu_read_lock(); port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); if (port) res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX, port->dev->ifindex); - rcu_read_unlock(); if (res < 0) goto nla_put_failure; + rcu_read_unlock(); + genlmsg_end(skb_out, msg_head); genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid); return 0; +rcu_unlock: + rcu_read_unlock(); invalid: netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL); return 0; @@ -351,6 +352,7 @@ nla_put_failure: /* Fall through */ fail: + rcu_read_unlock(); return res; } @@ -377,15 +379,16 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) if (!na) goto invalid; - hsr_dev = __dev_get_by_index(genl_info_net(info), - nla_get_u32(info->attrs[HSR_A_IFINDEX])); + rcu_read_lock(); + hsr_dev = dev_get_by_index_rcu(genl_info_net(info), + nla_get_u32(info->attrs[HSR_A_IFINDEX])); if (!hsr_dev) - goto invalid; + goto rcu_unlock; if (!is_hsr_master(hsr_dev)) - goto invalid; + goto rcu_unlock; /* Send reply */ - skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (!skb_out) { res = -ENOMEM; goto fail; @@ -405,14 +408,11 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) hsr = netdev_priv(hsr_dev); - rcu_read_lock(); pos = hsr_get_next_node(hsr, NULL, addr); while (pos) { res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr); - if 
(res < 0) { - rcu_read_unlock(); + if (res < 0) goto nla_put_failure; - } pos = hsr_get_next_node(hsr, pos, addr); } rcu_read_unlock(); @@ -422,6 +422,8 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) return 0; +rcu_unlock: + rcu_read_unlock(); invalid: netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL); return 0; @@ -431,6 +433,7 @@ nla_put_failure: /* Fall through */ fail: + rcu_read_unlock(); return res; } From ca19c70f5225771c05bcdcb832b4eb84d7271c5e Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Fri, 13 Mar 2020 06:50:24 +0000 Subject: [PATCH 050/138] hsr: add restart routine into hsr_get_node_list() The hsr_get_node_list() is to send node addresses to the userspace. If there are so many nodes, it could fail because of buffer size. In order to avoid this failure, the restart routine is added. Fixes: f421436a591d ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)") Signed-off-by: Taehee Yoo Signed-off-by: David S. Miller --- net/hsr/hsr_netlink.c | 42 ++++++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index d6760df2ad1f..726bfe923999 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -360,16 +360,14 @@ fail: */ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) { - /* For receiving */ - struct nlattr *na; - struct net_device *hsr_dev; - - /* For sending */ - struct sk_buff *skb_out; - void *msg_head; - struct hsr_priv *hsr; - void *pos; unsigned char addr[ETH_ALEN]; + struct net_device *hsr_dev; + struct sk_buff *skb_out; + struct hsr_priv *hsr; + bool restart = false; + struct nlattr *na; + void *pos = NULL; + void *msg_head; int res; if (!info) @@ -387,8 +385,9 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) if (!is_hsr_master(hsr_dev)) goto rcu_unlock; +restart: /* Send reply */ - skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); + skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb_out) { res = -ENOMEM; goto fail; @@ -402,17 +401,28 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) goto nla_put_failure; } - res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex); - if (res < 0) - goto nla_put_failure; + if (!restart) { + res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex); + if (res < 0) + goto nla_put_failure; + } hsr = netdev_priv(hsr_dev); - pos = hsr_get_next_node(hsr, NULL, addr); + if (!pos) + pos = hsr_get_next_node(hsr, NULL, addr); while (pos) { res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr); - if (res < 0) + if (res < 0) { + if (res == -EMSGSIZE) { + genlmsg_end(skb_out, msg_head); + genlmsg_unicast(genl_info_net(info), skb_out, + info->snd_portid); + restart = true; + goto restart; + } goto nla_put_failure; + } pos = hsr_get_next_node(hsr, pos, addr); } rcu_read_unlock(); @@ -429,7 +439,7 @@ invalid: return 0; nla_put_failure: - kfree_skb(skb_out); + nlmsg_free(skb_out); /* Fall through */ fail: From 09e91dbea0aa32be02d8877bd50490813de56b9a Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Fri, 13 Mar 2020 06:50:33 +0000 Subject: [PATCH 051/138] hsr: set .netnsok flag The hsr module has been supporting the list and status command. (HSR_C_GET_NODE_LIST and HSR_C_GET_NODE_STATUS) These commands send node information to the user-space via generic netlink. But, in the non-init_net namespace, these commands are not allowed because .netnsok flag is false. 
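For reference, a minimal sketch of what opting into other network namespaces looks like for a generic netlink family. The "foo" names (foo_genl_family, foo_genl_policy, foo_ops, FOO_A_MAX) are hypothetical placeholders, not taken from any real driver; the field layout simply mirrors the hsr_genl_family definition visible in the diff further down, with foo_ops and foo_genl_policy assumed to be defined elsewhere:

	static struct genl_family foo_genl_family __ro_after_init = {
		.hdrsize = 0,
		.name = "FOO",
		.version = 1,
		.maxattr = FOO_A_MAX,
		.policy = foo_genl_policy,
		.netnsok = true,	/* accept requests from non-init_net namespaces */
		.module = THIS_MODULE,
		.ops = foo_ops,
		.n_ops = ARRAY_SIZE(foo_ops),
	};

hsr currently leaves .netnsok unset (false), which is what the change below corrects.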
So, there is no way to get node information in the non-init_net namespace. Fixes: f421436a591d ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)") Signed-off-by: Taehee Yoo Signed-off-by: David S. Miller --- net/hsr/hsr_netlink.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 726bfe923999..fae21c863b1f 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -470,6 +470,7 @@ static struct genl_family hsr_genl_family __ro_after_init = { .version = 1, .maxattr = HSR_A_MAX, .policy = hsr_genl_policy, + .netnsok = true, .module = THIS_MODULE, .ops = hsr_ops, .n_ops = ARRAY_SIZE(hsr_ops), From ef299cc3fa1a9e1288665a9fdc8bff55629fd359 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 13 Mar 2020 22:29:54 -0700 Subject: [PATCH 052/138] net_sched: cls_route: remove the right filter from hashtable route4_change() allocates a new filter and copies values from the old one. After the new filter is inserted into the hash table, the old filter should be removed and freed, as the final step of the update. However, the current code mistakenly removes the new one. This looks apparently wrong to me, and it causes double "free" and use-after-free too, as reported by syzbot. Reported-and-tested-by: syzbot+f9b32aaacd60305d9687@syzkaller.appspotmail.com Reported-and-tested-by: syzbot+2f8c233f131943d6056d@syzkaller.appspotmail.com Reported-and-tested-by: syzbot+9c2df9fd5e9445b74e01@syzkaller.appspotmail.com Fixes: 1109c00547fc ("net: sched: RCU cls_route") Cc: Jamal Hadi Salim Cc: Jiri Pirko Cc: John Fastabend Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/cls_route.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 6f8786b06bde..5efa3e7ace15 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -534,8 +534,8 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, fp = &b->ht[h]; for (pfp = rtnl_dereference(*fp); pfp; fp = &pfp->next, pfp = rtnl_dereference(*fp)) { - if (pfp == f) { - *fp = f->next; + if (pfp == fold) { + rcu_assign_pointer(*fp, fold->next); break; } } From fe2a31d790f81bd14a76de3d3b87f4f1362f60cd Mon Sep 17 00:00:00 2001 From: Michal Kubecek Date: Sun, 15 Mar 2020 18:17:43 +0100 Subject: [PATCH 053/138] netlink: allow extack cookie also for error messages Commit ba0dc5f6e0ba ("netlink: allow sending extended ACK with cookie on success") introduced a cookie which can be sent to userspace as part of extended ack message in the form of NLMSGERR_ATTR_COOKIE attribute. Currently the cookie is ignored if error code is non-zero but there is no technical reason for such limitation and it can be useful to provide machine parseable information as part of an error message. Include NLMSGERR_ATTR_COOKIE whenever the cookie has been set, regardless of error code. Signed-off-by: Michal Kubecek Signed-off-by: David S. 
Miller --- net/netlink/af_netlink.c | 43 ++++++++++++++++------------------------ 1 file changed, 17 insertions(+), 26 deletions(-) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 5313f1cec170..2f234791b879 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2392,19 +2392,14 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, if (nlk_has_extack && extack && extack->_msg) tlvlen += nla_total_size(strlen(extack->_msg) + 1); - if (err) { - if (!(nlk->flags & NETLINK_F_CAP_ACK)) - payload += nlmsg_len(nlh); - else - flags |= NLM_F_CAPPED; - if (nlk_has_extack && extack && extack->bad_attr) - tlvlen += nla_total_size(sizeof(u32)); - } else { + if (err && !(nlk->flags & NETLINK_F_CAP_ACK)) + payload += nlmsg_len(nlh); + else flags |= NLM_F_CAPPED; - - if (nlk_has_extack && extack && extack->cookie_len) - tlvlen += nla_total_size(extack->cookie_len); - } + if (err && nlk_has_extack && extack && extack->bad_attr) + tlvlen += nla_total_size(sizeof(u32)); + if (nlk_has_extack && extack && extack->cookie_len) + tlvlen += nla_total_size(extack->cookie_len); if (tlvlen) flags |= NLM_F_ACK_TLVS; @@ -2427,20 +2422,16 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg)); } - if (err) { - if (extack->bad_attr && - !WARN_ON((u8 *)extack->bad_attr < in_skb->data || - (u8 *)extack->bad_attr >= in_skb->data + - in_skb->len)) - WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, - (u8 *)extack->bad_attr - - (u8 *)nlh)); - } else { - if (extack->cookie_len) - WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE, - extack->cookie_len, - extack->cookie)); - } + if (err && extack->bad_attr && + !WARN_ON((u8 *)extack->bad_attr < in_skb->data || + (u8 *)extack->bad_attr >= in_skb->data + + in_skb->len)) + WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, + (u8 *)extack->bad_attr - + (u8 *)nlh)); + if (extack->cookie_len) + WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE, + extack->cookie_len, extack->cookie)); } nlmsg_end(skb, rep); From f1388ec4a144f40348321a0915c5535d623e165c Mon Sep 17 00:00:00 2001 From: Michal Kubecek Date: Sun, 15 Mar 2020 18:17:48 +0100 Subject: [PATCH 054/138] netlink: add nl_set_extack_cookie_u32() Similar to existing nl_set_extack_cookie_u64(), add new helper nl_set_extack_cookie_u32() which sets extack cookie to a u32 value. Signed-off-by: Michal Kubecek Signed-off-by: David S. 
Miller --- include/linux/netlink.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 205fa7b1f07a..4090524c3462 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -119,6 +119,15 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, extack->cookie_len = sizeof(__cookie); } +static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack, + u32 cookie) +{ + u32 __cookie = cookie; + + memcpy(extack->cookie, &__cookie, sizeof(__cookie)); + extack->cookie_len = sizeof(__cookie); +} + void netlink_kernel_release(struct sock *sk); int __netlink_change_ngroups(struct sock *sk, unsigned int groups); int netlink_change_ngroups(struct sock *sk, unsigned int groups); From 2363d73a2f3e92787f336721c40918ba2eb0c74c Mon Sep 17 00:00:00 2001 From: Michal Kubecek Date: Sun, 15 Mar 2020 18:17:53 +0100 Subject: [PATCH 055/138] ethtool: reject unrecognized request flags As pointed out by Jakub Kicinski, we ethtool netlink code should respond with an error if request head has flags set which are not recognized by kernel, either as a mistake or because it expects functionality introduced in later kernel versions. To avoid unnecessary roundtrips, use extack cookie to provide the information about supported request flags. Signed-off-by: Michal Kubecek Signed-off-by: David S. Miller --- net/ethtool/netlink.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 180c194fab07..fc9e0b806889 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -40,6 +40,7 @@ int ethnl_parse_header(struct ethnl_req_info *req_info, struct nlattr *tb[ETHTOOL_A_HEADER_MAX + 1]; const struct nlattr *devname_attr; struct net_device *dev = NULL; + u32 flags = 0; int ret; if (!header) { @@ -50,8 +51,17 @@ int ethnl_parse_header(struct ethnl_req_info *req_info, ethnl_header_policy, extack); if (ret < 0) return ret; - devname_attr = tb[ETHTOOL_A_HEADER_DEV_NAME]; + if (tb[ETHTOOL_A_HEADER_FLAGS]) { + flags = nla_get_u32(tb[ETHTOOL_A_HEADER_FLAGS]); + if (flags & ~ETHTOOL_FLAG_ALL) { + NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_HEADER_FLAGS], + "unrecognized request flags"); + nl_set_extack_cookie_u32(extack, ETHTOOL_FLAG_ALL); + return -EOPNOTSUPP; + } + } + devname_attr = tb[ETHTOOL_A_HEADER_DEV_NAME]; if (tb[ETHTOOL_A_HEADER_DEV_INDEX]) { u32 ifindex = nla_get_u32(tb[ETHTOOL_A_HEADER_DEV_INDEX]); @@ -90,9 +100,7 @@ int ethnl_parse_header(struct ethnl_req_info *req_info, } req_info->dev = dev; - if (tb[ETHTOOL_A_HEADER_FLAGS]) - req_info->flags = nla_get_u32(tb[ETHTOOL_A_HEADER_FLAGS]); - + req_info->flags = flags; return 0; } From 0fe1568061be372a908b408b93c2f64d1d95b6f6 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 16 Mar 2020 14:05:56 +0200 Subject: [PATCH 056/138] net: fsl/fman: treat all RGMII modes in memac_adjust_link() Treat all internal delay variants the same as RGMII. Signed-off-by: Madalin Bucur Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fman/fman_memac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index e1901874c19f..0d2b4ab01f24 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -782,7 +782,7 @@ int memac_adjust_link(struct fman_mac *memac, u16 speed) /* Set full duplex */ tmp &= ~IF_MODE_HD; - if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) { + if (phy_interface_mode_is_rgmii(memac->phy_if)) { /* Configure RGMII in manual mode */ tmp &= ~IF_MODE_RGMII_AUTO; tmp &= ~IF_MODE_RGMII_SP_MASK; From 4022d808c45277693ea86478fab1f081ebf997e8 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 16 Mar 2020 14:05:57 +0200 Subject: [PATCH 057/138] arm64: dts: ls1043a-rdb: correct RGMII delay mode to rgmii-id The correct setting for the RGMII ports on LS1043ARDB is to enable delay on both Rx and Tx so the interface mode used must be PHY_INTERFACE_MODE_RGMII_ID. Since commit 1b3047b5208a80 ("net: phy: realtek: add support for configuring the RX delay on RTL8211F") the Realtek 8211F PHY driver has control over the RGMII RX delay and it is disabling it for RGMII_TXID. The LS1043ARDB uses two such PHYs in RGMII_ID mode but in the device tree the mode was described as "rgmii_txid". This issue was not apparent at the time as the PHY driver took the same action for RGMII_TXID and RGMII_ID back then but it became visible (RX no longer working) after the above patch. Changing the phy-connection-type to "rgmii-id" to address the issue. Fixes: bf02f2ffe59c ("arm64: dts: add LS1043A DPAA FMan support") Signed-off-by: Madalin Bucur Signed-off-by: David S. Miller --- arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts index 4223a2352d45..dde50c88f5e3 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts @@ -119,12 +119,12 @@ ethernet@e4000 { phy-handle = <&rgmii_phy1>; - phy-connection-type = "rgmii-txid"; + phy-connection-type = "rgmii-id"; }; ethernet@e6000 { phy-handle = <&rgmii_phy2>; - phy-connection-type = "rgmii-txid"; + phy-connection-type = "rgmii-id"; }; ethernet@e8000 { From d79e9d7c1e4ba5f95f2ff3541880c40ea9722212 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 16 Mar 2020 14:05:58 +0200 Subject: [PATCH 058/138] arm64: dts: ls1046ardb: set RGMII interfaces to RGMII_ID mode The correct setting for the RGMII ports on LS1046ARDB is to enable delay on both Rx and Tx so the interface mode used must be PHY_INTERFACE_MODE_RGMII_ID. Since commit 1b3047b5208a80 ("net: phy: realtek: add support for configuring the RX delay on RTL8211F") the Realtek 8211F PHY driver has control over the RGMII RX delay and it is disabling it for RGMII_TXID. The LS1046ARDB uses two such PHYs in RGMII_ID mode but in the device tree the mode was described as "rgmii". Changing the phy-connection-type to "rgmii-id" to address the issue. Fixes: 3fa395d2c48a ("arm64: dts: add LS1046A DPAA FMan nodes") Signed-off-by: Madalin Bucur Signed-off-by: David S. 
Miller --- arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts index dbc23d6cd3b4..d53ccc56bb63 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts @@ -131,12 +131,12 @@ &fman0 { ethernet@e4000 { phy-handle = <&rgmii_phy1>; - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; }; ethernet@e6000 { phy-handle = <&rgmii_phy2>; - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; }; ethernet@e8000 { From b317538c47943f9903860d83cc0060409e12d2ff Mon Sep 17 00:00:00 2001 From: Zheng Wei Date: Mon, 16 Mar 2020 22:23:47 +0800 Subject: [PATCH 059/138] net: vxge: fix wrong __VA_ARGS__ usage printk in macro vxge_debug_ll uses __VA_ARGS__ without "##" prefix, it causes a build error when there is no variable arguments(e.g. only fmt is specified.). Signed-off-by: Zheng Wei Signed-off-by: David S. Miller --- drivers/net/ethernet/neterion/vxge/vxge-config.h | 2 +- drivers/net/ethernet/neterion/vxge/vxge-main.h | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h index e678ba379598..628fa9b2f741 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h @@ -2045,7 +2045,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \ (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\ if ((mask & VXGE_DEBUG_MASK) == mask) \ - printk(fmt "\n", __VA_ARGS__); \ + printk(fmt "\n", ##__VA_ARGS__); \ } while (0) #else #define vxge_debug_ll(level, mask, fmt, ...) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h index 59a57ff5e96a..9c86f4f9cd42 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h @@ -452,49 +452,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); #if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK) #define vxge_debug_ll_config(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__) #else #define vxge_debug_ll_config(level, fmt, ...) #endif #if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) #define vxge_debug_init(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__) #else #define vxge_debug_init(level, fmt, ...) #endif #if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK) #define vxge_debug_tx(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__) #else #define vxge_debug_tx(level, fmt, ...) #endif #if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK) #define vxge_debug_rx(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__) #else #define vxge_debug_rx(level, fmt, ...) #endif #if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK) #define vxge_debug_mem(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__) #else #define vxge_debug_mem(level, fmt, ...) 
#endif #if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK) #define vxge_debug_entryexit(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__) #else #define vxge_debug_entryexit(level, fmt, ...) #endif #if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK) #define vxge_debug_intr(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__) #else #define vxge_debug_intr(level, fmt, ...) #endif From 065fd83e1be2e1ba0d446a257fd86a3cc7bddb51 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 16 Mar 2020 22:56:36 +0800 Subject: [PATCH 060/138] net: mvneta: Fix the case where the last poll did not process all rx For the case where the last mvneta_poll did not process all RX packets, we need to xor the pp->cause_rx_tx or port->cause_rx_tx before claculating the rx_queue. Fixes: 2dcf75e2793c ("net: mvneta: Associate RX queues with each CPU") Signed-off-by: Jisheng Zhang Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 98017e7d5dd0..11babc79dc6c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3036,11 +3036,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget) /* For the case where the last mvneta_poll did not process all * RX packets */ - rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); - cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : port->cause_rx_tx; + rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); if (rx_queue) { rx_queue = rx_queue - 1; if (pp->bm_priv) From 32ca98feab8c9076c89c0697c5a85e46fece809d Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 16 Mar 2020 19:53:00 +0200 Subject: [PATCH 061/138] net: ip_gre: Accept IFLA_INFO_DATA-less configuration The fix referenced below causes a crash when an ERSPAN tunnel is created without passing IFLA_INFO_DATA. Fix by validating passed-in data in the same way as ipgre does. Fixes: e1f8f78ffe98 ("net: ip_gre: Separate ERSPAN newlink / changelink callbacks") Reported-by: syzbot+1b4ebf4dae4e510dd219@syzkaller.appspotmail.com Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- net/ipv4/ip_gre.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 7765c65fc7d2..029b24eeafba 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1168,6 +1168,8 @@ static int erspan_netlink_parms(struct net_device *dev, err = ipgre_netlink_parms(dev, data, tb, parms, fwmark); if (err) return err; + if (!data) + return 0; if (data[IFLA_GRE_ERSPAN_VER]) { t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); From 028fd76b9b1cc9e195ce60790791fd3f37b9b5d7 Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Mon, 16 Mar 2020 20:49:06 +1300 Subject: [PATCH 062/138] Revert "net: mvmdio: avoid error message for optional IRQ" This reverts commit e1f550dc44a4d535da4e25ada1b0eaf8f3417929. platform_get_irq_optional() will still return -ENXIO when no interrupt is provided so the additional error handling caused the driver prone to fail when no interrupt was specified. Revert the change so we can apply the correct minimal fix. Signed-off-by: Chris Packham Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvmdio.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index d2e2dc538428..0b9e851f3da4 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -347,7 +347,7 @@ static int orion_mdio_probe(struct platform_device *pdev) } - dev->err_interrupt = platform_get_irq_optional(pdev, 0); + dev->err_interrupt = platform_get_irq(pdev, 0); if (dev->err_interrupt > 0 && resource_size(r) < MVMDIO_ERR_INT_MASK + 4) { dev_err(&pdev->dev, @@ -364,8 +364,8 @@ static int orion_mdio_probe(struct platform_device *pdev) writel(MVMDIO_ERR_INT_SMI_DONE, dev->regs + MVMDIO_ERR_INT_MASK); - } else if (dev->err_interrupt < 0) { - ret = dev->err_interrupt; + } else if (dev->err_interrupt == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; goto out_mdio; } From fa2632f74e57bbc869c8ad37751a11b6147a3acc Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Mon, 16 Mar 2020 20:49:07 +1300 Subject: [PATCH 063/138] net: mvmdio: avoid error message for optional IRQ Per the dt-binding the interrupt is optional so use platform_get_irq_optional() instead of platform_get_irq(). Since commit 7723f4c5ecdb ("driver core: platform: Add an error message to platform_get_irq*()") platform_get_irq() produces an error message orion-mdio f1072004.mdio: IRQ index 0 not found which is perfectly normal if one hasn't specified the optional property in the device tree. Signed-off-by: Chris Packham Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvmdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 0b9e851f3da4..d14762d93640 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -347,7 +347,7 @@ static int orion_mdio_probe(struct platform_device *pdev) } - dev->err_interrupt = platform_get_irq(pdev, 0); + dev->err_interrupt = platform_get_irq_optional(pdev, 0); if (dev->err_interrupt > 0 && resource_size(r) < MVMDIO_ERR_INT_MASK + 4) { dev_err(&pdev->dev, From 612eb1c3b9e504de24136c947ed7c07bc342f3aa Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Mon, 16 Mar 2020 14:44:55 -0700 Subject: [PATCH 064/138] Revert "net: bcmgenet: use RGMII loopback for MAC reset" This reverts commit 3a55402c93877d291b0a612d25edb03d1b4b93ac. This is not a good solution when connecting to an external switch that may not support the isolation of the TXC signal resulting in output driver contention on the pin. A different solution is necessary. Signed-off-by: Doug Berger Acked-by: Florian Fainelli Signed-off-by: David S. 
Miller --- .../net/ethernet/broadcom/genet/bcmgenet.c | 2 ++ drivers/net/ethernet/broadcom/genet/bcmmii.c | 34 ------------------- 2 files changed, 2 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index e50a15397e11..c8ac2d83208f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1989,6 +1989,8 @@ static void reset_umac(struct bcmgenet_priv *priv) /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */ bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD); + udelay(2); + bcmgenet_umac_writel(priv, 0, UMAC_CMD); } static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 10244941a7a6..69e80fb6e039 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -181,38 +181,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) const char *phy_name = NULL; u32 id_mode_dis = 0; u32 port_ctrl; - int bmcr = -1; - int ret; u32 reg; - /* MAC clocking workaround during reset of umac state machines */ - reg = bcmgenet_umac_readl(priv, UMAC_CMD); - if (reg & CMD_SW_RESET) { - /* An MII PHY must be isolated to prevent TXC contention */ - if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { - ret = phy_read(phydev, MII_BMCR); - if (ret >= 0) { - bmcr = ret; - ret = phy_write(phydev, MII_BMCR, - bmcr | BMCR_ISOLATE); - } - if (ret) { - netdev_err(dev, "failed to isolate PHY\n"); - return ret; - } - } - /* Switch MAC clocking to RGMII generated clock */ - bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); - /* Ensure 5 clks with Rx disabled - * followed by 5 clks with Reset asserted - */ - udelay(4); - reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN); - bcmgenet_umac_writel(priv, reg, UMAC_CMD); - /* Ensure 5 more clocks before Rx is enabled */ - udelay(2); - } - switch (priv->phy_interface) { case PHY_INTERFACE_MODE_INTERNAL: phy_name = "internal PHY"; @@ -282,10 +252,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); - /* Restore the MII PHY after isolation */ - if (bmcr >= 0) - phy_write(phydev, MII_BMCR, bmcr); - priv->ext_phy = !priv->internal_phy && (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); From 88f6c8bf1aaed5039923fb4c701cab4d42176275 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Mon, 16 Mar 2020 14:44:56 -0700 Subject: [PATCH 065/138] net: bcmgenet: keep MAC in reset until PHY is up As noted in commit 28c2d1a7a0bf ("net: bcmgenet: enable loopback during UniMAC sw_reset") the UniMAC must be clocked at least 5 cycles while the sw_reset is asserted to ensure a clean reset. That commit enabled local loopback to provide an Rx clock from the GENET sourced Tx clk. However, when connected in MII mode the Tx clk is sourced by the PHY so if an EPHY is not supplying clocks (e.g. when the link is down) the UniMAC does not receive the necessary clocks. This commit extends the sw_reset window until the PHY reports that the link is up thereby ensuring that the clocks are being provided to the MAC to produce a clean reset. One consequence is that if the system attempts to enter a Wake on LAN suspend state when the PHY link has not been active the MAC may not have had a chance to initialize cleanly. 
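A condensed sketch of how the suspend path copes with that corner case, simplified from the bcmgenet_wol.c hunk below (surrounding code omitted): if the MAC is still held in software reset when Wake-on-LAN suspend is requested, the pending reset is dropped before the receive path is reconfigured:

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (reg & CMD_SW_RESET)		/* link never came up, MAC still in reset */
		reg &= ~CMD_SW_RESET;	/* release reset so WoL RX can be enabled */
	reg &= ~CMD_RX_EN;		/* then disable RX as usual before reconfiguring */
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	mdelay(10);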
In this case, we remove the sw_reset and enable the WoL reception path as normal with the hope that the PHY will provide the necessary clocks to drive the WoL blocks if the link becomes active after the system has entered suspend. Fixes: 1c1008c793fa ("net: bcmgenet: add main driver file") Signed-off-by: Doug Berger Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 10 ++++------ drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 6 +++++- drivers/net/ethernet/broadcom/genet/bcmmii.c | 6 ++++++ 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index c8ac2d83208f..ce64b4e47feb 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1965,6 +1965,8 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) u32 reg; reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + return; if (enable) reg |= mask; else @@ -1984,13 +1986,9 @@ static void reset_umac(struct bcmgenet_priv *priv) bcmgenet_rbuf_ctrl_set(priv, 0); udelay(10); - /* disable MAC while updating its registers */ - bcmgenet_umac_writel(priv, 0, UMAC_CMD); - - /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */ - bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD); + /* issue soft reset and disable MAC while updating its registers */ + bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); udelay(2); - bcmgenet_umac_writel(priv, 0, UMAC_CMD); } static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index ea20d94bd050..c9a43695b182 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -132,8 +132,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, return -EINVAL; } - /* disable RX */ + /* Can't suspend with WoL if MAC is still in reset */ reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + reg &= ~CMD_SW_RESET; + + /* disable RX */ reg &= ~CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); mdelay(10); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 69e80fb6e039..b5930f80039d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -95,6 +95,12 @@ void bcmgenet_mii_setup(struct net_device *dev) CMD_HD_EN | CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); reg |= cmd_bits; + if (reg & CMD_SW_RESET) { + reg &= ~CMD_SW_RESET; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + udelay(2); + reg |= CMD_TX_EN | CMD_RX_EN; + } bcmgenet_umac_writel(priv, reg, UMAC_CMD); } else { /* done if nothing has changed */ From 872307abbd0d9afd72171929806c2fa33dc34179 Mon Sep 17 00:00:00 2001 From: Rayagonda Kokatanur Date: Tue, 17 Mar 2020 10:24:35 +0530 Subject: [PATCH 066/138] net: phy: mdio-mux-bcm-iproc: check clk_prepare_enable() return value Check clk_prepare_enable() return value. Fixes: 2c7230446bc9 ("net: phy: Add pm support to Broadcom iProc mdio mux driver") Signed-off-by: Rayagonda Kokatanur Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-mux-bcm-iproc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 88d409e48c1f..aad6809ebe39 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -288,8 +288,13 @@ static int mdio_mux_iproc_suspend(struct device *dev) static int mdio_mux_iproc_resume(struct device *dev) { struct iproc_mdiomux_desc *md = dev_get_drvdata(dev); + int rc; - clk_prepare_enable(md->core_clk); + rc = clk_prepare_enable(md->core_clk); + if (rc) { + dev_err(md->dev, "failed to enable core clk\n"); + return rc; + } mdio_mux_iproc_config(md); return 0; From ce1f352162828ba07470328828a32f47aa759020 Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 17 Mar 2020 09:06:39 +0200 Subject: [PATCH 067/138] net: ena: fix incorrect setting of the number of msix vectors Overview: We don't frequently change the msix vectors throughout the life cycle of the driver. We do so in two functions: ena_probe() and ena_restore(). ena_probe() is only called when the driver is loaded. ena_restore() on the other hand is called during device reset / resume operations. We use num_io_queues for calculating and allocating the number of msix vectors. At ena_probe() this value is equal to max_num_io_queues and thus this is not an issue, however ena_restore() might be called after the number of io queues has changed. A possible bug scenario is as follows: * Change number of queues from 8 to 4. (num_io_queues = 4, max_num_io_queues = 8, msix_vecs = 9,) * Trigger reset occurs -> ena_restore is called. (num_io_queues = 4, max_num_io_queues =8 , msix_vecs = 5) * Change number of queues from 4 to 6. (num_io_queues = 6, max_num_io_queues = 8, msix_vecs = 5) * The driver will reset due to failure of check_for_rx_interrupt_queue() Fix: This can be easily fixed by always using max_num_io_queues to init the msix_vecs, since this number won't change as opposed to num_io_queues. Fixes: 4d19266022ec ("net: ena: multiple queue creation related cleanups") Signed-off-by: Sameeh Jubran Signed-off-by: Arthur Kiyanovski Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 0b2fd96b93d7..39f290f4458f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1968,7 +1968,7 @@ static int ena_enable_msix(struct ena_adapter *adapter) } /* Reserved the max msix vectors we might need */ - msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_io_queues); + msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); netif_dbg(adapter, probe, adapter->netdev, "trying to enable MSI-X, vectors %d\n", msix_vecs); From e02ae6ed51be3d28923bfd318ae57000f5643da5 Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 17 Mar 2020 09:06:40 +0200 Subject: [PATCH 068/138] net: ena: fix request of incorrect number of IRQ vectors Bug: In short the main issue is caused by the fact that the number of queues is changed using ethtool after ena_probe() has been called and before ena_up() was executed. Here is the full scenario in detail: * ena_probe() is called when the driver is loaded, the driver is not up yet at the end of ena_probe(). 
* The number of queues is changed -> io_queue_count is changed as well - ena_up() is not called since the "dev_was_up" boolean in ena_update_queue_count() is false. * ena_up() is called by the kernel (it's called asynchronously some time after ena_probe()). ena_setup_io_intr() is called by ena_up() and it uses io_queue_count to get the suitable irq lines for each msix vector. The function ena_request_io_irq() is called right after that and it uses msix_vecs - This value only changes during ena_probe() and ena_restore() - to request the irq vectors. This results in "Failed to request I/O IRQ" error for i > io_queue_count. Numeric example: * After ena_probe() io_queue_count = 8, msix_vecs = 9. * The number of queues changes to 4 -> io_queue_count = 4, msix_vecs = 9. * ena_up() is executed for the first time: ** ena_setup_io_intr() inits the vectors only up to io_queue_count. ** ena_request_io_irq() calls request_irq() and fails for i = 5. How to reproduce: simply run the following commands: sudo rmmod ena && sudo insmod ena.ko; sudo ethtool -L eth1 combined 3; Fix: Use ENA_MAX_MSIX_VEC(adapter->num_io_queues + adapter->xdp_num_queues) instead of adapter->msix_vecs. We need to take XDP queues into consideration as they need to have msix vectors assigned to them as well. Note that the XDP cannot be attached before the driver is up and running but in XDP mode the issue might occur when the number of queues changes right after a reset trigger. The ENA_MAX_MSIX_VEC simply adds one to the argument since the first msix vector is reserved for management queue. Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Sameeh Jubran Signed-off-by: Arthur Kiyanovski Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 39f290f4458f..836fda585391 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2068,6 +2068,7 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter) static int ena_request_io_irq(struct ena_adapter *adapter) { + u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; unsigned long flags = 0; struct ena_irq *irq; int rc = 0, i, k; @@ -2078,7 +2079,7 @@ static int ena_request_io_irq(struct ena_adapter *adapter) return -EINVAL; } - for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { + for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { irq = &adapter->irq_tbl[i]; rc = request_irq(irq->vector, irq->handler, flags, irq->name, irq->data); @@ -2119,6 +2120,7 @@ static void ena_free_mgmnt_irq(struct ena_adapter *adapter) static void ena_free_io_irq(struct ena_adapter *adapter) { + u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; struct ena_irq *irq; int i; @@ -2129,7 +2131,7 @@ static void ena_free_io_irq(struct ena_adapter *adapter) } #endif /* CONFIG_RFS_ACCEL */ - for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { + for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { irq = &adapter->irq_tbl[i]; irq_set_affinity_hint(irq->vector, NULL); free_irq(irq->vector, irq->data); @@ -2144,12 +2146,13 @@ static void ena_disable_msix(struct ena_adapter *adapter) static void ena_disable_io_intr_sync(struct ena_adapter *adapter) { + u32 io_queue_count = adapter->num_io_queues + 
adapter->xdp_num_queues; int i; if (!netif_running(adapter->netdev)) return; - for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) + for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) synchronize_irq(adapter->irq_tbl[i].vector); } From 30623e1ed116bcd1785217d0a98eec643687e091 Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 17 Mar 2020 09:06:41 +0200 Subject: [PATCH 069/138] net: ena: avoid memory access violation by validating req_id properly Rx req_id is an index in struct ena_eth_io_rx_cdesc_base. The driver should validate that the Rx req_id it received from the device is in range [0, ring_size -1]. Failure to do so could yield to potential memory access violoation. The validation was mistakenly done when refilling the Rx submission queue and not in Rx completion queue. Fixes: ad974baef2a1 ("net: ena: add support for out of order rx buffers refill") Signed-off-by: Noam Dagan Signed-off-by: Arthur Kiyanovski Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 836fda585391..51333a05c14d 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1018,13 +1018,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) struct ena_rx_buffer *rx_info; req_id = rx_ring->free_ids[next_to_use]; - rc = validate_rx_req_id(rx_ring, req_id); - if (unlikely(rc < 0)) - break; rx_info = &rx_ring->rx_buffer_info[req_id]; - rc = ena_alloc_rx_page(rx_ring, rx_info, GFP_ATOMIC | __GFP_COMP); if (unlikely(rc < 0)) { @@ -1379,9 +1375,15 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info; u16 len, req_id, buf = 0; void *va; + int rc; len = ena_bufs[buf].len; req_id = ena_bufs[buf].req_id; + + rc = validate_rx_req_id(rx_ring, req_id); + if (unlikely(rc < 0)) + return NULL; + rx_info = &rx_ring->rx_buffer_info[req_id]; if (unlikely(!rx_info->page)) { @@ -1454,6 +1456,11 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, buf++; len = ena_bufs[buf].len; req_id = ena_bufs[buf].req_id; + + rc = validate_rx_req_id(rx_ring, req_id); + if (unlikely(rc < 0)) + return NULL; + rx_info = &rx_ring->rx_buffer_info[req_id]; } while (1); From dfdde1345bc124816f0fd42fa91b8748051e758e Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 17 Mar 2020 09:06:42 +0200 Subject: [PATCH 070/138] net: ena: fix continuous keep-alive resets last_keep_alive_jiffies is updated in probe and when a keep-alive event is received. In case the driver times-out on a keep-alive event, it has high chances of continuously timing-out on keep-alive events. This is because when the driver recovers from the keep-alive-timeout reset the value of last_keep_alive_jiffies is very old, and if a keep-alive event is not received before the next timer expires, the value of last_keep_alive_jiffies will cause another keep-alive-timeout reset and so forth in a loop. Solution: Update last_keep_alive_jiffies whenever the device is restored after reset. Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Noam Dagan Signed-off-by: Arthur Kiyanovski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 51333a05c14d..4647d7656761 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3486,6 +3486,7 @@ static int ena_restore_device(struct ena_adapter *adapter) netif_carrier_on(adapter->netdev); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); + adapter->last_keep_alive_jiffies = jiffies; dev_err(&pdev->dev, "Device reset completed successfully, Driver info: %s\n", version); From 6497ca07f5e91131c6c05e4564d7f98a780aa02b Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 17 Mar 2020 15:54:19 +0100 Subject: [PATCH 071/138] net: phy: sfp-bus.c: get rid of docs warnings The indentation for the returned values are weird, causing those warnings: ./drivers/net/phy/sfp-bus.c:579: WARNING: Unexpected indentation. ./drivers/net/phy/sfp-bus.c:619: WARNING: Unexpected indentation. Use a list and change the identation for it to be properly parsed by the documentation toolchain. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: David S. Miller --- drivers/net/phy/sfp-bus.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index d949ea7b4f8c..6900c68260e0 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -572,13 +572,15 @@ static void sfp_upstream_clear(struct sfp_bus *bus) * the sfp_bus structure, incrementing its reference count. This must * be put via sfp_bus_put() when done. * - * Returns: on success, a pointer to the sfp_bus structure, - * %NULL if no SFP is specified, - * on failure, an error pointer value: - * corresponding to the errors detailed for - * fwnode_property_get_reference_args(). - * %-ENOMEM if we failed to allocate the bus. - * an error from the upstream's connect_phy() method. + * Returns: + * - on success, a pointer to the sfp_bus structure, + * - %NULL if no SFP is specified, + * - on failure, an error pointer value: + * + * - corresponding to the errors detailed for + * fwnode_property_get_reference_args(). + * - %-ENOMEM if we failed to allocate the bus. + * - an error from the upstream's connect_phy() method. */ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode) { @@ -612,13 +614,15 @@ EXPORT_SYMBOL_GPL(sfp_bus_find_fwnode); * the SFP bus using sfp_register_upstream(). This takes a reference on the * bus, so it is safe to put the bus after this call. * - * Returns: on success, a pointer to the sfp_bus structure, - * %NULL if no SFP is specified, - * on failure, an error pointer value: - * corresponding to the errors detailed for - * fwnode_property_get_reference_args(). - * %-ENOMEM if we failed to allocate the bus. - * an error from the upstream's connect_phy() method. + * Returns: + * - on success, a pointer to the sfp_bus structure, + * - %NULL if no SFP is specified, + * - on failure, an error pointer value: + * + * - corresponding to the errors detailed for + * fwnode_property_get_reference_args(). + * - %-ENOMEM if we failed to allocate the bus. + * - an error from the upstream's connect_phy() method. 
*/ int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream, const struct sfp_upstream_ops *ops) From 2de9780f75076c1a1f122cbd39df0fa545284724 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 17 Mar 2020 15:54:20 +0100 Subject: [PATCH 072/138] net: core: dev.c: fix a documentation warning There's a markup for link with is "foo_". On this kernel-doc comment, we don't want this, but instead, place a literal reference. So, escape the literal with ``foo``, in order to avoid this warning: ./net/core/dev.c:5195: WARNING: Unknown target name: "page_is". Signed-off-by: Mauro Carvalho Chehab Signed-off-by: David S. Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index c6c985fe7b1b..402a986659cf 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5195,7 +5195,7 @@ static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) * * More direct receive version of netif_receive_skb(). It should * only be used by callers that have a need to skip RPS and Generic XDP. - * Caller must also take care of handling if (page_is_)pfmemalloc. + * Caller must also take care of handling if ``(page_is_)pfmemalloc``. * * This function may only be called from softirq context and interrupts * should be enabled. From dd2af10402684cb5840a127caec9e7cdcff6d167 Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Wed, 18 Mar 2020 12:50:33 +0200 Subject: [PATCH 073/138] net/sched: act_ct: Fix leak of ct zone template on replace Currently, on replace, the previous action instance params is swapped with a newly allocated params. The old params is only freed (via kfree_rcu), without releasing the allocated ct zone template related to it. Call tcf_ct_params_free (via call_rcu) for the old params, so it will release it. Fixes: b57dc7c13ea9 ("net/sched: Introduce action ct") Signed-off-by: Paul Blakey Signed-off-by: David S. Miller --- net/sched/act_ct.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index f685c0d73708..41114b463161 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -739,7 +739,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla, if (goto_ch) tcf_chain_put_by_act(goto_ch); if (params) - kfree_rcu(params, rcu); + call_rcu(¶ms->rcu, tcf_ct_params_free); if (res == ACT_P_CREATED) tcf_idr_insert(tn, *a); From 384d91c267e621e0926062cfb3f20cb72dc16928 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Wed, 18 Mar 2020 13:28:09 +0000 Subject: [PATCH 074/138] vxlan: check return value of gro_cells_init() gro_cells_init() returns error if memory allocation is failed. But the vxlan module doesn't check the return value of gro_cells_init(). Fixes: 58ce31cca1ff ("vxlan: GRO support at tunnel layer")` Signed-off-by: Taehee Yoo Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d3b08b76b1ec..45308b3350cf 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2779,10 +2779,19 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan, /* Setup stats when device is created */ static int vxlan_init(struct net_device *dev) { + struct vxlan_dev *vxlan = netdev_priv(dev); + int err; + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; + err = gro_cells_init(&vxlan->gro_cells, dev); + if (err) { + free_percpu(dev->tstats); + return err; + } + return 0; } @@ -3043,8 +3052,6 @@ static void vxlan_setup(struct net_device *dev) vxlan->dev = dev; - gro_cells_init(&vxlan->gro_cells, dev); - for (h = 0; h < FDB_HASH_SIZE; ++h) { spin_lock_init(&vxlan->hash_lock[h]); INIT_HLIST_HEAD(&vxlan->fdb_head[h]); From 166391159c5deb84795d2ff46e95f276177fa5fb Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 18 Mar 2020 18:30:43 -0600 Subject: [PATCH 075/138] wireguard: selftests: remove duplicated include This commit removes a duplicated include. Signed-off-by: YueHaibing Signed-off-by: Jason A. Donenfeld Signed-off-by: David S. Miller --- tools/testing/selftests/wireguard/qemu/init.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/testing/selftests/wireguard/qemu/init.c b/tools/testing/selftests/wireguard/qemu/init.c index 90bc9813cadc..c9698120ac9d 100644 --- a/tools/testing/selftests/wireguard/qemu/init.c +++ b/tools/testing/selftests/wireguard/qemu/init.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include From 551599edbfff2431cef943a772fbde1c3e26eaf8 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Wed, 18 Mar 2020 18:30:44 -0600 Subject: [PATCH 076/138] wireguard: selftests: test using new 64-bit time_t In case this helps expose bugs with the newer 64-bit time_t types, we do our testing with the newer musl that supports this as well as CONFIG_COMPAT_32BIT_TIME=n. This matters to us, since wireguard does in fact deal with timestamps. Signed-off-by: Jason A. Donenfeld Signed-off-by: David S. 
Miller --- tools/testing/selftests/wireguard/qemu/Makefile | 2 +- tools/testing/selftests/wireguard/qemu/kernel.config | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile index 28d477683e8a..90598a425c18 100644 --- a/tools/testing/selftests/wireguard/qemu/Makefile +++ b/tools/testing/selftests/wireguard/qemu/Makefile @@ -41,7 +41,7 @@ $(DISTFILES_PATH)/$(1): flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp; [ -f $$@.tmp ] || exit 1; if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi' endef -$(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3)) +$(eval $(call tar_download,MUSL,musl,1.2.0,.tar.gz,https://musl.libc.org/releases/,c6de7b191139142d3f9a7b5b702c9cae1b5ee6e7f57e582da9328629408fd4e8)) $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) $(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config index af9323a0b6e0..d531de13c95b 100644 --- a/tools/testing/selftests/wireguard/qemu/kernel.config +++ b/tools/testing/selftests/wireguard/qemu/kernel.config @@ -56,7 +56,6 @@ CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n CONFIG_HZ_PERIODIC=n CONFIG_HIGH_RES_TIMERS=y -CONFIG_COMPAT_32BIT_TIME=y CONFIG_ARCH_RANDOM=y CONFIG_FILE_LOCKING=y CONFIG_POSIX_TIMERS=y From a5588604af448664e796daf3c1d5a4523c60667b Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Wed, 18 Mar 2020 18:30:45 -0600 Subject: [PATCH 077/138] wireguard: queueing: account for skb->protocol==0 We carry out checks to the effect of: if (skb->protocol != wg_examine_packet_protocol(skb)) goto err; By having wg_skb_examine_untrusted_ip_hdr return 0 on failure, this means that the check above still passes in the case where skb->protocol is zero, which is possible to hit with AF_PACKET: struct sockaddr_pkt saddr = { .spkt_device = "wg0" }; unsigned char buffer[5] = { 0 }; sendto(socket(AF_PACKET, SOCK_PACKET, /* skb->protocol = */ 0), buffer, sizeof(buffer), 0, (const struct sockaddr *)&saddr, sizeof(saddr)); Additional checks mean that this isn't actually a problem in the code base, but I could imagine it becoming a problem later if the function is used more liberally. I would prefer to fix this by having wg_examine_packet_protocol return a 32-bit ~0 value on failure, which will never match any value of skb->protocol, which would simply change the generated code from a mov to a movzx. However, sparse complains, and adding __force casts doesn't seem like a good idea, so instead we just add a simple helper function to check for the zero return value. Since wg_examine_packet_protocol itself gets inlined, this winds up not adding an additional branch to the generated code, since the 0 return value already happens in a mergable branch. Reported-by: Fabian Freyer Signed-off-by: Jason A. Donenfeld Signed-off-by: David S. 
Miller --- drivers/net/wireguard/device.c | 2 +- drivers/net/wireguard/queueing.h | 8 +++++++- drivers/net/wireguard/receive.c | 4 ++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index cdc96968b0f4..3ac3f8570ca1 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -122,7 +122,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) u32 mtu; int ret; - if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) { + if (unlikely(!wg_check_packet_protocol(skb))) { ret = -EPROTONOSUPPORT; net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name); goto err; diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index fecb559cbdb6..cf1e0e2376d8 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -66,7 +66,7 @@ struct packet_cb { #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) /* Returns either the correct skb->protocol value, or 0 if invalid. */ -static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb) +static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) { if (skb_network_header(skb) >= skb->head && (skb_network_header(skb) + sizeof(struct iphdr)) <= @@ -81,6 +81,12 @@ static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb) return 0; } +static inline bool wg_check_packet_protocol(struct sk_buff *skb) +{ + __be16 real_protocol = wg_examine_packet_protocol(skb); + return real_protocol && skb->protocol == real_protocol; +} + static inline void wg_reset_packet(struct sk_buff *skb) { skb_scrub_packet(skb, true); diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 4a153894cee2..243ed7172dd2 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -56,7 +56,7 @@ static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg) size_t data_offset, data_len, header_len; struct udphdr *udp; - if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol || + if (unlikely(!wg_check_packet_protocol(skb) || skb_transport_header(skb) < skb->head || (skb_transport_header(skb) + sizeof(struct udphdr)) > skb_tail_pointer(skb))) @@ -388,7 +388,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer, */ skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ~0; /* All levels */ - skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb); + skb->protocol = wg_examine_packet_protocol(skb); if (skb->protocol == htons(ETH_P_IP)) { len = ntohs(ip_hdr(skb)->tot_len); if (unlikely(len < sizeof(struct iphdr))) From 2b8765c52db24c0fbcc81bac9b5e8390f2c7d3c8 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Wed, 18 Mar 2020 18:30:46 -0600 Subject: [PATCH 078/138] wireguard: receive: remove dead code from default packet type case The situation in which we wind up hitting the default case here indicates a major bug in earlier parsing code. It is not a usual thing that should ever happen, which means a "friendly" message for it doesn't make sense. Rather, replace this with a WARN_ON, just like we do earlier in the file for a similar situation, so that somebody sends us a bug report and we can fix it. Reported-by: Fabian Freyer Signed-off-by: Jason A. Donenfeld Signed-off-by: David S. 
Miller --- drivers/net/wireguard/receive.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 243ed7172dd2..da3b782ab7d3 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -587,8 +587,7 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) wg_packet_consume_data(wg, skb); break; default: - net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n", - wg->dev->name, skb); + WARN(1, "Non-exhaustive parsing of packet header lead to unknown packet type!\n"); goto err; } return; From 11a7686aa99c7fe4b3f80f6dcccd54129817984d Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Wed, 18 Mar 2020 18:30:47 -0600 Subject: [PATCH 079/138] wireguard: noise: error out precomputed DH during handshake rather than config We precompute the static-static ECDH during configuration time, in order to save an expensive computation later when receiving network packets. However, not all ECDH computations yield a contributory result. Prior, we were just not letting those peers be added to the interface. However, this creates a strange inconsistency, since it was still possible to add other weird points, like a valid public key plus a low-order point, and, like points that result in zeros, a handshake would not complete. In order to make the behavior more uniform and less surprising, simply allow all peers to be added. Then, we'll error out later when doing the crypto if there's an issue. This also adds more separation between the crypto layer and the configuration layer. Discussed-with: Mathias Hall-Andersen Signed-off-by: Jason A. Donenfeld Signed-off-by: David S. Miller --- drivers/net/wireguard/netlink.c | 8 +--- drivers/net/wireguard/noise.c | 55 ++++++++++++---------- drivers/net/wireguard/noise.h | 12 ++--- drivers/net/wireguard/peer.c | 7 +-- tools/testing/selftests/wireguard/netns.sh | 15 ++++-- 5 files changed, 49 insertions(+), 48 deletions(-) diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index bda26405497c..802099c8828a 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -411,11 +411,7 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs) peer = wg_peer_create(wg, public_key, preshared_key); if (IS_ERR(peer)) { - /* Similar to the above, if the key is invalid, we skip - * it without fanfare, so that services don't need to - * worry about doing key validation themselves. - */ - ret = PTR_ERR(peer) == -EKEYREJECTED ? 
0 : PTR_ERR(peer); + ret = PTR_ERR(peer); peer = NULL; goto out; } @@ -569,7 +565,7 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info) private_key); list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) { - BUG_ON(!wg_noise_precompute_static_static(peer)); + wg_noise_precompute_static_static(peer); wg_noise_expire_current_peer_keypairs(peer); } wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index 919d9d866446..708dc61c974f 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -44,32 +44,23 @@ void __init wg_noise_init(void) } /* Must hold peer->handshake.static_identity->lock */ -bool wg_noise_precompute_static_static(struct wg_peer *peer) +void wg_noise_precompute_static_static(struct wg_peer *peer) { - bool ret; - down_write(&peer->handshake.lock); - if (peer->handshake.static_identity->has_identity) { - ret = curve25519( - peer->handshake.precomputed_static_static, + if (!peer->handshake.static_identity->has_identity || + !curve25519(peer->handshake.precomputed_static_static, peer->handshake.static_identity->static_private, - peer->handshake.remote_static); - } else { - u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 }; - - ret = curve25519(empty, empty, peer->handshake.remote_static); + peer->handshake.remote_static)) memset(peer->handshake.precomputed_static_static, 0, NOISE_PUBLIC_KEY_LEN); - } up_write(&peer->handshake.lock); - return ret; } -bool wg_noise_handshake_init(struct noise_handshake *handshake, - struct noise_static_identity *static_identity, - const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], - const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], - struct wg_peer *peer) +void wg_noise_handshake_init(struct noise_handshake *handshake, + struct noise_static_identity *static_identity, + const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], + const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], + struct wg_peer *peer) { memset(handshake, 0, sizeof(*handshake)); init_rwsem(&handshake->lock); @@ -81,7 +72,7 @@ bool wg_noise_handshake_init(struct noise_handshake *handshake, NOISE_SYMMETRIC_KEY_LEN); handshake->static_identity = static_identity; handshake->state = HANDSHAKE_ZEROED; - return wg_noise_precompute_static_static(peer); + wg_noise_precompute_static_static(peer); } static void handshake_zero(struct noise_handshake *handshake) @@ -403,6 +394,19 @@ static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], return true; } +static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN], + u8 key[NOISE_SYMMETRIC_KEY_LEN], + const u8 precomputed[NOISE_PUBLIC_KEY_LEN]) +{ + static u8 zero_point[NOISE_PUBLIC_KEY_LEN]; + if (unlikely(!crypto_memneq(precomputed, zero_point, NOISE_PUBLIC_KEY_LEN))) + return false; + kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN, + NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, + chaining_key); + return true; +} + static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len) { struct blake2s_state blake; @@ -531,10 +535,9 @@ wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, NOISE_PUBLIC_KEY_LEN, key, handshake->hash); /* ss */ - kdf(handshake->chaining_key, key, NULL, - handshake->precomputed_static_static, NOISE_HASH_LEN, - NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, - handshake->chaining_key); + if (!mix_precomputed_dh(handshake->chaining_key, key, + handshake->precomputed_static_static)) + goto out; /* {t} */ tai64n_now(timestamp); 
@@ -595,9 +598,9 @@ wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, handshake = &peer->handshake; /* ss */ - kdf(chaining_key, key, NULL, handshake->precomputed_static_static, - NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, - chaining_key); + if (!mix_precomputed_dh(chaining_key, key, + handshake->precomputed_static_static)) + goto out; /* {t} */ if (!message_decrypt(t, src->encrypted_timestamp, diff --git a/drivers/net/wireguard/noise.h b/drivers/net/wireguard/noise.h index 138a07bb817c..f532d59d3f19 100644 --- a/drivers/net/wireguard/noise.h +++ b/drivers/net/wireguard/noise.h @@ -94,11 +94,11 @@ struct noise_handshake { struct wg_device; void wg_noise_init(void); -bool wg_noise_handshake_init(struct noise_handshake *handshake, - struct noise_static_identity *static_identity, - const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], - const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], - struct wg_peer *peer); +void wg_noise_handshake_init(struct noise_handshake *handshake, + struct noise_static_identity *static_identity, + const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], + const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], + struct wg_peer *peer); void wg_noise_handshake_clear(struct noise_handshake *handshake); static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns) { @@ -116,7 +116,7 @@ void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer); void wg_noise_set_static_identity_private_key( struct noise_static_identity *static_identity, const u8 private_key[NOISE_PUBLIC_KEY_LEN]); -bool wg_noise_precompute_static_static(struct wg_peer *peer); +void wg_noise_precompute_static_static(struct wg_peer *peer); bool wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c index 071eedf33f5a..1d634bd3038f 100644 --- a/drivers/net/wireguard/peer.c +++ b/drivers/net/wireguard/peer.c @@ -34,11 +34,8 @@ struct wg_peer *wg_peer_create(struct wg_device *wg, return ERR_PTR(ret); peer->device = wg; - if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity, - public_key, preshared_key, peer)) { - ret = -EKEYREJECTED; - goto err_1; - } + wg_noise_handshake_init(&peer->handshake, &wg->static_identity, + public_key, preshared_key, peer); if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) goto err_1; if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh index 138d46b3f330..936e1ca9410e 100755 --- a/tools/testing/selftests/wireguard/netns.sh +++ b/tools/testing/selftests/wireguard/netns.sh @@ -527,11 +527,16 @@ n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0 n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 n0 wg set wg0 peer "$pub2" allowed-ips ::/0 n0 wg set wg0 peer "$pub2" remove -low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= ) -n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer } -[[ -z $(n0 wg show wg0 peers) ]] -n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer } -[[ -z $(n0 wg show wg0 peers) ]] +for low_order_point in 
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38=; do + n0 wg set wg0 peer "$low_order_point" persistent-keepalive 1 endpoint 127.0.0.1:1111 +done +[[ -n $(n0 wg show wg0 peers) ]] +exec 4< <(n0 ncat -l -u -p 1111) +ncat_pid=$! +waitncatudp $netns0 $ncat_pid +ip0 link set wg0 up +! read -r -n 1 -t 2 <&4 || false +kill $ncat_pid ip0 link del wg0 declare -A objects From 61abaf02d2ec3d4575c66fa1b4a863877736b932 Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Tue, 17 Mar 2020 10:02:52 +0800 Subject: [PATCH 080/138] netfilter: flowtable: reload ip{v6}h in nf_flow_nat_ip{v6} Since nf_flow_snat_port and nf_flow_snat_ip{v6} call pskb_may_pull() which may change skb->data, so we need to reload ip{v6}h at the right place. Fixes: a908fdec3dda ("netfilter: nf_flow_table: move ipv6 offload hook code to nf_flow_table") Fixes: 7d2086871762 ("netfilter: nf_flow_table: move ipv4 offload hook code to nf_flow_table") Signed-off-by: Haishuang Yan Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_ip.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 9e563fd3da0f..22caab7bb755 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -146,11 +146,13 @@ static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb, if (test_bit(NF_FLOW_SNAT, &flow->flags) && (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 || - nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0)) + nf_flow_snat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0)) return -1; + + iph = ip_hdr(skb); if (test_bit(NF_FLOW_DNAT, &flow->flags) && (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 || - nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0)) + nf_flow_dnat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0)) return -1; return 0; @@ -426,11 +428,13 @@ static int nf_flow_nat_ipv6(const struct flow_offload *flow, if (test_bit(NF_FLOW_SNAT, &flow->flags) && (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 || - nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0)) + nf_flow_snat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0)) return -1; + + ip6h = ipv6_hdr(skb); if (test_bit(NF_FLOW_DNAT, &flow->flags) && (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 || - nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0)) + nf_flow_dnat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0)) return -1; return 0; From 41e9ec5a54f95eee1a57c8d26ab70e0492548c1b Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Tue, 17 Mar 2020 10:02:53 +0800 Subject: [PATCH 081/138] netfilter: flowtable: reload ip{v6}h in nf_flow_tuple_ip{v6} Since pskb_may_pull may change skb->data, so we need to reload ip{v6}h at the right place. 
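The pattern being applied here can be sketched against generic skb helpers (the function name and the udphdr length below are illustrative only, not the in-tree flowtable code):

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>

static int example_parse_l4(struct sk_buff *skb, unsigned int thoff)
{
	struct iphdr *iph = ip_hdr(skb);	/* points into the current skb->data */

	if (!pskb_may_pull(skb, thoff + sizeof(struct udphdr)))
		return -1;

	/* pskb_may_pull() may reallocate the header and move skb->data,
	 * so any header pointer taken before the call can be stale;
	 * reload it before dereferencing.
	 */
	iph = ip_hdr(skb);

	return iph->protocol;
}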
Fixes: a908fdec3dda ("netfilter: nf_flow_table: move ipv6 offload hook code to nf_flow_table") Fixes: 7d2086871762 ("netfilter: nf_flow_table: move ipv4 offload hook code to nf_flow_table") Signed-off-by: Haishuang Yan Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_ip.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 22caab7bb755..ba775aecd89a 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -191,6 +191,7 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, if (!pskb_may_pull(skb, thoff + sizeof(*ports))) return -1; + iph = ip_hdr(skb); ports = (struct flow_ports *)(skb_network_header(skb) + thoff); tuple->src_v4.s_addr = iph->saddr; @@ -463,6 +464,7 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, if (!pskb_may_pull(skb, thoff + sizeof(*ports))) return -1; + ip6h = ipv6_hdr(skb); ports = (struct flow_ports *)(skb_network_header(skb) + thoff); tuple->src_v6 = ip6h->saddr; From c921ffe853332584eae4f5905cb2a14a7b3c9932 Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Thu, 19 Mar 2020 11:52:25 +0200 Subject: [PATCH 082/138] netfilter: flowtable: Fix flushing of offloaded flows on free Freeing a flowtable with offloaded flows, the flow are deleted from hardware but are not deleted from the flow table, leaking them, and leaving their offload bit on. Add a second pass of the disabled gc to delete the these flows from the flow table before freeing it. Fixes: c29f74e0df7a ("netfilter: nf_flow_table: hardware offload support") Signed-off-by: Paul Blakey Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 8af28e10b4e6..70ebebaf5bc1 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -554,6 +554,9 @@ void nf_flow_table_free(struct nf_flowtable *flow_table) nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL); nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table); nf_flow_table_offload_flush(flow_table); + if (nf_flowtable_hw_offload(flow_table)) + nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, + flow_table); rhashtable_destroy(&flow_table->rhashtable); } EXPORT_SYMBOL_GPL(nf_flow_table_free); From 15ff197237e76c4dab06b7b518afaa4ebb1c43e0 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 19 Mar 2020 19:37:21 +0000 Subject: [PATCH 083/138] netfilter: flowtable: populate addr_type mask nf_flow_rule_match() sets control.addr_type in key, so needs to also set the corresponding mask. An exact match is wanted, so mask is all ones. 
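The key/mask convention this relies on can be sketched as follows (illustrative values, not the in-tree nf_flow_rule_match() code): hardware only matches the bits set in the mask, so an exact match on a key field needs an all-ones mask for that field.

#include <net/flow_dissector.h>

struct flow_dissector_key_control example_key = {
	.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS,	/* value to match */
};

struct flow_dissector_key_control example_mask = {
	.addr_type = 0xffff,	/* all ones: match addr_type exactly */
};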
Fixes: c29f74e0df7a ("netfilter: nf_flow_table: hardware offload support") Signed-off-by: Edward Cree Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_offload.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index 06f00cdc3891..f2c22c682851 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -87,6 +87,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match, default: return -EOPNOTSUPP; } + mask->control.addr_type = 0xffff; match->dissector.used_keys |= BIT(key->control.addr_type); mask->basic.n_proto = 0xffff; From 6002059d7882c3512e6ac52fa82424272ddfcd5c Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 19 Mar 2020 13:25:39 +0200 Subject: [PATCH 084/138] mlxsw: pci: Only issue reset when system is ready During initialization the driver issues a software reset command and then waits for the system status to change back to "ready" state. However, before issuing the reset command the driver does not check that the system is actually in "ready" state. On Spectrum-{1,2} systems this was always the case as the hardware initialization time is very short. On Spectrum-3 systems this is no longer the case. This results in the software reset command timing-out and the driver failing to load: [ 6.347591] mlxsw_spectrum3 0000:06:00.0: Cmd exec timed-out (opcode=40(ACCESS_REG),opcode_mod=0,in_mod=0) [ 6.358382] mlxsw_spectrum3 0000:06:00.0: Reg cmd access failed (reg_id=9023(mrsr),type=write) [ 6.368028] mlxsw_spectrum3 0000:06:00.0: cannot register bus device [ 6.375274] mlxsw_spectrum3: probe of 0000:06:00.0 failed with error -110 Fix this by waiting for the system to become ready both before issuing the reset command and afterwards. In case of failure, print the last system status to aid in debugging. Fixes: da382875c616 ("mlxsw: spectrum: Extend to support Spectrum-3 ASIC") Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/pci.c | 50 ++++++++++++++++++----- 1 file changed, 39 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 914c33e46fb4..e9ded1a6e131 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1322,36 +1322,64 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci, mbox->mapaddr); } -static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, - const struct pci_device_id *id) +static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci, + const struct pci_device_id *id, + u32 *p_sys_status) { unsigned long end; - char mrsr_pl[MLXSW_REG_MRSR_LEN]; - int err; + u32 val; - mlxsw_reg_mrsr_pack(mrsr_pl); - err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); - if (err) - return err; if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); return 0; } - /* We must wait for the HW to become responsive once again. */ + /* We must wait for the HW to become responsive. 
*/ msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); do { - u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); - + val = mlxsw_pci_read32(mlxsw_pci, FW_READY); if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) return 0; cond_resched(); } while (time_before(jiffies, end)); + + *p_sys_status = val & MLXSW_PCI_FW_READY_MASK; + return -EBUSY; } +static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, + const struct pci_device_id *id) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + char mrsr_pl[MLXSW_REG_MRSR_LEN]; + u32 sys_status; + int err; + + err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status); + if (err) { + dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n", + sys_status); + return err; + } + + mlxsw_reg_mrsr_pack(mrsr_pl); + err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); + if (err) + return err; + + err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status); + if (err) { + dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n", + sys_status); + return err; + } + + return 0; +} + static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) { int err; From 22259471b51925353bd7b16f864c79fdd76e425e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20van=20Dorst?= Date: Thu, 19 Mar 2020 14:47:56 +0100 Subject: [PATCH 085/138] net: dsa: mt7530: Change the LINK bit to reflect the link status MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Andrew reported: After a number of network port link up/down changes, sometimes the switch port gets stuck in a state where it thinks it is still transmitting packets but the cpu port is not actually transmitting anymore. In this state you will see a message on the console "mtk_soc_eth 1e100000.ethernet eth0: transmit timed out" and the Tx counter in ifconfig will be incrementing on virtual port, but not incrementing on cpu port. The issue is that MAC TX/RX status has no impact on the link status or queue manager of the switch. So the queue manager just queues up packets of a disabled port and sends out pause frames when the queue is full. Change the LINK bit to reflect the link status. Fixes: b8f126a8d543 ("net-next: dsa: add dsa support for Mediatek MT7530 switch") Reported-by: Andrew Smith Signed-off-by: René van Dorst Reviewed-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/mt7530.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 022466ca1c19..7cbd1bd4c5a6 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -566,7 +566,7 @@ mt7530_mib_reset(struct dsa_switch *ds) static void mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable) { - u32 mask = PMCR_TX_EN | PMCR_RX_EN; + u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK; if (enable) mt7530_set(priv, MT7530_PMCR_P(port), mask); @@ -1444,7 +1444,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port, mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 | PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN); mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN | - PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK; + PMCR_BACKPR_EN | PMCR_FORCE_MODE; /* Are we connected to external phy */ if (port == 5 && dsa_is_user_port(ds, 5)) From 7affd80802afb6ca92dba47d768632fbde365241 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Thu, 19 Mar 2020 23:08:09 +0530 Subject: [PATCH 086/138] cxgb4: fix throughput drop during Tx backpressure commit 7c3bebc3d868 ("cxgb4: request the TX CIDX updates to status page") reverted back to getting Tx CIDX updates via DMA, instead of interrupts, introduced by commit d429005fdf2c ("cxgb4/cxgb4vf: Add support for SGE doorbell queue timer") However, it missed reverting back several code changes where Tx CIDX updates are not explicitly requested during backpressure when using interrupt mode. These missed changes cause slow recovery during backpressure because the corresponding interrupt no longer comes and hence results in Tx throughput drop. So, revert back these missed code changes, as well, which will allow explicitly requesting Tx CIDX updates when backpressure happens. This enables the corresponding interrupt with Tx CIDX update message to get generated and hence speed up recovery and restore back throughput. Fixes: 7c3bebc3d868 ("cxgb4: request the TX CIDX updates to status page") Fixes: d429005fdf2c ("cxgb4/cxgb4vf: Add support for SGE doorbell queue timer") Signed-off-by: Rahul Lakkireddy Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/sge.c | 42 ++---------------------- 1 file changed, 2 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 97cda501e7e8..c816837fbd85 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1486,16 +1486,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) * has opened up. */ eth_txq_stop(q); - - /* If we're using the SGE Doorbell Queue Timer facility, we - * don't need to ask the Firmware to send us Egress Queue CIDX - * Updates: the Hardware will do this automatically. And - * since we send the Ingress Queue CIDX Updates to the - * corresponding Ethernet Response Queue, we'll get them very - * quickly. - */ - if (!q->dbqt) - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } wr = (void *)&q->q.desc[q->q.pidx]; @@ -1805,16 +1796,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, * has opened up. */ eth_txq_stop(txq); - - /* If we're using the SGE Doorbell Queue Timer facility, we - * don't need to ask the Firmware to send us Egress Queue CIDX - * Updates: the Hardware will do this automatically. 
And - * since we send the Ingress Queue CIDX Updates to the - * corresponding Ethernet Response Queue, we'll get them very - * quickly. - */ - if (!txq->dbqt) - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } /* Start filling in our Work Request. Note that we do _not_ handle @@ -3370,26 +3352,6 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq, } txq = &s->ethtxq[pi->first_qset + rspq->idx]; - - /* We've got the Hardware Consumer Index Update in the Egress Update - * message. If we're using the SGE Doorbell Queue Timer mechanism, - * these Egress Update messages will be our sole CIDX Updates we get - * since we don't want to chew up PCIe bandwidth for both Ingress - * Messages and Status Page writes. However, The code which manages - * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value - * stored in the Status Page at the end of the TX Queue. It's easiest - * to simply copy the CIDX Update value from the Egress Update message - * to the Status Page. Also note that no Endian issues need to be - * considered here since both are Big Endian and we're just copying - * bytes consistently ... - */ - if (txq->dbqt) { - struct cpl_sge_egr_update *egr; - - egr = (struct cpl_sge_egr_update *)rsp; - WRITE_ONCE(txq->q.stat->cidx, egr->cidx); - } - t4_sge_eth_txq_egress_update(adapter, txq, -1); } From f1f20a8666c55cb534b8f3fc1130eebf01a06155 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Thu, 19 Mar 2020 23:08:10 +0530 Subject: [PATCH 087/138] cxgb4: fix Txq restart check during backpressure Driver reclaims descriptors in much smaller batches, even if hardware indicates more to reclaim, during backpressure. So, fix the check to restart the Txq during backpressure, by looking at how many descriptors hardware had indicated to reclaim, and not on how many descriptors that driver had actually reclaimed. Once the Txq is restarted, driver will reclaim even more descriptors when Tx path is entered again. Fixes: d429005fdf2c ("cxgb4/cxgb4vf: Add support for SGE doorbell queue timer") Signed-off-by: Rahul Lakkireddy Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/sge.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index c816837fbd85..cab3d17e0e1a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1307,8 +1307,9 @@ static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb, int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, int maxreclaim) { + unsigned int reclaimed, hw_cidx; struct sge_txq *q = &eq->q; - unsigned int reclaimed; + int hw_in_use; if (!q->in_use || !__netif_tx_trylock(eq->txq)) return 0; @@ -1316,12 +1317,17 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, /* Reclaim pending completed TX Descriptors. */ reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true); + hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); + hw_in_use = q->pidx - hw_cidx; + if (hw_in_use < 0) + hw_in_use += q->size; + /* If the TX Queue is currently stopped and there's now more than half * the queue available, restart it. Otherwise bail out since the rest * of what we want do here is with the possibility of shipping any * currently buffered Coalesced TX Work Request. 
*/ - if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) { + if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { netif_tx_wake_queue(eq->txq); eq->q.restarts++; } From b738a185beaab8728943acdb3e67371b8a88185e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Mar 2020 12:49:55 -0700 Subject: [PATCH 088/138] tcp: ensure skb->dev is NULL before leaving TCP stack skb->rbnode is sharing three skb fields : next, prev, dev When a packet is sent, TCP keeps the original skb (master) in a rtx queue, which was converted to rbtree a while back. __tcp_transmit_skb() is responsible to clone the master skb, and add the TCP header to the clone before sending it to network layer. skb_clone() already clears skb->next and skb->prev, but copies the master oskb->dev into the clone. We need to clear skb->dev, otherwise lower layers could interpret the value as a pointer to a netdev. This old bug surfaced recently when commit 28f8bfd1ac94 ("netfilter: Support iif matches in POSTROUTING") was merged. Before this netfilter commit, skb->dev value was ignored and changed before reaching dev_queue_xmit() Fixes: 75c119afe14f ("tcp: implement rb-tree based retransmit queue") Fixes: 28f8bfd1ac94 ("netfilter: Support iif matches in POSTROUTING") Signed-off-by: Eric Dumazet Reported-by: Martin Zaharinov Cc: Florian Westphal Cc: Pablo Neira Ayuso Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 306e25d743e8..e8cf8fde3d37 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1109,6 +1109,10 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, if (unlikely(!skb)) return -ENOBUFS; + /* retransmit skbs might have a non zero value in skb->dev + * because skb->dev is aliased with skb->rbnode.rb_left + */ + skb->dev = NULL; } inet = inet_sk(sk); From 07f8e4d0fddbf2f87e4cefb551278abc38db8cdd Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 20 Mar 2020 16:52:02 +0100 Subject: [PATCH 089/138] tcp: also NULL skb->dev when copy was needed In rare cases retransmit logic will make a full skb copy, which will not trigger the zeroing added in recent change b738a185beaa ("tcp: ensure skb->dev is NULL before leaving TCP stack"). Cc: Eric Dumazet Fixes: 75c119afe14f ("tcp: implement rb-tree based retransmit queue") Fixes: 28f8bfd1ac94 ("netfilter: Support iif matches in POSTROUTING") Signed-off-by: Florian Westphal Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e8cf8fde3d37..2f45cde168c4 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3041,8 +3041,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) tcp_skb_tsorted_save(skb) { nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); - err = nskb ? 
tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : - -ENOBUFS; + if (nskb) { + nskb->dev = NULL; + err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC); + } else { + err = -ENOBUFS; + } } tcp_skb_tsorted_restore(skb); if (!err) { From 96758117dc528e6d84bd23d205e8cf7f31eda029 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Fri, 20 Mar 2020 23:13:16 +0000 Subject: [PATCH 090/138] hinic: fix a bug of waitting for IO stopped it's unreliable for fw to check whether IO is stopped, so driver wait for enough time to ensure IO process is done in hw before freeing resources Signed-off-by: Luo bin Signed-off-by: David S. Miller --- .../net/ethernet/huawei/hinic/hinic_hw_dev.c | 51 +------------------ 1 file changed, 2 insertions(+), 49 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 79b3d53f2fbf..c7c75b772a86 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -360,50 +360,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev) return -EFAULT; } -static int wait_for_io_stopped(struct hinic_hwdev *hwdev) -{ - struct hinic_cmd_io_status cmd_io_status; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - unsigned long end; - u16 out_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT); - do { - err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, - HINIC_COMM_CMD_IO_STATUS_GET, - &cmd_io_status, sizeof(cmd_io_status), - &cmd_io_status, &out_size, - HINIC_MGMT_MSG_SYNC); - if ((err) || (out_size != sizeof(cmd_io_status))) { - dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n", - err); - return err; - } - - if (cmd_io_status.status == IO_STOPPED) { - dev_info(&pdev->dev, "IO stopped\n"); - return 0; - } - - msleep(20); - } while (time_before(jiffies, end)); - - dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n"); - return -ETIMEDOUT; -} - /** * clear_io_resource - set the IO resources as not active in the NIC * @hwdev: the NIC HW device @@ -423,11 +379,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev) return -EINVAL; } - err = wait_for_io_stopped(hwdev); - if (err) { - dev_err(&pdev->dev, "IO has not stopped yet\n"); - return err; - } + /* sleep 100ms to wait for firmware stopping I/O */ + msleep(100); cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif); From 614eaa943e9fc3fcdbd4aa0692ae84973d363333 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Fri, 20 Mar 2020 23:13:17 +0000 Subject: [PATCH 091/138] hinic: fix the bug of clearing event queue should disable eq irq before freeing it, must clear event queue depth in hw before freeing relevant memory to avoid illegal memory access and update consumer idx to avoid invalid interrupt Signed-off-by: Luo bin Signed-off-by: David S. 
Miller --- .../net/ethernet/huawei/hinic/hinic_hw_eqs.c | 24 +++++++++++++------ 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index 79243b626ddb..6a723c4757bc 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -188,7 +188,7 @@ static u8 eq_cons_idx_checksum_set(u32 val) * eq_update_ci - update the HW cons idx of event queue * @eq: the event queue to update the cons idx for **/ -static void eq_update_ci(struct hinic_eq *eq) +static void eq_update_ci(struct hinic_eq *eq, u32 arm_state) { u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq); @@ -202,7 +202,7 @@ static void eq_update_ci(struct hinic_eq *eq) val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) | HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) | - HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED); + HINIC_EQ_CI_SET(arm_state, INT_ARMED); val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM); @@ -347,7 +347,7 @@ static void eq_irq_handler(void *data) else if (eq->type == HINIC_CEQ) ceq_irq_handler(eq); - eq_update_ci(eq); + eq_update_ci(eq, EQ_ARMED); } /** @@ -702,7 +702,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, } set_eq_ctrls(eq); - eq_update_ci(eq); + eq_update_ci(eq, EQ_ARMED); err = alloc_eq_pages(eq); if (err) { @@ -752,18 +752,28 @@ err_req_irq: **/ static void remove_eq(struct hinic_eq *eq) { - struct msix_entry *entry = &eq->msix_entry; - - free_irq(entry->vector, eq); + hinic_set_msix_state(eq->hwif, eq->msix_entry.entry, + HINIC_MSIX_DISABLE); + free_irq(eq->msix_entry.vector, eq); if (eq->type == HINIC_AEQ) { struct hinic_eq_work *aeq_work = &eq->aeq_work; cancel_work_sync(&aeq_work->work); + /* clear aeq_len to avoid hw access host memory */ + hinic_hwif_write_reg(eq->hwif, + HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); } else if (eq->type == HINIC_CEQ) { tasklet_kill(&eq->ceq_tasklet); + /* clear ceq_len to avoid hw access host memory */ + hinic_hwif_write_reg(eq->hwif, + HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0); } + /* update cons_idx to avoid invalid interrupt */ + eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq)); + eq_update_ci(eq, EQ_NOT_ARMED); + free_eq_pages(eq); } From 33f15da216a1f4566b4ec880942556ace30615df Mon Sep 17 00:00:00 2001 From: Luo bin Date: Fri, 20 Mar 2020 23:13:18 +0000 Subject: [PATCH 092/138] hinic: fix out-of-order excution in arm cpu add read barrier in driver code to keep from reading other fileds in dma memory which is writable for hw until we have verified the memory is valid for driver Signed-off-by: Luo bin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 2 ++ drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 2 ++ drivers/net/ethernet/huawei/hinic/hinic_rx.c | 3 +++ drivers/net/ethernet/huawei/hinic/hinic_tx.c | 2 ++ 4 files changed, 9 insertions(+) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index eb53c15b13f3..33f93cc25193 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -623,6 +623,8 @@ static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci, if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info))) return -EBUSY; + dma_rmb(); + errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL); cmdq_sync_cmd_handler(cmdq, ci, errcode); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index 6a723c4757bc..c0b6bcb067cd 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -235,6 +235,8 @@ static void aeq_irq_handler(struct hinic_eq *eq) if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) break; + dma_rmb(); + event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE); if (event >= HINIC_MAX_AEQ_EVENTS) { dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 2695ad69fca6..815649e37cb1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -350,6 +350,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget) if (!rq_wqe) break; + /* make sure we read rx_done before packet length */ + dma_rmb(); + cqe = rq->cqe[ci]; status = be32_to_cpu(cqe->status); hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 0e13d1c7e474..375d81d03e86 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -622,6 +622,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget) do { hw_ci = HW_CONS_IDX(sq) & wq->mask; + dma_rmb(); + /* Reading a WQEBB to get real WQE size and consumer index. */ sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci); if ((!sq_wqe) || From 0da7c322f116210ebfdda59c7da663a6fc5e9cc8 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Fri, 20 Mar 2020 23:13:19 +0000 Subject: [PATCH 093/138] hinic: fix wrong para of wait_for_completion_timeout the second input parameter of wait_for_completion_timeout should be jiffies instead of millisecond Signed-off-by: Luo bin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 3 ++- drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index 33f93cc25193..5f2d57d1b2d3 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -389,7 +389,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, spin_unlock_bh(&cmdq->cmdq_lock); - if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) { + if (!wait_for_completion_timeout(&done, + msecs_to_jiffies(CMDQ_TIMEOUT))) { spin_lock_bh(&cmdq->cmdq_lock); if (cmdq->errcode[curr_prod_idx] == &errcode) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index c1a6be6bf6a8..8995e32dd1c0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -43,7 +43,7 @@ #define MSG_NOT_RESP 0xFFFF -#define MGMT_MSG_TIMEOUT 1000 +#define MGMT_MSG_TIMEOUT 5000 #define mgmt_to_pfhwdev(pf_mgmt) \ container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) @@ -267,7 +267,8 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, goto unlock_sync_msg; } - if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) { + if (!wait_for_completion_timeout(recv_done, + msecs_to_jiffies(MGMT_MSG_TIMEOUT))) { dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); err = -ETIMEDOUT; goto unlock_sync_msg; From 7296695fc16dd1761dbba8b68a9181c71cef0633 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Fri, 20 Mar 2020 23:13:20 +0000 Subject: [PATCH 094/138] hinic: fix wrong value of MIN_SKB_LEN the minimum value of skb len that hw supports is 32 rather than 17 Signed-off-by: Luo bin Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 375d81d03e86..365016450bdb 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -45,7 +45,7 @@ #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) -#define MIN_SKB_LEN 17 +#define MIN_SKB_LEN 32 #define MAX_PAYLOAD_OFFSET 221 #define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) From 3a303cfdd28d5f930a307c82e8a9d996394d5ebd Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Sat, 21 Mar 2020 06:46:50 +0000 Subject: [PATCH 095/138] hsr: fix general protection fault in hsr_addr_is_self() The port->hsr is used in the hsr_handle_frame(), which is a callback of rx_handler. hsr master and slaves are initialized in hsr_add_port(). This function initializes several pointers, which includes port->hsr after registering rx_handler. So, in the rx_handler routine, un-initialized pointer would be used. In order to fix this, pointers should be initialized before registering rx_handler. 
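The ordering the fix enforces can be sketched with the generic rx_handler API (field names follow the hsr port structure; the error label is illustrative):

port->hsr = hsr;
port->dev = dev;
port->type = type;

/* Only register the handler once everything it dereferences is set:
 * hsr_handle_frame() may run on another CPU as soon as
 * netdev_rx_handler_register() succeeds.
 */
res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
if (res)
	goto fail_dev_setup;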
Test commands: ip netns del left ip netns del right modprobe -rv veth modprobe -rv hsr killall ping modprobe hsr ip netns add left ip netns add right ip link add veth0 type veth peer name veth1 ip link add veth2 type veth peer name veth3 ip link add veth4 type veth peer name veth5 ip link set veth1 netns left ip link set veth3 netns right ip link set veth4 netns left ip link set veth5 netns right ip link set veth0 up ip link set veth2 up ip link set veth0 address fc:00:00:00:00:01 ip link set veth2 address fc:00:00:00:00:02 ip netns exec left ip link set veth1 up ip netns exec left ip link set veth4 up ip netns exec right ip link set veth3 up ip netns exec right ip link set veth5 up ip link add hsr0 type hsr slave1 veth0 slave2 veth2 ip a a 192.168.100.1/24 dev hsr0 ip link set hsr0 up ip netns exec left ip link add hsr1 type hsr slave1 veth1 slave2 veth4 ip netns exec left ip a a 192.168.100.2/24 dev hsr1 ip netns exec left ip link set hsr1 up ip netns exec left ip n a 192.168.100.1 dev hsr1 lladdr \ fc:00:00:00:00:01 nud permanent ip netns exec left ip n r 192.168.100.1 dev hsr1 lladdr \ fc:00:00:00:00:01 nud permanent for i in {1..100} do ip netns exec left ping 192.168.100.1 & done ip netns exec left hping3 192.168.100.1 -2 --flood & ip netns exec right ip link add hsr2 type hsr slave1 veth3 slave2 veth5 ip netns exec right ip a a 192.168.100.3/24 dev hsr2 ip netns exec right ip link set hsr2 up ip netns exec right ip n a 192.168.100.1 dev hsr2 lladdr \ fc:00:00:00:00:02 nud permanent ip netns exec right ip n r 192.168.100.1 dev hsr2 lladdr \ fc:00:00:00:00:02 nud permanent for i in {1..100} do ip netns exec right ping 192.168.100.1 & done ip netns exec right hping3 192.168.100.1 -2 --flood & while : do ip link add hsr0 type hsr slave1 veth0 slave2 veth2 ip a a 192.168.100.1/24 dev hsr0 ip link set hsr0 up ip link del hsr0 done Splat looks like: [ 120.954938][ C0] general protection fault, probably for non-canonical address 0xdffffc0000000006: 0000 [#1]I [ 120.957761][ C0] KASAN: null-ptr-deref in range [0x0000000000000030-0x0000000000000037] [ 120.959064][ C0] CPU: 0 PID: 1511 Comm: hping3 Not tainted 5.6.0-rc5+ #460 [ 120.960054][ C0] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 [ 120.962261][ C0] RIP: 0010:hsr_addr_is_self+0x65/0x2a0 [hsr] [ 120.963149][ C0] Code: 44 24 18 70 73 2f c0 48 c1 eb 03 48 8d 04 13 c7 00 f1 f1 f1 f1 c7 40 04 00 f2 f2 f2 4 [ 120.966277][ C0] RSP: 0018:ffff8880d9c09af0 EFLAGS: 00010206 [ 120.967293][ C0] RAX: 0000000000000006 RBX: 1ffff1101b38135f RCX: 0000000000000000 [ 120.968516][ C0] RDX: dffffc0000000000 RSI: ffff8880d17cb208 RDI: 0000000000000000 [ 120.969718][ C0] RBP: 0000000000000030 R08: ffffed101b3c0e3c R09: 0000000000000001 [ 120.972203][ C0] R10: 0000000000000001 R11: ffffed101b3c0e3b R12: 0000000000000000 [ 120.973379][ C0] R13: ffff8880aaf80100 R14: ffff8880aaf800f2 R15: ffff8880aaf80040 [ 120.974410][ C0] FS: 00007f58e693f740(0000) GS:ffff8880d9c00000(0000) knlGS:0000000000000000 [ 120.979794][ C0] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 120.980773][ C0] CR2: 00007ffcb8b38f29 CR3: 00000000afe8e001 CR4: 00000000000606f0 [ 120.981945][ C0] Call Trace: [ 120.982411][ C0] [ 120.982848][ C0] ? hsr_add_node+0x8c0/0x8c0 [hsr] [ 120.983522][ C0] ? rcu_read_lock_held+0x90/0xa0 [ 120.984159][ C0] ? rcu_read_lock_sched_held+0xc0/0xc0 [ 120.984944][ C0] hsr_handle_frame+0x1db/0x4e0 [hsr] [ 120.985597][ C0] ? 
hsr_nl_nodedown+0x2b0/0x2b0 [hsr] [ 120.986289][ C0] __netif_receive_skb_core+0x6bf/0x3170 [ 120.992513][ C0] ? check_chain_key+0x236/0x5d0 [ 120.993223][ C0] ? do_xdp_generic+0x1460/0x1460 [ 120.993875][ C0] ? register_lock_class+0x14d0/0x14d0 [ 120.994609][ C0] ? __netif_receive_skb_one_core+0x8d/0x160 [ 120.995377][ C0] __netif_receive_skb_one_core+0x8d/0x160 [ 120.996204][ C0] ? __netif_receive_skb_core+0x3170/0x3170 [ ... ] Reported-by: syzbot+fcf5dd39282ceb27108d@syzkaller.appspotmail.com Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.") Signed-off-by: Taehee Yoo Signed-off-by: David S. Miller --- net/hsr/hsr_slave.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index fbfd0db182b7..a9104d42aafb 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -145,16 +145,16 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev, if (!port) return -ENOMEM; + port->hsr = hsr; + port->dev = dev; + port->type = type; + if (type != HSR_PT_MASTER) { res = hsr_portdev_setup(dev, port); if (res) goto fail_dev_setup; } - port->hsr = hsr; - port->dev = dev; - port->type = type; - list_add_tail_rcu(&port->port_list, &hsr->ports); synchronize_rcu(); From 06e9bfc1e57daeead541c9f02968412cd284b189 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Sat, 21 Mar 2020 13:05:14 +0100 Subject: [PATCH 096/138] ionic: make spdxcheck.py happy Headers ionic_if.h and ionic_regs.h are licensed under three alternative licenses and the used SPDX-License-Identifier expression makes ./scripts/spdxcheck.py complain: drivers/net/ethernet/pensando/ionic/ionic_if.h: 1:52 Syntax error: OR drivers/net/ethernet/pensando/ionic/ionic_regs.h: 1:52 Syntax error: OR As OR is associative, it is irrelevant if the parentheses are put around the first or the second OR-expression. Simply add parentheses to make spdxcheck.py happy. Signed-off-by: Lukas Bulwahn Acked-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/pensando/ionic/ionic_if.h | 2 +- drivers/net/ethernet/pensando/ionic/ionic_regs.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 54547d53b0f2..51adf5059834 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */ +/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */ /* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. */ #ifndef _IONIC_IF_H_ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_regs.h b/drivers/net/ethernet/pensando/ionic/ionic_regs.h index 03ee5a36472b..2e174f45c030 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_regs.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_regs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */ +/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */ /* Copyright (c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
*/ #ifndef IONIC_REGS_H From 2091a3d42b4f339eaeed11228e0cbe9d4f92f558 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Sat, 21 Mar 2020 14:08:29 +0100 Subject: [PATCH 097/138] slcan: not call free_netdev before rtnl_unlock in slcan_open As the description before netdev_run_todo, we cannot call free_netdev before rtnl_unlock, fix it by reorder the code. This patch is a 1:1 copy of upstream slip.c commit f596c87005f7 ("slip: not call free_netdev before rtnl_unlock in slip_open"). Reported-by: yangerkun Signed-off-by: Oliver Hartkopp Signed-off-by: David S. Miller --- drivers/net/can/slcan.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 2f5c287eac95..a3664281a33f 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -625,7 +625,10 @@ err_free_chan: tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); slc_free_netdev(sl->dev); + /* do not call free_netdev before rtnl_unlock */ + rtnl_unlock(); free_netdev(sl->dev); + return err; err_exit: rtnl_unlock(); From 9de9aa487daff7a5c73434c24269b44ed6a428e6 Mon Sep 17 00:00:00 2001 From: Emil Renner Berthing Date: Sat, 21 Mar 2020 15:36:19 +0100 Subject: [PATCH 098/138] net: stmmac: dwmac-rk: fix error path in rk_gmac_probe Make sure we clean up devicetree related configuration also when clock init fails. Fixes: fecd4d7eef8b ("net: stmmac: dwmac-rk: Add integrated PHY support") Signed-off-by: Emil Renner Berthing Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index dc50ba13a746..2d5573b3dee1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1411,7 +1411,7 @@ static int rk_gmac_probe(struct platform_device *pdev) ret = rk_gmac_clk_init(plat_dat); if (ret) - return ret; + goto err_remove_config_dt; ret = rk_gmac_powerup(plat_dat->bsp_priv); if (ret) From 749f6f6843115b424680f1aada3c0dd613ad807c Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 17 Mar 2020 20:04:54 +0200 Subject: [PATCH 099/138] net: phy: dp83867: w/a for fld detect threshold bootstrapping issue When the DP83867 PHY is strapped to enable Fast Link Drop (FLD) feature STRAP_STS2.STRAP_ FLD (reg 0x006F bit 10), the Energy Lost Threshold for FLD Energy Lost Mode FLD_THR_CFG.ENERGY_LOST_FLD_THR (reg 0x002e bits 2:0) will be defaulted to 0x2. This may cause the phy link to be unstable. The new DP83867 DM recommends to always restore ENERGY_LOST_FLD_THR to 0x1. Hence, restore default value of FLD_THR_CFG.ENERGY_LOST_FLD_THR to 0x1 when FLD is enabled by bootstrapping as recommended by DM. Signed-off-by: Grygorii Strashko Signed-off-by: David S. 
Miller --- drivers/net/phy/dp83867.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 967f57ed0b65..9a07ad137c2e 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -28,7 +28,8 @@ #define DP83867_CTRL 0x1f /* Extended Registers */ -#define DP83867_CFG4 0x0031 +#define DP83867_FLD_THR_CFG 0x002e +#define DP83867_CFG4 0x0031 #define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6)) #define DP83867_CFG4_SGMII_ANEG_TIMER_11MS (3 << 5) #define DP83867_CFG4_SGMII_ANEG_TIMER_800US (2 << 5) @@ -91,6 +92,7 @@ #define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0) #define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0 #define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2) +#define DP83867_STRAP_STS2_STRAP_FLD BIT(10) /* PHY CTRL bits */ #define DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT 14 @@ -125,6 +127,9 @@ /* CFG4 bits */ #define DP83867_CFG4_PORT_MIRROR_EN BIT(0) +/* FLD_THR_CFG */ +#define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK 0x7 + enum { DP83867_PORT_MIRROING_KEEP, DP83867_PORT_MIRROING_EN, @@ -476,6 +481,20 @@ static int dp83867_config_init(struct phy_device *phydev) phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, BIT(7)); + bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2); + if (bs & DP83867_STRAP_STS2_STRAP_FLD) { + /* When using strap to enable FLD, the ENERGY_LOST_FLD_THR will + * be set to 0x2. This may causes the PHY link to be unstable - + * the default value 0x1 need to be restored. + */ + ret = phy_modify_mmd(phydev, DP83867_DEVADDR, + DP83867_FLD_THR_CFG, + DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK, + 0x1); + if (ret) + return ret; + } + if (phy_interface_is_rgmii(phydev) || phydev->interface == PHY_INTERFACE_MODE_SGMII) { val = phy_read(phydev, MII_DP83867_PHYCTRL); From 9a9ba2a4aaaa4e75a5f118b8ab642a55c34f95cb Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Tue, 17 Mar 2020 17:05:36 -0700 Subject: [PATCH 100/138] net: bcmgenet: always enable status blocks The hardware offloading of the NETIF_F_HW_CSUM and NETIF_F_RXCSUM features requires the use of Transmit Status Blocks before transmit frame data and Receive Status Blocks before receive frame data to carry the checksum information. Unfortunately, these status blocks are currently only enabled when the NETIF_F_HW_CSUM feature is enabled. As a result NETIF_F_RXCSUM will not actually be offloaded to the hardware unless both it and NETIF_F_HW_CSUM are enabled. Fortunately, that is the default configuration. This commit addresses this issue by always enabling the use of status blocks on both transmit and receive frames. Further, it replaces the use of a dedicated flag within the driver private data structure with direct use of the netdev features flags. Fixes: 810155397890 ("net: bcmgenet: use CHECKSUM_COMPLETE for NETIF_F_RXCSUM") Signed-off-by: Doug Berger Acked-by: Florian Fainelli Signed-off-by: David S. 
Miller --- .../net/ethernet/broadcom/genet/bcmgenet.c | 132 +++++------------- .../net/ethernet/broadcom/genet/bcmgenet.h | 3 +- 2 files changed, 38 insertions(+), 97 deletions(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index ce64b4e47feb..1d678bee2cc9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -94,12 +94,6 @@ static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS); } -static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, - void __iomem *d) -{ - return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS); -} - static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, void __iomem *d, dma_addr_t addr) @@ -508,61 +502,6 @@ static int bcmgenet_set_link_ksettings(struct net_device *dev, return phy_ethtool_ksettings_set(dev->phydev, cmd); } -static void bcmgenet_set_rx_csum(struct net_device *dev, - netdev_features_t wanted) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 rbuf_chk_ctrl; - bool rx_csum_en; - - rx_csum_en = !!(wanted & NETIF_F_RXCSUM); - - rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); - - /* enable rx checksumming */ - if (rx_csum_en) - rbuf_chk_ctrl |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; - else - rbuf_chk_ctrl &= ~RBUF_RXCHK_EN; - priv->desc_rxchk_en = rx_csum_en; - - /* If UniMAC forwards CRC, we need to skip over it to get - * a valid CHK bit to be set in the per-packet status word - */ - if (rx_csum_en && priv->crc_fwd_en) - rbuf_chk_ctrl |= RBUF_SKIP_FCS; - else - rbuf_chk_ctrl &= ~RBUF_SKIP_FCS; - - bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); -} - -static void bcmgenet_set_tx_csum(struct net_device *dev, - netdev_features_t wanted) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - bool desc_64b_en; - u32 tbuf_ctrl, rbuf_ctrl; - - tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); - rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); - - desc_64b_en = !!(wanted & NETIF_F_HW_CSUM); - - /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */ - if (desc_64b_en) { - tbuf_ctrl |= RBUF_64B_EN; - rbuf_ctrl |= RBUF_64B_EN; - } else { - tbuf_ctrl &= ~RBUF_64B_EN; - rbuf_ctrl &= ~RBUF_64B_EN; - } - priv->desc_64b_en = desc_64b_en; - - bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); - bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); -} - static int bcmgenet_set_features(struct net_device *dev, netdev_features_t features) { @@ -578,9 +517,6 @@ static int bcmgenet_set_features(struct net_device *dev, reg = bcmgenet_umac_readl(priv, UMAC_CMD); priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); - bcmgenet_set_tx_csum(dev, features); - bcmgenet_set_rx_csum(dev, features); - clk_disable_unprepare(priv->clk); return ret; @@ -1475,8 +1411,8 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev) /* Reallocate the SKB to put enough headroom in front of it and insert * the transmit checksum offsets in the descriptors */ -static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, - struct sk_buff *skb) +static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, + struct sk_buff *skb) { struct bcmgenet_priv *priv = netdev_priv(dev); struct status_64 *status = NULL; @@ -1590,13 +1526,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) */ GENET_CB(skb)->bytes_sent = skb->len; - /* set the SKB transmit checksum */ - if (priv->desc_64b_en) { - skb = bcmgenet_put_tx_csum(dev, skb); - if (!skb) { - ret = 
NETDEV_TX_OK; - goto out; - } + /* add the Transmit Status Block */ + skb = bcmgenet_add_tsb(dev, skb); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; } for (i = 0; i <= nr_frags; i++) { @@ -1775,6 +1709,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, while ((rxpktprocessed < rxpkttoprocess) && (rxpktprocessed < budget)) { + struct status_64 *status; + __be16 rx_csum; + cb = &priv->rx_cbs[ring->read_ptr]; skb = bcmgenet_rx_refill(priv, cb); @@ -1783,20 +1720,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, goto next; } - if (!priv->desc_64b_en) { - dma_length_status = - dmadesc_get_length_status(priv, cb->bd_addr); - } else { - struct status_64 *status; - __be16 rx_csum; - - status = (struct status_64 *)skb->data; - dma_length_status = status->length_status; + status = (struct status_64 *)skb->data; + dma_length_status = status->length_status; + if (dev->features & NETIF_F_RXCSUM) { rx_csum = (__force __be16)(status->rx_csum & 0xffff); - if (priv->desc_rxchk_en) { - skb->csum = (__force __wsum)ntohs(rx_csum); - skb->ip_summed = CHECKSUM_COMPLETE; - } + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; } /* DMA flags and length are still valid no matter how @@ -1840,14 +1769,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, } /* error packet */ skb_put(skb, len); - if (priv->desc_64b_en) { - skb_pull(skb, 64); - len -= 64; - } - /* remove hardware 2bytes added for IP alignment */ - skb_pull(skb, 2); - len -= 2; + /* remove RSB and hardware 2bytes added for IP alignment */ + skb_pull(skb, 66); + len -= 66; if (priv->crc_fwd_en) { skb_trim(skb, len - ETH_FCS_LEN); @@ -2038,11 +1963,28 @@ static void init_umac(struct bcmgenet_priv *priv) bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); - /* init rx registers, enable ip header optimization */ + /* init tx registers, enable TSB */ + reg = bcmgenet_tbuf_ctrl_get(priv); + reg |= TBUF_64B_EN; + bcmgenet_tbuf_ctrl_set(priv, reg); + + /* init rx registers, enable ip header optimization and RSB */ reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); - reg |= RBUF_ALIGN_2B; + reg |= RBUF_ALIGN_2B | RBUF_64B_EN; bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); + /* enable rx checksumming */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); + reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; + /* If UniMAC forwards CRC, we need to skip over it to get + * a valid CHK bit to be set in the per-packet status word + */ + if (priv->crc_fwd_en) + reg |= RBUF_SKIP_FCS; + else + reg &= ~RBUF_SKIP_FCS; + bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL); + if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 61a6fe9f4cec..daf8fb2c39b6 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -273,6 +273,7 @@ struct bcmgenet_mib_counters { #define RBUF_FLTR_LEN_SHIFT 8 #define TBUF_CTRL 0x00 +#define TBUF_64B_EN (1 << 0) #define TBUF_BP_MC 0x0C #define TBUF_ENERGY_CTRL 0x14 #define TBUF_EEE_EN (1 << 0) @@ -662,8 +663,6 @@ struct bcmgenet_priv { unsigned int irq0_stat; /* HW descriptors/checksum variables */ - bool desc_64b_en; - bool desc_rxchk_en; bool crc_fwd_en; u32 dma_max_burst_length; From 83a9b6f639e9f6b632337f9776de17d51d969c77 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Wed, 18 Mar 2020 18:53:21 +0000 Subject: [PATCH 101/138] 
selftests/net: add definition for SOL_DCCP to fix compilation errors for old libc Many systems build/test up-to-date kernels with older libcs, and an older glibc (2.17) lacks the definition of SOL_DCCP in /usr/include/bits/socket.h (it was added in the 4.6 timeframe). Adding the definition to the test program avoids a compilation failure that gets in the way of building tools/testing/selftests/net. The test itself will work once the definition is added; either skipping due to DCCP not being configured in the kernel under test or passing, so there are no other more up-to-date glibc dependencies here it seems beyond that missing definition. Fixes: 11fb60d1089f ("selftests: net: reuseport_addr_any: add DCCP") Signed-off-by: Alan Maguire Signed-off-by: David S. Miller --- tools/testing/selftests/net/reuseport_addr_any.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/net/reuseport_addr_any.c b/tools/testing/selftests/net/reuseport_addr_any.c index c6233935fed1..b8475cb29be7 100644 --- a/tools/testing/selftests/net/reuseport_addr_any.c +++ b/tools/testing/selftests/net/reuseport_addr_any.c @@ -21,6 +21,10 @@ #include #include +#ifndef SOL_DCCP +#define SOL_DCCP 269 +#endif + static const char *IP4_ADDR = "127.0.0.1"; static const char *IP6_ADDR = "::1"; static const char *IP4_MAPPED6 = "::ffff:127.0.0.1"; From cf52c8a776d1b31493a987a0fb3c4dbaa0f7f2cc Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 17 Mar 2020 12:33:41 +0200 Subject: [PATCH 102/138] iwlwifi: pcie: add 0x2526/0x401* devices back to cfg detection Three devices, with PCI device ID 0x2526 and subdevice IDs 0x4010, 0x4018 and 0x401C were removed accidentally. Add them back. Reported-by: Brett Hassal Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=206661 Fixes: 0b295a1eb81f ("iwlwifi: add device name to device_info") Signed-off-by: Luca Coelho Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/iwlwifi.20200317123331.16762b29f26c.I928bcaa799e7b3d33838c0667714eeb9fa665290@changeid --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 97f227f3cbc3..f441b20e1642 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -981,6 +981,9 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name), IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name), IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name), + IWL_DEV_INFO(0x2526, 0x4010, iwl9260_2ac_160_cfg, iwl9260_160_name), + IWL_DEV_INFO(0x2526, 0x4018, iwl9260_2ac_160_cfg, iwl9260_160_name), + IWL_DEV_INFO(0x2526, 0x401C, iwl9260_2ac_160_cfg, iwl9260_160_name), IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name), IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name), IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name), From 0433ae556ec8fb588a0735ddb09d3eb9806df479 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Wed, 18 Mar 2020 08:12:54 +0200 Subject: [PATCH 103/138] iwlwifi: don't send GEO_TX_POWER_LIMIT if no wgds table The GEO_TX_POWER_LIMIT command was sent although there is no wgds table, so the fw got wrong SAR values from the driver. Fix this by avoiding sending the command if no wgds tables are available. 
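The fix amounts to letting the table-load helper report "no table" and having the caller treat that as "nothing to send" rather than a failure. A minimal generic sketch of that shape (hypothetical names, not the iwlwifi code):

/* sketch.c - illustration only; names are hypothetical, not iwlwifi API */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool have_wgds_table;	/* would come from ACPI in the real driver */

static int load_geo_table(void)
{
	if (!have_wgds_table)
		return -ENOENT;	/* table missing: optional data, not fatal */
	return 0;
}

static int send_geo_tx_power_limit(void)
{
	int ret = load_geo_table();

	/* Valid to have no wgds data: skip the command, report success. */
	if (ret)
		return 0;

	printf("sending GEO_TX_POWER_LIMIT\n");
	return 0;
}

int main(void)
{
	return send_geo_tx_power_limit();
}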
Signed-off-by: Golan Ben Ami Fixes: 39c1a9728f93 ("iwlwifi: refactor the SAR tables from mvm to acpi") Signed-off-by: Luca Coelho Tested-By: Jonathan McDowell Tested-by: Len Brown Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/iwlwifi.20200318081237.46db40617cc6.Id5cf852ec8c5dbf20ba86bad7b165a0c828f8b2e@changeid --- drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 14 ++++++++------ drivers/net/wireless/intel/iwlwifi/fw/acpi.h | 14 ++++++++------ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 9 ++++++++- 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 48d375a86d86..ba2aff3af0fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -491,13 +491,13 @@ int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt, } IWL_EXPORT_SYMBOL(iwl_validate_sar_geo_profile); -void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset_group *table) +int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, + struct iwl_per_chain_offset_group *table) { int ret, i, j; if (!iwl_sar_geo_support(fwrt)) - return; + return -EOPNOTSUPP; ret = iwl_sar_get_wgds_table(fwrt); if (ret < 0) { @@ -505,7 +505,7 @@ void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, "Geo SAR BIOS table invalid or unavailable. (%d)\n", ret); /* we don't fail if the table is not available */ - return; + return -ENOENT; } BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * @@ -530,5 +530,7 @@ void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, i, j, value[1], value[2], value[0]); } } + + return 0; } IWL_EXPORT_SYMBOL(iwl_sar_geo_init); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 4a6e8262974b..5590e5cc8fbb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -171,8 +171,9 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt); int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt, struct iwl_host_cmd *cmd); -void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset_group *table); +int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, + struct iwl_per_chain_offset_group *table); + #else /* CONFIG_ACPI */ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method) @@ -243,9 +244,10 @@ static inline int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt, return -ENOENT; } -static inline void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset_group *table) +static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, + struct iwl_per_chain_offset_group *table) { + return -ENOENT; } #endif /* CONFIG_ACPI */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 54c094e88474..98263cd37944 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -762,10 +762,17 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); union geo_tx_power_profiles_cmd cmd; u16 len; + int ret; cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES); - iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table); + ret = iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table); + /* + * It is a valid scenario to not support SAR, or miss wgds table, + * but in that case there is no need to send the command. + */ + if (ret) + return 0; cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); From 6cd6cbf593bfa3ae6fc3ed34ac21da4d35045425 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 18 Mar 2020 19:21:02 -0700 Subject: [PATCH 104/138] tcp: repair: fix TCP_QUEUE_SEQ implementation When application uses TCP_QUEUE_SEQ socket option to change tp->rcv_next, we must also update tp->copied_seq. Otherwise, stuff relying on tcp_inq() being precise can eventually be confused. For example, tcp_zerocopy_receive() might crash because it does not expect tcp_recv_skb() to return NULL. We could add tests in various places to fix the issue, or simply make sure tcp_inq() wont return a random value, and leave fast path as it is. Note that this fixes ioctl(fd, SIOCINQ, &val) at the same time. Fixes: ee9952831cfd ("tcp: Initial repair mode") Fixes: 05255b823a61 ("tcp: add TCP_ZEROCOPY_RECEIVE support for zerocopy receive") Signed-off-by: Eric Dumazet Reported-by: syzbot Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index eb2d80519f8e..dc77c303e6f7 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2948,8 +2948,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level, err = -EPERM; else if (tp->repair_queue == TCP_SEND_QUEUE) WRITE_ONCE(tp->write_seq, val); - else if (tp->repair_queue == TCP_RECV_QUEUE) + else if (tp->repair_queue == TCP_RECV_QUEUE) { WRITE_ONCE(tp->rcv_nxt, val); + WRITE_ONCE(tp->copied_seq, val); + } else err = -EINVAL; break; From dddeb30bfc43926620f954266fd12c65a7206f07 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Thu, 19 Mar 2020 22:54:21 -0400 Subject: [PATCH 105/138] ipv4: fix a RCU-list lock in inet_dump_fib() There is a place, inet_dump_fib() fib_table_dump fn_trie_dump_leaf() hlist_for_each_entry_rcu() without rcu_read_lock() will trigger a warning, WARNING: suspicious RCU usage ----------------------------- net/ipv4/fib_trie.c:2216 RCU-list traversed in non-reader section!! other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 1 lock held by ip/1923: #0: ffffffff8ce76e40 (rtnl_mutex){+.+.}, at: netlink_dump+0xd6/0x840 Call Trace: dump_stack+0xa1/0xea lockdep_rcu_suspicious+0x103/0x10d fn_trie_dump_leaf+0x581/0x590 fib_table_dump+0x15f/0x220 inet_dump_fib+0x4ad/0x5d0 netlink_dump+0x350/0x840 __netlink_dump_start+0x315/0x3e0 rtnetlink_rcv_msg+0x4d1/0x720 netlink_rcv_skb+0xf0/0x220 rtnetlink_rcv+0x15/0x20 netlink_unicast+0x306/0x460 netlink_sendmsg+0x44b/0x770 __sys_sendto+0x259/0x270 __x64_sys_sendto+0x80/0xa0 do_syscall_64+0x69/0xf4 entry_SYSCALL_64_after_hwframe+0x49/0xb3 Fixes: 18a8021a7be3 ("net/ipv4: Plumb support for filtering route dumps") Signed-off-by: Qian Cai Reviewed-by: David Ahern Signed-off-by: David S. Miller --- net/ipv4/fib_frontend.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 577db1d50a24..213be9c050ad 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -997,7 +997,9 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) return -ENOENT; } + rcu_read_lock(); err = fib_table_dump(tb, skb, cb, &filter); + rcu_read_unlock(); return skb->len ? : err; } From 0dcdf9f64028ec3b75db6b691560f8286f3898bf Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 20 Mar 2020 16:21:17 +0300 Subject: [PATCH 106/138] NFC: fdp: Fix a signedness bug in fdp_nci_send_patch() The nci_conn_max_data_pkt_payload_size() function sometimes returns -EPROTO so "max_size" needs to be signed for the error handling to work. We can make "payload_size" an int as well. Fixes: a06347c04c13 ("NFC: Add Intel Fields Peak NFC solution driver") Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller --- drivers/nfc/fdp/fdp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index 0cc9ac856fe2..ed2123129e0e 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -184,7 +184,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type) const struct firmware *fw; struct sk_buff *skb; unsigned long len; - u8 max_size, payload_size; + int max_size, payload_size; int rc = 0; if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) || @@ -207,8 +207,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type) while (len) { - payload_size = min_t(unsigned long, (unsigned long) max_size, - len); + payload_size = min_t(unsigned long, max_size, len); skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size), GFP_KERNEL); From 12a5ba5a1994568d4ceaff9e78c6b0329d953386 Mon Sep 17 00:00:00 2001 From: Pawel Dembicki Date: Fri, 20 Mar 2020 21:46:14 +0100 Subject: [PATCH 107/138] net: qmi_wwan: add support for ASKEY WWHC050 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ASKEY WWHC050 is a mcie LTE modem. The oem configuration states: T: Bus=01 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=480 MxCh= 0 D: Ver= 2.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1 P: Vendor=1690 ProdID=7588 Rev=ff.ff S: Manufacturer=Android S: Product=Android S: SerialNumber=813f0eef6e6e C:* #Ifs= 6 Cfg#= 1 Atr=80 MxPwr=500mA I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none) E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=84(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=86(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan E: Ad=88(I) Atr=03(Int.) MxPS= 8 Ivl=32ms E: Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 5 Alt= 0 #EPs= 2 Cls=08(stor.) Sub=06 Prot=50 Driver=(none) E: Ad=89(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=06(O) Atr=02(Bulk) MxPS= 512 Ivl=125us Tested on openwrt distribution. Signed-off-by: Cezary Jackiewicz Signed-off-by: Pawel Dembicki Acked-by: Bjørn Mork Signed-off-by: David S. 
Miller --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 5754bb6ca0ee..6c738a271257 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1210,6 +1210,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */ + {QMI_FIXED_INTF(0x1690, 0x7588, 4)}, /* ASKEY WWHC050 */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ From 55b474c41e586a5c21c7ab81ff474eb6bacb4322 Mon Sep 17 00:00:00 2001 From: Michal Kubecek Date: Sat, 21 Mar 2020 00:46:50 +0100 Subject: [PATCH 108/138] netlink: check for null extack in cookie helpers Unlike NL_SET_ERR_* macros, nl_set_extack_cookie_u64() and nl_set_extack_cookie_u32() helpers do not check extack argument for null and neither do their callers, as syzbot recently discovered for ethnl_parse_header(). Instead of fixing the callers and leaving the trap in place, add check of null extack to both helpers to make them consistent with NL_SET_ERR_* macros. v2: drop incorrect second Fixes tag Fixes: 2363d73a2f3e ("ethtool: reject unrecognized request flags") Reported-by: syzbot+258a9089477493cea67b@syzkaller.appspotmail.com Signed-off-by: Michal Kubecek Signed-off-by: David S. Miller --- include/linux/netlink.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 4090524c3462..60739d0cbf93 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -115,6 +115,8 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, { u64 __cookie = cookie; + if (!extack) + return; memcpy(extack->cookie, &__cookie, sizeof(__cookie)); extack->cookie_len = sizeof(__cookie); } @@ -124,6 +126,8 @@ static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack, { u32 __cookie = cookie; + if (!extack) + return; memcpy(extack->cookie, &__cookie, sizeof(__cookie)); extack->cookie_len = sizeof(__cookie); } From b06d072ccc4b1acd0147b17914b7ad1caa1818bb Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sun, 22 Mar 2020 13:51:13 -0400 Subject: [PATCH 109/138] macsec: restrict to ethernet devices Only attach macsec to ethernet devices. Syzbot was able to trigger a KMSAN warning in macsec_handle_frame by attaching to a phonet device. Macvlan has a similar check in macvlan_port_create. v1->v2 - fix commit message typo Reported-by: syzbot Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- drivers/net/macsec.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 6ec6fc191a6e..92bc2b2df660 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -19,6 +19,7 @@ #include #include #include +#include #include @@ -3665,6 +3666,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; + if (real_dev->type != ARPHRD_ETHER) + return -EINVAL; dev->priv_flags |= IFF_MACSEC; From a24ec3220f369aa0b94c863b6b310685a727151c Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 22 Mar 2020 16:40:01 -0400 Subject: [PATCH 110/138] bnxt_en: Fix Priority Bytes and Packets counters in ethtool -S. 
There is an indexing bug in determining these ethtool priority counters. Instead of using the queue ID to index, we need to normalize by modulo 10 to get the index. This index is then used to obtain the proper CoS queue counter. Rename bp->pri2cos to bp->pri2cos_idx to make this more clear. Fixes: e37fed790335 ("bnxt_en: Add ethtool -S priority counters.") Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 10 +++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 8 ++++---- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c5c8effc0139..b66ee1dcd417 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7406,14 +7406,22 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) pri2cos = &resp2->pri0_cos_queue_id; for (i = 0; i < 8; i++) { u8 queue_id = pri2cos[i]; + u8 queue_idx; + /* Per port queue IDs start from 0, 10, 20, etc */ + queue_idx = queue_id % 10; + if (queue_idx > BNXT_MAX_QUEUE) { + bp->pri2cos_valid = false; + goto qstats_done; + } for (j = 0; j < bp->max_q; j++) { if (bp->q_ids[j] == queue_id) - bp->pri2cos[i] = j; + bp->pri2cos_idx[i] = queue_idx; } } bp->pri2cos_valid = 1; } +qstats_done: mutex_unlock(&bp->hwrm_cmd_lock); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index cabef0b4f5fb..63b170658532 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1716,7 +1716,7 @@ struct bnxt { u16 fw_rx_stats_ext_size; u16 fw_tx_stats_ext_size; u16 hw_ring_stats_size; - u8 pri2cos[8]; + u8 pri2cos_idx[8]; u8 pri2cos_valid; u16 hwrm_max_req_len; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1f67e6729a2c..3f8a1ded662a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -589,25 +589,25 @@ skip_ring_stats: if (bp->pri2cos_valid) { for (i = 0; i < 8; i++, j++) { long n = bnxt_rx_bytes_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); } for (i = 0; i < 8; i++, j++) { long n = bnxt_rx_pkts_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); } for (i = 0; i < 8; i++, j++) { long n = bnxt_tx_bytes_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); } for (i = 0; i < 8; i++, j++) { long n = bnxt_tx_pkts_pri_arr[i].base_off + - bp->pri2cos[i]; + bp->pri2cos_idx[i]; buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); } From 62d4073e86e62e316bea2c53e77db10418fd5dd7 Mon Sep 17 00:00:00 2001 From: Edwin Peer Date: Sun, 22 Mar 2020 16:40:02 -0400 Subject: [PATCH 111/138] bnxt_en: fix memory leaks in bnxt_dcbnl_ieee_getets() The allocated ieee_ets structure goes out of scope without being freed, leaking memory. Appropriate result codes should be returned so that callers do not rely on invalid data passed by reference. Also cache the ETS config retrieved from the device so that it doesn't need to be freed. The balance of the code was clearly written with the intent of having the results of querying the hardware cached in the device structure. The commensurate store was evidently missed though. 
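The leak and the missing cache follow a common allocate-query-cache pattern with a single error unwind; a standalone sketch under hypothetical names (not the bnxt_en code):

/* sketch.c - illustration only; hypothetical names */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct ets_cfg { int tc[8]; };

static struct ets_cfg *cached_ets;

static int query_hw(struct ets_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));	/* stand-in for a firmware query */
	return 0;
}

static int get_ets(struct ets_cfg *out)
{
	struct ets_cfg *cfg = cached_ets;
	int rc;

	if (!cfg) {
		cfg = calloc(1, sizeof(*cfg));
		if (!cfg)
			return -ENOMEM;
		rc = query_hw(cfg);
		if (rc)
			goto error;	/* free instead of leaking */
		cached_ets = cfg;	/* cache so later calls reuse it */
	}
	*out = *cfg;
	return 0;
error:
	free(cfg);
	return rc;
}

int main(void)
{
	struct ets_cfg cfg;

	return get_ets(&cfg);
}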
Fixes: 7df4ae9fe855 ("bnxt_en: Implement DCBNL to support host-based DCBX.") Signed-off-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index fb6f30d0d1d0..b1511bcffb1b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -479,24 +479,26 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct bnxt *bp = netdev_priv(dev); struct ieee_ets *my_ets = bp->ieee_ets; + int rc; ets->ets_cap = bp->max_tc; if (!my_ets) { - int rc; - if (bp->dcbx_cap & DCB_CAP_DCBX_HOST) return 0; my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL); if (!my_ets) - return 0; + return -ENOMEM; rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets); if (rc) - return 0; + goto error; rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets); if (rc) - return 0; + goto error; + + /* cache result */ + bp->ieee_ets = my_ets; } ets->cbs = my_ets->cbs; @@ -505,6 +507,9 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); return 0; +error: + kfree(my_ets); + return rc; } static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) From 0b5b561cea32d5bb1e0a82d65b755a3cb5212141 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 22 Mar 2020 16:40:03 -0400 Subject: [PATCH 112/138] bnxt_en: Return error if bnxt_alloc_ctx_mem() fails. The current code ignores the return value from bnxt_hwrm_func_backing_store_cfg(), causing the driver to proceed in the init path even when this vital firmware call has failed. Fix it by propagating the error code to the caller. Fixes: 1b9394e5a2ad ("bnxt_en: Configure context memory on new devices.") Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b66ee1dcd417..0628a6abcc3e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6880,12 +6880,12 @@ skip_rdma: } ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); - if (rc) + if (rc) { netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", rc); - else - ctx->flags |= BNXT_CTX_FLAG_INITED; - + return rc; + } + ctx->flags |= BNXT_CTX_FLAG_INITED; return 0; } From 62bfb932a51f6d08eb409248e69f8d6428c2cabd Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 22 Mar 2020 16:40:04 -0400 Subject: [PATCH 113/138] bnxt_en: Free context memory after disabling PCI in probe error path. Other shutdown code paths will always disable PCI first to shutdown DMA before freeing context memory. Do the same sequence in the error path of probe to be safe and consistent. Fixes: c20dc142dd7b ("bnxt_en: Disable bus master during PCI shutdown and driver unload.") Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0628a6abcc3e..95f4c02aa7e1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -11970,12 +11970,12 @@ init_err_pci_clean: bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_short_cmd_req(bp); bnxt_free_hwrm_resources(bp); - bnxt_free_ctx_mem(bp); - kfree(bp->ctx); - bp->ctx = NULL; kfree(bp->fw_health); bp->fw_health = NULL; bnxt_cleanup_pci(bp); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; init_err_free: free_netdev(dev); From 5d765a5e4bd7c368e564e11402bba74cf7f03ac1 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Sun, 22 Mar 2020 16:40:05 -0400 Subject: [PATCH 114/138] bnxt_en: Reset rings if ring reservation fails during open() If ring counts are not reset when ring reservation fails, bnxt_init_dflt_ring_mode() will not be called again to reinitialise IRQs when open() is called and results in system crash as napi will also be not initialised. This patch fixes it by resetting the ring counts. Fixes: 47558acd56a7 ("bnxt_en: Reserve rings at driver open if none was reserved at probe time.") Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 95f4c02aa7e1..d28b406a26b1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -11677,6 +11677,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) bp->rx_nr_rings++; bp->cp_nr_rings++; } + if (rc) { + bp->tx_nr_rings = 0; + bp->rx_nr_rings = 0; + } return rc; } From 0e62f543bed03a64495bd2651d4fe1aa4bcb7fe5 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 22 Mar 2020 13:58:50 -0700 Subject: [PATCH 115/138] net: dsa: Fix duplicate frames flooded by learning When both the switch and the bridge are learning about new addresses, switch ports attached to the bridge would see duplicate ARP frames because both entities would attempt to send them. Fixes: 5037d532b83d ("net: dsa: add Broadcom tag RX/TX handler") Reported-by: Maxime Bizon Signed-off-by: Florian Fainelli Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- net/dsa/tag_brcm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index 9c3114179690..9169b63a89e3 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -140,6 +140,8 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb, /* Remove Broadcom tag and update checksum */ skb_pull_rcsum(skb, BRCM_TAG_LEN); + skb->offload_fwd_mark = 1; + return skb; } #endif From 2f599ec422ad6634fb5ad43748b9969ca9d742bd Mon Sep 17 00:00:00 2001 From: Michal Kubecek Date: Sun, 22 Mar 2020 22:24:21 +0100 Subject: [PATCH 116/138] ethtool: fix reference leak in some *_SET handlers Andrew noticed that some handlers for *_SET commands leak a netdev reference if required ethtool_ops callbacks do not exist. A simple reproducer would be e.g. ip link add veth1 type veth peer name veth2 ethtool -s veth1 wol g ip link del veth1 Make sure dev_put() is called when ethtool_ops check fails. 
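The reproducer boils down to a missed release on an early-exit path. A standalone sketch of the pattern, with hypothetical stand-ins for the netdev reference helpers: once a reference is taken, every early return has to funnel through the same release label.

/* sketch.c - illustration only; hypothetical refcounting, not netdev API */
#include <errno.h>
#include <stdio.h>

static int refcount;

static void dev_hold_stub(void) { refcount++; }
static void dev_put_stub(void)  { refcount--; }

static int set_handler(int has_callbacks)
{
	int ret;

	dev_hold_stub();		/* reference taken while parsing the request */

	ret = -EOPNOTSUPP;
	if (!has_callbacks)
		goto out_dev;		/* was: return -EOPNOTSUPP, leaking the ref */

	ret = 0;			/* normal processing would go here */
out_dev:
	dev_put_stub();			/* single release point for all paths */
	return ret;
}

int main(void)
{
	set_handler(0);
	printf("refcount after failed set: %d\n", refcount);	/* prints 0, no leak */
	return 0;
}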
v2: add Fixes tags Fixes: a53f3d41e4d3 ("ethtool: set link settings with LINKINFO_SET request") Fixes: bfbcfe2032e7 ("ethtool: set link modes related data with LINKMODES_SET request") Fixes: e54d04e3afea ("ethtool: set message mask with DEBUG_SET request") Fixes: 8d425b19b305 ("ethtool: set wake-on-lan settings with WOL_SET request") Reported-by: Andrew Lunn Signed-off-by: Michal Kubecek Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- net/ethtool/debug.c | 4 +++- net/ethtool/linkinfo.c | 4 +++- net/ethtool/linkmodes.c | 4 +++- net/ethtool/wol.c | 4 +++- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/net/ethtool/debug.c b/net/ethtool/debug.c index aaef4843e6ba..92599ad7b3c2 100644 --- a/net/ethtool/debug.c +++ b/net/ethtool/debug.c @@ -107,8 +107,9 @@ int ethnl_set_debug(struct sk_buff *skb, struct genl_info *info) if (ret < 0) return ret; dev = req_info.dev; + ret = -EOPNOTSUPP; if (!dev->ethtool_ops->get_msglevel || !dev->ethtool_ops->set_msglevel) - return -EOPNOTSUPP; + goto out_dev; rtnl_lock(); ret = ethnl_ops_begin(dev); @@ -129,6 +130,7 @@ out_ops: ethnl_ops_complete(dev); out_rtnl: rtnl_unlock(); +out_dev: dev_put(dev); return ret; } diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c index 5d16cb4e8693..6e9e0b590bb5 100644 --- a/net/ethtool/linkinfo.c +++ b/net/ethtool/linkinfo.c @@ -126,9 +126,10 @@ int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info) if (ret < 0) return ret; dev = req_info.dev; + ret = -EOPNOTSUPP; if (!dev->ethtool_ops->get_link_ksettings || !dev->ethtool_ops->set_link_ksettings) - return -EOPNOTSUPP; + goto out_dev; rtnl_lock(); ret = ethnl_ops_begin(dev); @@ -162,6 +163,7 @@ out_ops: ethnl_ops_complete(dev); out_rtnl: rtnl_unlock(); +out_dev: dev_put(dev); return ret; } diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c index 96f20be64553..18cc37be2d9c 100644 --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c @@ -338,9 +338,10 @@ int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info) if (ret < 0) return ret; dev = req_info.dev; + ret = -EOPNOTSUPP; if (!dev->ethtool_ops->get_link_ksettings || !dev->ethtool_ops->set_link_ksettings) - return -EOPNOTSUPP; + goto out_dev; rtnl_lock(); ret = ethnl_ops_begin(dev); @@ -370,6 +371,7 @@ out_ops: ethnl_ops_complete(dev); out_rtnl: rtnl_unlock(); +out_dev: dev_put(dev); return ret; } diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c index e1b8a65b64c4..55e1ecaaf739 100644 --- a/net/ethtool/wol.c +++ b/net/ethtool/wol.c @@ -128,8 +128,9 @@ int ethnl_set_wol(struct sk_buff *skb, struct genl_info *info) if (ret < 0) return ret; dev = req_info.dev; + ret = -EOPNOTSUPP; if (!dev->ethtool_ops->get_wol || !dev->ethtool_ops->set_wol) - return -EOPNOTSUPP; + goto out_dev; rtnl_lock(); ret = ethnl_ops_begin(dev); @@ -172,6 +173,7 @@ out_ops: ethnl_ops_complete(dev); out_rtnl: rtnl_unlock(); +out_dev: dev_put(dev); return ret; } From 81573b18f26defe672a7d960f9af9ac2c97f324d Mon Sep 17 00:00:00 2001 From: Vadym Kochan Date: Mon, 23 Mar 2020 16:24:04 +0200 Subject: [PATCH 117/138] selftests/net/forwarding: add Makefile to install tests Add missing Makefile for net/forwarding tests and include it to the targets list, otherwise forwarding tests are not installed in case of cross-compilation. Signed-off-by: Vadym Kochan Signed-off-by: David S. 
Miller --- tools/testing/selftests/Makefile | 1 + .../testing/selftests/net/forwarding/Makefile | 75 +++++++++++++++++++ 2 files changed, 76 insertions(+) create mode 100644 tools/testing/selftests/net/forwarding/Makefile diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 6ec503912bea..b93fa645ee54 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -33,6 +33,7 @@ TARGETS += memory-hotplug TARGETS += mount TARGETS += mqueue TARGETS += net +TARGETS += net/forwarding TARGETS += net/mptcp TARGETS += netfilter TARGETS += networking/timestamping diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile new file mode 100644 index 000000000000..44616103508b --- /dev/null +++ b/tools/testing/selftests/net/forwarding/Makefile @@ -0,0 +1,75 @@ +# SPDX-License-Identifier: GPL-2.0+ OR MIT + +TEST_PROGS = bridge_igmp.sh \ + bridge_port_isolation.sh \ + bridge_sticky_fdb.sh \ + bridge_vlan_aware.sh \ + bridge_vlan_unaware.sh \ + devlink_lib.sh \ + ethtool_lib.sh \ + ethtool.sh \ + fib_offload_lib.sh \ + forwarding.config.sample \ + gre_inner_v4_multipath.sh \ + gre_inner_v6_multipath.sh \ + gre_multipath.sh \ + ip6gre_inner_v4_multipath.sh \ + ip6gre_inner_v6_multipath.sh \ + ipip_flat_gre_key.sh \ + ipip_flat_gre_keys.sh \ + ipip_flat_gre.sh \ + ipip_hier_gre_key.sh \ + ipip_hier_gre_keys.sh \ + ipip_hier_gre.sh \ + ipip_lib.sh \ + lib.sh \ + loopback.sh \ + mirror_gre_bound.sh \ + mirror_gre_bridge_1d.sh \ + mirror_gre_bridge_1d_vlan.sh \ + mirror_gre_bridge_1q_lag.sh \ + mirror_gre_bridge_1q.sh \ + mirror_gre_changes.sh \ + mirror_gre_flower.sh \ + mirror_gre_lag_lacp.sh \ + mirror_gre_lib.sh \ + mirror_gre_neigh.sh \ + mirror_gre_nh.sh \ + mirror_gre.sh \ + mirror_gre_topo_lib.sh \ + mirror_gre_vlan_bridge_1q.sh \ + mirror_gre_vlan.sh \ + mirror_lib.sh \ + mirror_topo_lib.sh \ + mirror_vlan.sh \ + router_bridge.sh \ + router_bridge_vlan.sh \ + router_broadcast.sh \ + router_mpath_nh.sh \ + router_multicast.sh \ + router_multipath.sh \ + router.sh \ + router_vid_1.sh \ + sch_ets_core.sh \ + sch_ets.sh \ + sch_ets_tests.sh \ + sch_tbf_core.sh \ + sch_tbf_etsprio.sh \ + sch_tbf_ets.sh \ + sch_tbf_prio.sh \ + sch_tbf_root.sh \ + tc_actions.sh \ + tc_chains.sh \ + tc_common.sh \ + tc_flower_router.sh \ + tc_flower.sh \ + tc_shblocks.sh \ + tc_vlan_modify.sh \ + vxlan_asymmetric.sh \ + vxlan_bridge_1d_port_8472.sh \ + vxlan_bridge_1d.sh \ + vxlan_bridge_1q_port_8472.sh \ + vxlan_bridge_1q.sh \ + vxlan_symmetric.sh + +include ../../lib.mk From 8c2d45b2b65ca1f215244be1c600236e83f9815f Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sun, 22 Mar 2020 03:21:58 +0100 Subject: [PATCH 118/138] netfilter: nf_tables: Allow set back-ends to report partial overlaps on insertion Currently, the -EEXIST return code of ->insert() callbacks is ambiguous: it might indicate that a given element (including intervals) already exists as such, or that the new element would clash with existing ones. If identical elements already exist, the front-end is ignoring this without returning error, in case NLM_F_EXCL is not set. However, if the new element can't be inserted due an overlap, we should report this to the user. To this purpose, allow set back-ends to return -ENOTEMPTY on collision with existing elements, translate that to -EEXIST, and return that to userspace, no matter if NLM_F_EXCL was set. 
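A condensed sketch of the translation described above (simplified; the real front-end also handles the busy case and element flags):

/* sketch.c - illustration only */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* What the front end does with the back end's insert result. */
static int translate_insert_error(int err, bool nlm_f_excl)
{
	if (err == -EEXIST && !nlm_f_excl)
		return 0;		/* identical element, caller allows it */
	if (err == -ENOTEMPTY)
		return -EEXIST;		/* partial overlap: always reported */
	return err;
}

int main(void)
{
	printf("%d\n", translate_insert_error(-EEXIST, false));	/* 0 */
	printf("%d\n", translate_insert_error(-ENOTEMPTY, false));	/* -EEXIST */
	return 0;
}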
Reported-by: Phil Sutter Signed-off-by: Stefano Brivio Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 38c680f28f15..d11f1a74d43c 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5082,6 +5082,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, err = -EBUSY; else if (!(nlmsg_flags & NLM_F_EXCL)) err = 0; + } else if (err == -ENOTEMPTY) { + /* ENOTEMPTY reports overlapping between this element + * and an existing one. + */ + err = -EEXIST; } goto err_element_clash; } From 0eb4b5ee33f2461d149408a247af8ae24756a6ca Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Sun, 22 Mar 2020 03:21:59 +0100 Subject: [PATCH 119/138] netfilter: nft_set_pipapo: Separate partial and complete overlap cases on insertion ...and return -ENOTEMPTY to the front-end on collision, -EEXIST if an identical element already exists. Together with the previous patch, element collision will now be returned to the user as -EEXIST. Reported-by: Phil Sutter Signed-off-by: Stefano Brivio Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_set_pipapo.c | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 4fc0c924ed5d..ef7e8ad2e344 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -1098,21 +1098,41 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, struct nft_pipapo_field *f; int i, bsize_max, err = 0; + if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END)) + end = (const u8 *)nft_set_ext_key_end(ext)->data; + else + end = start; + dup = pipapo_get(net, set, start, genmask); - if (PTR_ERR(dup) == -ENOENT) { - if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END)) { - end = (const u8 *)nft_set_ext_key_end(ext)->data; - dup = pipapo_get(net, set, end, nft_genmask_next(net)); - } else { - end = start; + if (!IS_ERR(dup)) { + /* Check if we already have the same exact entry */ + const struct nft_data *dup_key, *dup_end; + + dup_key = nft_set_ext_key(&dup->ext); + if (nft_set_ext_exists(&dup->ext, NFT_SET_EXT_KEY_END)) + dup_end = nft_set_ext_key_end(&dup->ext); + else + dup_end = dup_key; + + if (!memcmp(start, dup_key->data, sizeof(*dup_key->data)) && + !memcmp(end, dup_end->data, sizeof(*dup_end->data))) { + *ext2 = &dup->ext; + return -EEXIST; } + + return -ENOTEMPTY; + } + + if (PTR_ERR(dup) == -ENOENT) { + /* Look for partially overlapping entries */ + dup = pipapo_get(net, set, end, nft_genmask_next(net)); } if (PTR_ERR(dup) != -ENOENT) { if (IS_ERR(dup)) return PTR_ERR(dup); *ext2 = &dup->ext; - return -EEXIST; + return -ENOTEMPTY; } /* Validate */ From 6f7c9caf017be8ab0fe3b99509580d0793bf0833 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Sun, 22 Mar 2020 03:22:00 +0100 Subject: [PATCH 120/138] netfilter: nft_set_rbtree: Introduce and use nft_rbtree_interval_start() Replace negations of nft_rbtree_interval_end() with a new helper, nft_rbtree_interval_start(), wherever this helps to visualise the problem at hand, that is, for all the occurrences except for the comparison against given flags in __nft_rbtree_get(). This gets especially useful in the next patch. 
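A condensed sketch of the decision the back-end now makes once a lookup has matched an existing element (simplified; the real code compares keys stored in the element extension area):

/* sketch.c - illustration only */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct elem { unsigned char start[4], end[4]; };

/* Given an existing element that matched the lookup, pick the error code. */
static int classify_clash(const struct elem *cand, const struct elem *old)
{
	if (!memcmp(cand->start, old->start, sizeof(cand->start)) &&
	    !memcmp(cand->end, old->end, sizeof(cand->end)))
		return -EEXIST;		/* exact same interval already present */

	return -ENOTEMPTY;		/* overlaps, but is not identical */
}

int main(void)
{
	struct elem a = { {0, 0, 0, 1}, {0, 0, 0, 5} };
	struct elem b = { {0, 0, 0, 4}, {0, 0, 0, 7} };

	printf("%d %d\n", classify_clash(&a, &a), classify_clash(&a, &b));
	return 0;
}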
Signed-off-by: Stefano Brivio Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_set_rbtree.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 5000b938ab1e..85572b2a6051 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -33,6 +33,11 @@ static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe) (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END); } +static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe) +{ + return !nft_rbtree_interval_end(rbe); +} + static bool nft_rbtree_equal(const struct nft_set *set, const void *this, const struct nft_rbtree_elem *interval) { @@ -64,7 +69,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set if (interval && nft_rbtree_equal(set, this, interval) && nft_rbtree_interval_end(rbe) && - !nft_rbtree_interval_end(interval)) + nft_rbtree_interval_start(interval)) continue; interval = rbe; } else if (d > 0) @@ -89,7 +94,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set if (set->flags & NFT_SET_INTERVAL && interval != NULL && nft_set_elem_active(&interval->ext, genmask) && - !nft_rbtree_interval_end(interval)) { + nft_rbtree_interval_start(interval)) { *ext = &interval->ext; return true; } @@ -224,9 +229,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, p = &parent->rb_right; else { if (nft_rbtree_interval_end(rbe) && - !nft_rbtree_interval_end(new)) { + nft_rbtree_interval_start(new)) { p = &parent->rb_left; - } else if (!nft_rbtree_interval_end(rbe) && + } else if (nft_rbtree_interval_start(rbe) && nft_rbtree_interval_end(new)) { p = &parent->rb_right; } else if (nft_set_elem_active(&rbe->ext, genmask)) { @@ -317,10 +322,10 @@ static void *nft_rbtree_deactivate(const struct net *net, parent = parent->rb_right; else { if (nft_rbtree_interval_end(rbe) && - !nft_rbtree_interval_end(this)) { + nft_rbtree_interval_start(this)) { parent = parent->rb_left; continue; - } else if (!nft_rbtree_interval_end(rbe) && + } else if (nft_rbtree_interval_start(rbe) && nft_rbtree_interval_end(this)) { parent = parent->rb_right; continue; From 7c84d41416d836ef7e533bd4d64ccbdf40c5ac70 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Sun, 22 Mar 2020 03:22:01 +0100 Subject: [PATCH 121/138] netfilter: nft_set_rbtree: Detect partial overlaps on insertion ...and return -ENOTEMPTY to the front-end in this case, instead of proceeding. Currently, nft takes care of checking for these cases and not sending them to the kernel, but if we drop the set_overlap() call in nft we can end up in situations like: # nft add table t # nft add set t s '{ type inet_service ; flags interval ; }' # nft add element t s '{ 1 - 5 }' # nft add element t s '{ 6 - 10 }' # nft add element t s '{ 4 - 7 }' # nft list set t s table ip t { set s { type inet_service flags interval elements = { 1-3, 4-5, 6-7 } } } This change has the primary purpose of making the behaviour consistent with nft_set_pipapo, but is also functional to avoid inconsistent behaviour if userspace sends overlapping elements for any reason. 
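The property being enforced is ordinary interval overlap; a tiny standalone check (not the rbtree descent itself, which detects this while walking the tree) using the ranges from the reproducer above:

/* sketch.c - illustration only */
#include <stdbool.h>
#include <stdio.h>

/* Closed intervals [a_lo, a_hi] and [b_lo, b_hi] overlap iff each one
 * starts no later than the other one ends.
 */
static bool intervals_overlap(unsigned int a_lo, unsigned int a_hi,
			      unsigned int b_lo, unsigned int b_hi)
{
	return a_lo <= b_hi && b_lo <= a_hi;
}

int main(void)
{
	/* 4-7 overlaps both existing ranges, so its insertion is rejected. */
	printf("%d %d %d\n",
	       intervals_overlap(4, 7, 1, 5),	/* 1 */
	       intervals_overlap(4, 7, 6, 10),	/* 1 */
	       intervals_overlap(1, 5, 6, 10));	/* 0 */
	return 0;
}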
v2: When we meet the same key data in the tree, as start element while inserting an end element, or as end element while inserting a start element, actually check that the existing element is active, before resetting the overlap flag (Pablo Neira Ayuso) Signed-off-by: Stefano Brivio Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_set_rbtree.c | 70 ++++++++++++++++++++++++++++++++-- 1 file changed, 67 insertions(+), 3 deletions(-) diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 85572b2a6051..8617fc16a1ed 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -213,8 +213,43 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, u8 genmask = nft_genmask_next(net); struct nft_rbtree_elem *rbe; struct rb_node *parent, **p; + bool overlap = false; int d; + /* Detect overlaps as we descend the tree. Set the flag in these cases: + * + * a1. |__ _ _? >|__ _ _ (insert start after existing start) + * a2. _ _ __>| ?_ _ __| (insert end before existing end) + * a3. _ _ ___| ?_ _ _>| (insert end after existing end) + * a4. >|__ _ _ _ _ __| (insert start before existing end) + * + * and clear it later on, as we eventually reach the points indicated by + * '?' above, in the cases described below. We'll always meet these + * later, locally, due to tree ordering, and overlaps for the intervals + * that are the closest together are always evaluated last. + * + * b1. |__ _ _! >|__ _ _ (insert start after existing end) + * b2. _ _ __>| !_ _ __| (insert end before existing start) + * b3. !_____>| (insert end after existing start) + * + * Case a4. resolves to b1.: + * - if the inserted start element is the leftmost, because the '0' + * element in the tree serves as end element + * - otherwise, if an existing end is found. Note that end elements are + * always inserted after corresponding start elements. + * + * For a new, rightmost pair of elements, we'll hit cases b1. and b3., + * in that order. + * + * The flag is also cleared in two special cases: + * + * b4. |__ _ _!|<_ _ _ (insert start right before existing end) + * b5. |__ _ >|!__ _ _ (insert end right after existing start) + * + * which always happen as last step and imply that no further + * overlapping is possible. 
+ */ + parent = NULL; p = &priv->root.rb_node; while (*p != NULL) { @@ -223,17 +258,42 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, d = memcmp(nft_set_ext_key(&rbe->ext), nft_set_ext_key(&new->ext), set->klen); - if (d < 0) + if (d < 0) { p = &parent->rb_left; - else if (d > 0) + + if (nft_rbtree_interval_start(new)) { + overlap = nft_rbtree_interval_start(rbe) && + nft_set_elem_active(&rbe->ext, + genmask); + } else { + overlap = nft_rbtree_interval_end(rbe) && + nft_set_elem_active(&rbe->ext, + genmask); + } + } else if (d > 0) { p = &parent->rb_right; - else { + + if (nft_rbtree_interval_end(new)) { + overlap = nft_rbtree_interval_end(rbe) && + nft_set_elem_active(&rbe->ext, + genmask); + } else if (nft_rbtree_interval_end(rbe) && + nft_set_elem_active(&rbe->ext, genmask)) { + overlap = true; + } + } else { if (nft_rbtree_interval_end(rbe) && nft_rbtree_interval_start(new)) { p = &parent->rb_left; + + if (nft_set_elem_active(&rbe->ext, genmask)) + overlap = false; } else if (nft_rbtree_interval_start(rbe) && nft_rbtree_interval_end(new)) { p = &parent->rb_right; + + if (nft_set_elem_active(&rbe->ext, genmask)) + overlap = false; } else if (nft_set_elem_active(&rbe->ext, genmask)) { *ext = &rbe->ext; return -EEXIST; @@ -242,6 +302,10 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, } } } + + if (overlap) + return -ENOTEMPTY; + rb_link_node_rcu(&new->node, parent, p); rb_insert_color(&new->node, &priv->root); return 0; From 76a109fac206e158eb3c967af98c178cff738e6a Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 23 Mar 2020 14:27:16 +0100 Subject: [PATCH 122/138] netfilter: nft_fwd_netdev: validate family and chain type Make sure the forward action is only used from ingress. Fixes: 39e6dea28adc ("netfilter: nf_tables: add forward expression to the netdev family") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_fwd_netdev.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c index aba11c2333f3..ddd28de810b6 100644 --- a/net/netfilter/nft_fwd_netdev.c +++ b/net/netfilter/nft_fwd_netdev.c @@ -190,6 +190,13 @@ nla_put_failure: return -1; } +static int nft_fwd_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS)); +} + static struct nft_expr_type nft_fwd_netdev_type; static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = { .type = &nft_fwd_netdev_type, @@ -197,6 +204,7 @@ static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = { .eval = nft_fwd_neigh_eval, .init = nft_fwd_neigh_init, .dump = nft_fwd_neigh_dump, + .validate = nft_fwd_validate, }; static const struct nft_expr_ops nft_fwd_netdev_ops = { @@ -205,6 +213,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = { .eval = nft_fwd_netdev_eval, .init = nft_fwd_netdev_init, .dump = nft_fwd_netdev_dump, + .validate = nft_fwd_validate, .offload = nft_fwd_netdev_offload, }; From bcfabee1afd99484b6ba067361b8678e28bbc065 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 23 Mar 2020 19:53:10 +0100 Subject: [PATCH 123/138] netfilter: nft_fwd_netdev: allow to redirect to ifb via ingress Set skb->tc_redirected to 1, otherwise the ifb driver drops the packet. Set skb->tc_from_ingress to 1 to reinject the packet back to the ingress path after leaving the ifb egress path. 
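For reference, the drop that this avoids comes from the redirect check in the ifb transmit path; roughly (excerpt from drivers/net/ifb.c, with the field names in use at this point in the series; the last patch in the series later renames them):

	/* ifb_xmit(): packets not marked as redirected, or without an
	 * originating ifindex, are dropped on the floor. */
	if (!skb->tc_redirected || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

On the ifb side, tc_from_ingress then decides whether the packet is re-injected into the receive path or transmitted out via dev_queue_xmit().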
This patch unconditionally sets these two skb fields, which are meaningful to the ifb driver. The existing forward action is guaranteed to run from ingress path. Fixes: 39e6dea28adc ("netfilter: nf_tables: add forward expression to the netdev family") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_fwd_netdev.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c index ddd28de810b6..74f050ba6bad 100644 --- a/net/netfilter/nft_fwd_netdev.c +++ b/net/netfilter/nft_fwd_netdev.c @@ -28,6 +28,10 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr, struct nft_fwd_netdev *priv = nft_expr_priv(expr); int oif = regs->data[priv->sreg_dev]; + /* These are used by ifb only. */ + pkt->skb->tc_redirected = 1; + pkt->skb->tc_from_ingress = 1; + nf_fwd_netdev_egress(pkt, oif); regs->verdict.code = NF_STOLEN; } From a64d558d8cf98424cc5eb9ae6631782cd8bf789c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 23 Mar 2020 17:34:30 +0100 Subject: [PATCH 124/138] selftests: netfilter: add nfqueue test case Add a test case to check nf queue infrastructure. Could be extended in the future to also cover serialization of conntrack, uid and secctx attributes in nfqueue. For now, this checks that 'queue bypass' works, that a queue rule with no bypass option blocks traffic and that userspace receives the expected number of packets. For this we add two queues and hook all of prerouting/input/forward/output/postrouting. Packets get queued twice with a dummy base chain in between: This passes with current nf tree, but reverting commit 946c0d8e6ed4 ("netfilter: nf_queue: fix reinject verdict handling") makes this trip (it processes 30 instead of the expected 20 packets). v2: update config file with queue and other options missing/needed for other tests. v3: also test with tcp, this reveals a problem with commit 28f8bfd1ac94 ("netfilter: Support iif matches in POSTROUTING"), due to skb->dev pointing at another skb in the retransmit rbtree (skb->dev aliases to rbnode child).
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- tools/testing/selftests/netfilter/Makefile | 6 +- tools/testing/selftests/netfilter/config | 6 + tools/testing/selftests/netfilter/nf-queue.c | 352 ++++++++++++++++++ .../testing/selftests/netfilter/nft_queue.sh | 332 +++++++++++++++++ 4 files changed, 695 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/netfilter/nf-queue.c create mode 100755 tools/testing/selftests/netfilter/nft_queue.sh diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index 08194aa44006..9c0f758310fe 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile @@ -3,6 +3,10 @@ TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \ conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \ - nft_concat_range.sh + nft_concat_range.sh \ + nft_queue.sh + +LDLIBS = -lmnl +TEST_GEN_FILES = nf-queue include ../lib.mk diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config index 59caa8f71cd8..4faf2ce021d9 100644 --- a/tools/testing/selftests/netfilter/config +++ b/tools/testing/selftests/netfilter/config @@ -1,2 +1,8 @@ CONFIG_NET_NS=y CONFIG_NF_TABLES_INET=y +CONFIG_NFT_QUEUE=m +CONFIG_NFT_NAT=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NF_CT_NETLINK=m diff --git a/tools/testing/selftests/netfilter/nf-queue.c b/tools/testing/selftests/netfilter/nf-queue.c new file mode 100644 index 000000000000..29c73bce38fa --- /dev/null +++ b/tools/testing/selftests/netfilter/nf-queue.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +struct options { + bool count_packets; + int verbose; + unsigned int queue_num; + unsigned int timeout; +}; + +static unsigned int queue_stats[5]; +static struct options opts; + +static void help(const char *p) +{ + printf("Usage: %s [-c|-v [-vv] ] [-t timeout] [-q queue_num]\n", p); +} + +static int parse_attr_cb(const struct nlattr *attr, void *data) +{ + const struct nlattr **tb = data; + int type = mnl_attr_get_type(attr); + + /* skip unsupported attribute in user-space */ + if (mnl_attr_type_valid(attr, NFQA_MAX) < 0) + return MNL_CB_OK; + + switch (type) { + case NFQA_MARK: + case NFQA_IFINDEX_INDEV: + case NFQA_IFINDEX_OUTDEV: + case NFQA_IFINDEX_PHYSINDEV: + case NFQA_IFINDEX_PHYSOUTDEV: + if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) { + perror("mnl_attr_validate"); + return MNL_CB_ERROR; + } + break; + case NFQA_TIMESTAMP: + if (mnl_attr_validate2(attr, MNL_TYPE_UNSPEC, + sizeof(struct nfqnl_msg_packet_timestamp)) < 0) { + perror("mnl_attr_validate2"); + return MNL_CB_ERROR; + } + break; + case NFQA_HWADDR: + if (mnl_attr_validate2(attr, MNL_TYPE_UNSPEC, + sizeof(struct nfqnl_msg_packet_hw)) < 0) { + perror("mnl_attr_validate2"); + return MNL_CB_ERROR; + } + break; + case NFQA_PAYLOAD: + break; + } + tb[type] = attr; + return MNL_CB_OK; +} + +static int queue_cb(const struct nlmsghdr *nlh, void *data) +{ + struct nlattr *tb[NFQA_MAX+1] = { 0 }; + struct nfqnl_msg_packet_hdr *ph = NULL; + uint32_t id = 0; + + (void)data; + + mnl_attr_parse(nlh, sizeof(struct nfgenmsg), parse_attr_cb, tb); + if (tb[NFQA_PACKET_HDR]) { + ph = mnl_attr_get_payload(tb[NFQA_PACKET_HDR]); + id = ntohl(ph->packet_id); + + if (opts.verbose > 0) + printf("packet hook=%u, hwproto 0x%x", + ntohs(ph->hw_protocol), ph->hook); + + 
if (ph->hook >= 5) { + fprintf(stderr, "Unknown hook %d\n", ph->hook); + return MNL_CB_ERROR; + } + + if (opts.verbose > 0) { + uint32_t skbinfo = 0; + + if (tb[NFQA_SKB_INFO]) + skbinfo = ntohl(mnl_attr_get_u32(tb[NFQA_SKB_INFO])); + if (skbinfo & NFQA_SKB_CSUMNOTREADY) + printf(" csumnotready"); + if (skbinfo & NFQA_SKB_GSO) + printf(" gso"); + if (skbinfo & NFQA_SKB_CSUM_NOTVERIFIED) + printf(" csumnotverified"); + puts(""); + } + + if (opts.count_packets) + queue_stats[ph->hook]++; + } + + return MNL_CB_OK + id; +} + +static struct nlmsghdr * +nfq_build_cfg_request(char *buf, uint8_t command, int queue_num) +{ + struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf); + struct nfqnl_msg_config_cmd cmd = { + .command = command, + .pf = htons(AF_INET), + }; + struct nfgenmsg *nfg; + + nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_CONFIG; + nlh->nlmsg_flags = NLM_F_REQUEST; + + nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg)); + + nfg->nfgen_family = AF_UNSPEC; + nfg->version = NFNETLINK_V0; + nfg->res_id = htons(queue_num); + + mnl_attr_put(nlh, NFQA_CFG_CMD, sizeof(cmd), &cmd); + + return nlh; +} + +static struct nlmsghdr * +nfq_build_cfg_params(char *buf, uint8_t mode, int range, int queue_num) +{ + struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf); + struct nfqnl_msg_config_params params = { + .copy_range = htonl(range), + .copy_mode = mode, + }; + struct nfgenmsg *nfg; + + nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_CONFIG; + nlh->nlmsg_flags = NLM_F_REQUEST; + + nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg)); + nfg->nfgen_family = AF_UNSPEC; + nfg->version = NFNETLINK_V0; + nfg->res_id = htons(queue_num); + + mnl_attr_put(nlh, NFQA_CFG_PARAMS, sizeof(params), ¶ms); + + return nlh; +} + +static struct nlmsghdr * +nfq_build_verdict(char *buf, int id, int queue_num, int verd) +{ + struct nfqnl_msg_verdict_hdr vh = { + .verdict = htonl(verd), + .id = htonl(id), + }; + struct nlmsghdr *nlh; + struct nfgenmsg *nfg; + + nlh = mnl_nlmsg_put_header(buf); + nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_VERDICT; + nlh->nlmsg_flags = NLM_F_REQUEST; + nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg)); + nfg->nfgen_family = AF_UNSPEC; + nfg->version = NFNETLINK_V0; + nfg->res_id = htons(queue_num); + + mnl_attr_put(nlh, NFQA_VERDICT_HDR, sizeof(vh), &vh); + + return nlh; +} + +static void print_stats(void) +{ + unsigned int last, total; + int i; + + if (!opts.count_packets) + return; + + total = 0; + last = queue_stats[0]; + + for (i = 0; i < 5; i++) { + printf("hook %d packets %08u\n", i, queue_stats[i]); + last = queue_stats[i]; + total += last; + } + + printf("%u packets total\n", total); +} + +struct mnl_socket *open_queue(void) +{ + char buf[MNL_SOCKET_BUFFER_SIZE]; + unsigned int queue_num; + struct mnl_socket *nl; + struct nlmsghdr *nlh; + struct timeval tv; + uint32_t flags; + + nl = mnl_socket_open(NETLINK_NETFILTER); + if (nl == NULL) { + perror("mnl_socket_open"); + exit(EXIT_FAILURE); + } + + if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) { + perror("mnl_socket_bind"); + exit(EXIT_FAILURE); + } + + queue_num = opts.queue_num; + nlh = nfq_build_cfg_request(buf, NFQNL_CFG_CMD_BIND, queue_num); + + if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) { + perror("mnl_socket_sendto"); + exit(EXIT_FAILURE); + } + + nlh = nfq_build_cfg_params(buf, NFQNL_COPY_PACKET, 0xFFFF, queue_num); + + flags = NFQA_CFG_F_GSO | NFQA_CFG_F_UID_GID; + mnl_attr_put_u32(nlh, NFQA_CFG_FLAGS, htonl(flags)); + mnl_attr_put_u32(nlh, NFQA_CFG_MASK, htonl(flags)); + + if 
(mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) { + perror("mnl_socket_sendto"); + exit(EXIT_FAILURE); + } + + memset(&tv, 0, sizeof(tv)); + tv.tv_sec = opts.timeout; + if (opts.timeout && setsockopt(mnl_socket_get_fd(nl), + SOL_SOCKET, SO_RCVTIMEO, + &tv, sizeof(tv))) { + perror("setsockopt(SO_RCVTIMEO)"); + exit(EXIT_FAILURE); + } + + return nl; +} + +static int mainloop(void) +{ + unsigned int buflen = 64 * 1024 + MNL_SOCKET_BUFFER_SIZE; + struct mnl_socket *nl; + struct nlmsghdr *nlh; + unsigned int portid; + char *buf; + int ret; + + buf = malloc(buflen); + if (!buf) { + perror("malloc"); + exit(EXIT_FAILURE); + } + + nl = open_queue(); + portid = mnl_socket_get_portid(nl); + + for (;;) { + uint32_t id; + + ret = mnl_socket_recvfrom(nl, buf, buflen); + if (ret == -1) { + if (errno == ENOBUFS) + continue; + + if (errno == EAGAIN) { + errno = 0; + ret = 0; + break; + } + + perror("mnl_socket_recvfrom"); + exit(EXIT_FAILURE); + } + + ret = mnl_cb_run(buf, ret, 0, portid, queue_cb, NULL); + if (ret < 0) { + perror("mnl_cb_run"); + exit(EXIT_FAILURE); + } + + id = ret - MNL_CB_OK; + nlh = nfq_build_verdict(buf, id, opts.queue_num, NF_ACCEPT); + if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) { + perror("mnl_socket_sendto"); + exit(EXIT_FAILURE); + } + } + + mnl_socket_close(nl); + + return ret; +} + +static void parse_opts(int argc, char **argv) +{ + int c; + + while ((c = getopt(argc, argv, "chvt:q:")) != -1) { + switch (c) { + case 'c': + opts.count_packets = true; + break; + case 'h': + help(argv[0]); + exit(0); + break; + case 'q': + opts.queue_num = atoi(optarg); + if (opts.queue_num > 0xffff) + opts.queue_num = 0; + break; + case 't': + opts.timeout = atoi(optarg); + break; + case 'v': + opts.verbose++; + break; + } + } +} + +int main(int argc, char *argv[]) +{ + int ret; + + parse_opts(argc, argv); + + ret = mainloop(); + if (opts.count_packets) + print_stats(); + + return ret; +} diff --git a/tools/testing/selftests/netfilter/nft_queue.sh b/tools/testing/selftests/netfilter/nft_queue.sh new file mode 100755 index 000000000000..6898448b4266 --- /dev/null +++ b/tools/testing/selftests/netfilter/nft_queue.sh @@ -0,0 +1,332 @@ +#!/bin/bash +# +# This tests nf_queue: +# 1. can process packets from all hooks +# 2. support running nfqueue from more than one base chain +# +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 +ret=0 + +sfx=$(mktemp -u "XXXXXXXX") +ns1="ns1-$sfx" +ns2="ns2-$sfx" +nsrouter="nsrouter-$sfx" + +cleanup() +{ + ip netns del ${ns1} + ip netns del ${ns2} + ip netns del ${nsrouter} + rm -f "$TMPFILE0" + rm -f "$TMPFILE1" +} + +nft --version > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without nft tool" + exit $ksft_skip +fi + +ip -Version > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without ip tool" + exit $ksft_skip +fi + +ip netns add ${nsrouter} +if [ $? -ne 0 ];then + echo "SKIP: Could not create net namespace" + exit $ksft_skip +fi + +TMPFILE0=$(mktemp) +TMPFILE1=$(mktemp) +trap cleanup EXIT + +ip netns add ${ns1} +ip netns add ${ns2} + +ip link add veth0 netns ${nsrouter} type veth peer name eth0 netns ${ns1} > /dev/null 2>&1 +if [ $? 
-ne 0 ];then + echo "SKIP: No virtual ethernet pair device support in kernel" + exit $ksft_skip +fi +ip link add veth1 netns ${nsrouter} type veth peer name eth0 netns ${ns2} + +ip -net ${nsrouter} link set lo up +ip -net ${nsrouter} link set veth0 up +ip -net ${nsrouter} addr add 10.0.1.1/24 dev veth0 +ip -net ${nsrouter} addr add dead:1::1/64 dev veth0 + +ip -net ${nsrouter} link set veth1 up +ip -net ${nsrouter} addr add 10.0.2.1/24 dev veth1 +ip -net ${nsrouter} addr add dead:2::1/64 dev veth1 + +ip -net ${ns1} link set lo up +ip -net ${ns1} link set eth0 up + +ip -net ${ns2} link set lo up +ip -net ${ns2} link set eth0 up + +ip -net ${ns1} addr add 10.0.1.99/24 dev eth0 +ip -net ${ns1} addr add dead:1::99/64 dev eth0 +ip -net ${ns1} route add default via 10.0.1.1 +ip -net ${ns1} route add default via dead:1::1 + +ip -net ${ns2} addr add 10.0.2.99/24 dev eth0 +ip -net ${ns2} addr add dead:2::99/64 dev eth0 +ip -net ${ns2} route add default via 10.0.2.1 +ip -net ${ns2} route add default via dead:2::1 + +load_ruleset() { + local name=$1 + local prio=$2 + +ip netns exec ${nsrouter} nft -f - < /dev/null + if [ $? -ne 0 ];then + return 1 + fi + + ip netns exec ${ns1} ping -c 1 -q dead:2::99 > /dev/null + if [ $? -ne 0 ];then + return 1 + fi + + return 0 +} + +test_ping_router() { + ip netns exec ${ns1} ping -c 1 -q 10.0.2.1 > /dev/null + if [ $? -ne 0 ];then + return 1 + fi + + ip netns exec ${ns1} ping -c 1 -q dead:2::1 > /dev/null + if [ $? -ne 0 ];then + return 1 + fi + + return 0 +} + +test_queue_blackhole() { + local proto=$1 + +ip netns exec ${nsrouter} nft -f - < /dev/null + lret=$? + elif [ $proto = "ip6" ]; then + ip netns exec ${ns1} ping -c 1 -q dead:2::99 > /dev/null + lret=$? + else + lret=111 + fi + + # queue without bypass keyword should drop traffic if no listener exists. + if [ $lret -eq 0 ];then + echo "FAIL: $proto expected failure, got $lret" 1>&2 + exit 1 + fi + + ip netns exec ${nsrouter} nft delete table $proto blackh + if [ $? -ne 0 ] ;then + echo "FAIL: $proto: Could not delete blackh table" + exit 1 + fi + + echo "PASS: $proto: statement with no listener results in packet drop" +} + +test_queue() +{ + local expected=$1 + local last="" + + # spawn nf-queue listeners + ip netns exec ${nsrouter} ./nf-queue -c -q 0 -t 3 > "$TMPFILE0" & + ip netns exec ${nsrouter} ./nf-queue -c -q 1 -t 3 > "$TMPFILE1" & + sleep 1 + test_ping + ret=$? + if [ $ret -ne 0 ];then + echo "FAIL: netns routing/connectivity with active listener on queue $queue: $ret" 1>&2 + exit $ret + fi + + test_ping_router + ret=$? + if [ $ret -ne 0 ];then + echo "FAIL: netns router unreachable listener on queue $queue: $ret" 1>&2 + exit $ret + fi + + wait + ret=$? + + for file in $TMPFILE0 $TMPFILE1; do + last=$(tail -n1 "$file") + if [ x"$last" != x"$expected packets total" ]; then + echo "FAIL: Expected $expected packets total, but got $last" 1>&2 + cat "$file" 1>&2 + + ip netns exec ${nsrouter} nft list ruleset + exit 1 + fi + done + + echo "PASS: Expected and received $last" +} + +test_tcp_forward() +{ + ip netns exec ${nsrouter} ./nf-queue -q 2 -t 10 & + local nfqpid=$! + + tmpfile=$(mktemp) || exit 1 + dd conv=sparse status=none if=/dev/zero bs=1M count=100 of=$tmpfile + ip netns exec ${ns2} nc -w 5 -l -p 12345 <"$tmpfile" >/dev/null & + local rpid=$! + + sleep 1 + ip netns exec ${ns1} nc -w 5 10.0.2.99 12345 <"$tmpfile" >/dev/null & + + rm -f "$tmpfile" + + wait $rpid + wait $lpid + [ $? 
-eq 0 ] && echo "PASS: tcp and nfqueue in forward chain" +} + +test_tcp_localhost() +{ + tc -net "${nsrouter}" qdisc add dev lo root netem loss random 1% + + tmpfile=$(mktemp) || exit 1 + + dd conv=sparse status=none if=/dev/zero bs=1M count=900 of=$tmpfile + ip netns exec ${nsrouter} nc -w 5 -l -p 12345 <"$tmpfile" >/dev/null & + local rpid=$! + + ip netns exec ${nsrouter} ./nf-queue -q 3 -t 30 & + local nfqpid=$! + + sleep 1 + ip netns exec ${nsrouter} nc -w 5 127.0.0.1 12345 <"$tmpfile" > /dev/null + rm -f "$tmpfile" + + wait $rpid + [ $? -eq 0 ] && echo "PASS: tcp via loopback" +} + +ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null +ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null +ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null + +load_ruleset "filter" 0 + +sleep 3 + +test_ping +ret=$? +if [ $ret -eq 0 ];then + # queue bypass works (rules were skipped, no listener) + echo "PASS: ${ns1} can reach ${ns2}" +else + echo "FAIL: ${ns1} cannot reach ${ns2}: $ret" 1>&2 + exit $ret +fi + +test_queue_blackhole ip +test_queue_blackhole ip6 + +# dummy ruleset to add base chains between the +# queueing rules. We don't want the second reinject +# to re-execute the old hooks. +load_counter_ruleset 10 + +# we are hooking all: prerouting/input/forward/output/postrouting. +# we ping ${ns2} from ${ns1} via ${nsrouter} using ipv4 and ipv6, so: +# 1x icmp prerouting,forward,postrouting -> 3 queue events (6 incl. reply). +# 1x icmp prerouting,input,output postrouting -> 4 queue events incl. reply. +# so we expect that userspace program receives 10 packets. +test_queue 10 + +# same. We queue to a second program as well. +load_ruleset "filter2" 20 +test_queue 20 + +test_tcp_forward +test_tcp_localhost + +exit $ret From 306f354c67397b3138300cde875c5cab45b857f7 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 16 Mar 2020 09:31:03 +0200 Subject: [PATCH 125/138] net/mlx5_core: Set IB capability mask1 to fix ib_srpt connection failure The cap_mask1 isn't protected by field_select and not listed among RW fields, but it is required to be written to properly initialize ports in IB virtualization mode. Link: https://lore.kernel.org/linux-rdma/88bab94d2fd72f3145835b4518bc63dda587add6.camel@redhat.com Fixes: ab118da4c10a ("net/mlx5: Don't write read-only fields in MODIFY_HCA_VPORT_CONTEXT command") Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 1faac31f74d0..23f879da9104 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -1071,6 +1071,9 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid); if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID) MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid); + MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1); + MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, + req->cap_mask1_perm); err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); ex: kfree(in); From 1de0306c3a05d305e45b1f1fabe2f4e94222eb6b Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Mon, 9 Mar 2020 09:44:18 +0200 Subject: [PATCH 126/138] net/mlx5e: Enhance ICOSQ WQE info fields Add number of WQEBBs (WQE's Basic Block) to WQE info struct. 
Set the number of WQEBBs on WQE post, and increment the consumer counter (cc) on completion. In case of error completions, the cc was mistakenly not incremented, keeping a gap between cc and pc (producer counter). This failed the recovery flow on the ICOSQ from a CQE error which timed-out waiting for the cc and pc to meet. Fixes: be5323c8379f ("net/mlx5e: Report and recover from CQE error on ICOSQ") Signed-off-by: Aya Levin Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11 +++++------ drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 1 + 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 220ef9f06f84..571ac5976549 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -371,6 +371,7 @@ enum { struct mlx5e_sq_wqe_info { u8 opcode; + u8 num_wqebbs; /* Auxiliary data for different opcodes. */ union { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1c3ab69cbd96..312d4692425b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -477,6 +477,7 @@ static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, /* fill sq frag edge with nops to avoid wqe wrapping two pages */ for (; wi < edge_wi; wi++) { wi->opcode = MLX5_OPCODE_NOP; + wi->num_wqebbs = 1; mlx5e_post_nop(wq, sq->sqn, &sq->pc); } } @@ -525,6 +526,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; + sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS; sq->db.ico_wqe[pi].umr.rq = rq; sq->pc += MLX5E_UMR_WQEBBS; @@ -621,6 +623,7 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wi = &sq->db.ico_wqe[ci]; + sqcc += wi->num_wqebbs; if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { netdev_WARN_ONCE(cq->channel->netdev, @@ -631,16 +634,12 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) break; } - if (likely(wi->opcode == MLX5_OPCODE_UMR)) { - sqcc += MLX5E_UMR_WQEBBS; + if (likely(wi->opcode == MLX5_OPCODE_UMR)) wi->umr.rq->mpwqe.umr_completed++; - } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) { - sqcc++; - } else { + else if (unlikely(wi->opcode != MLX5_OPCODE_NOP)) netdev_WARN_ONCE(cq->channel->netdev, "Bad OPCODE in ICOSQ WQE info: 0x%x\n", wi->opcode); - } } while (!last_wqe); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 257a7c9f7a14..800d34ed8a96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -78,6 +78,7 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq) u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; + sq->db.ico_wqe[pi].num_wqebbs = 1; nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); } From 39369fd536d485a99a59d8e357c0d4d3ce19a3b8 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 12 Mar 2020 12:35:32 +0200 Subject: [PATCH 127/138] net/mlx5e: Fix missing reset of SW metadata in Striding RQ reset When resetting the RQ (moving RQ state from RST to RDY), the driver resets the WQ's SW metadata. 
In striding RQ mode, we maintain a field that reflects the actual expected WQ head (including in-progress WQEs posted to the ICOSQ). It was mistakenly not reset together with the WQ. Fix this here. Fixes: 8276ea1353a4 ("net/mlx5e: Report and recover from CQE with error on RQ") Signed-off-by: Aya Levin Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index a226277b0980..f07b1399744e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -181,10 +181,12 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq) { - if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { mlx5_wq_ll_reset(&rq->mpwqe.wq); - else + rq->mpwqe.actual_wq_head = 0; + } else { mlx5_wq_cyc_reset(&rq->wqe.wq); + } } /* SW parser related functions */ From e239c6d686e1c37fb2ab143162dfb57471a8643f Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Mon, 16 Mar 2020 16:53:10 +0200 Subject: [PATCH 128/138] net/mlx5e: Fix ICOSQ recovery flow with Striding RQ In striding RQ mode, the buffers of an RX WQE are first prepared and posted to the HW using UMR WQEs via the ICOSQ. We maintain the state of these in-progress WQEs in the RQ SW struct. In the flow of ICOSQ recovery, the corresponding RQ is not in error state, hence: - The buffers of the in-progress WQEs must be released and the RQ metadata should reflect it. - Existing RX WQEs in the RQ should not be affected. For this, wrap the dealloc of the in-progress WQEs in a function, and use it in the ICOSQ recovery flow instead of mlx5e_free_rx_descs().
Fixes: be5323c8379f ("net/mlx5e: Report and recover from CQE error on ICOSQ") Signed-off-by: Aya Levin Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + .../mellanox/mlx5/core/en/reporter_rx.c | 2 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 31 ++++++++++++++----- 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 571ac5976549..c9606b8ab6ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1060,6 +1060,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); void mlx5e_activate_rq(struct mlx5e_rq *rq); void mlx5e_deactivate_rq(struct mlx5e_rq *rq); void mlx5e_free_rx_descs(struct mlx5e_rq *rq); +void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq); void mlx5e_activate_icosq(struct mlx5e_icosq *icosq); void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 6c72b592315b..a01e2de2488f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) goto out; mlx5e_reset_icosq_cc_pc(icosq); - mlx5e_free_rx_descs(rq); + mlx5e_free_rx_in_progress_descs(rq); clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); mlx5e_activate_icosq(icosq); mlx5e_activate_rq(rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 21de4764d4c0..4ef3dc79f73c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -813,6 +813,29 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) return -ETIMEDOUT; } +void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq; + u16 head; + int i; + + if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) + return; + + wq = &rq->mpwqe.wq; + head = wq->head; + + /* Outstanding UMR WQEs (in progress) start at wq->head */ + for (i = 0; i < rq->mpwqe.umr_in_progress; i++) { + rq->dealloc_wqe(rq, head); + head = mlx5_wq_ll_get_wqe_next_ix(wq, head); + } + + rq->mpwqe.actual_wq_head = wq->head; + rq->mpwqe.umr_in_progress = 0; + rq->mpwqe.umr_completed = 0; +} + void mlx5e_free_rx_descs(struct mlx5e_rq *rq) { __be16 wqe_ix_be; @@ -820,14 +843,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq) if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { struct mlx5_wq_ll *wq = &rq->mpwqe.wq; - u16 head = wq->head; - int i; - /* Outstanding UMR WQEs (in progress) start at wq->head */ - for (i = 0; i < rq->mpwqe.umr_in_progress; i++) { - rq->dealloc_wqe(rq, head); - head = mlx5_wq_ll_get_wqe_next_ix(wq, head); - } + mlx5e_free_rx_in_progress_descs(rq); while (!mlx5_wq_ll_is_empty(wq)) { struct mlx5e_rx_wqe_ll *wqe; From 187a9830c921d92c4a9a8e2921ecc4b35a97532c Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 19 Mar 2020 13:25:17 +0200 Subject: [PATCH 129/138] net/mlx5e: Do not recover from a non-fatal syndrome For non-fatal syndromes like LOCAL_LENGTH_ERR, recovery shouldn't be triggered. In these scenarios, the RQ is not actually in ERR state. 
This misleads the recovery flow which assumes that the RQ is really in error state and no more completions arrive, causing crashes on bad page state. Fixes: 8276ea1353a4 ("net/mlx5e: Report and recover from CQE with error on RQ") Signed-off-by: Aya Levin Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index d3693fa547ac..e54f70d9af22 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -10,8 +10,7 @@ static inline bool cqe_syndrome_needs_recover(u8 syndrome) { - return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR || - syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || + return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR || syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR; } From 961d0e5b32946703125964f9f5b6321d60f4d706 Mon Sep 17 00:00:00 2001 From: Zh-yuan Ye Date: Tue, 24 Mar 2020 17:28:25 +0900 Subject: [PATCH 130/138] net: cbs: Fix software cbs to consider packet sending time Currently the software CBS does not consider the packet sending time when depleting the credits. It caused the throughput to be Idleslope[kbps] * (Port transmit rate[kbps] / |Sendslope[kbps]|), whereas the expected throughput is simply Idleslope, since Idleslope * (Port transmit rate / (Idleslope + |Sendslope|)) = Idleslope. For example, with Idleslope = 20 Mbit/s on a 100 Mbit/s port (Sendslope = -80 Mbit/s), the buggy qdisc reaches 20 * 100/80 = 25 Mbit/s instead of the expected 20 Mbit/s. In order to fix the issue above, this patch takes the time when the packet sending completes into account, by moving the anchor time variable "last" ahead to the send completion time upon transmission and adding a wait when the next dequeue request comes before the send completion time of the previous packet. changelog: V2->V3: - remove unnecessary whitespace cleanup - add the checks if port_rate is 0 before division V1->V2: - combine variable "send_completed" into "last" - add the comment for estimate of the packet sending Fixes: 585d763af09c ("net/sched: Introduce Credit Based Shaper (CBS) qdisc") Signed-off-by: Zh-yuan Ye Reviewed-by: Vinicius Costa Gomes Signed-off-by: David S.
Miller --- net/sched/sch_cbs.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index b2905b03a432..2eaac2ff380f 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -181,6 +181,11 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) s64 credits; int len; + /* The previous packet is still being sent */ + if (now < q->last) { + qdisc_watchdog_schedule_ns(&q->watchdog, q->last); + return NULL; + } if (q->credits < 0) { credits = timediff_to_credits(now - q->last, q->idleslope); @@ -212,7 +217,12 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) credits += q->credits; q->credits = max_t(s64, credits, q->locredit); - q->last = now; + /* Estimate of the transmission of the last byte of the packet in ns */ + if (unlikely(atomic64_read(&q->port_rate) == 0)) + q->last = now; + else + q->last = now + div64_s64(len * NSEC_PER_SEC, + atomic64_read(&q->port_rate)); return skb; } From e80f40cbe4dd51371818e967d40da8fe305db5e4 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 24 Mar 2020 11:45:34 +0200 Subject: [PATCH 131/138] net: dsa: tag_8021q: replace dsa_8021q_remove_header with __skb_vlan_pop Not only did this wheel did not need reinventing, but there is also an issue with it: It doesn't remove the VLAN header in a way that preserves the L2 payload checksum when that is being provided by the DSA master hw. It should recalculate checksum both for the push, before removing the header, and for the pull afterwards. But the current implementation is quite dizzying, with pulls followed immediately afterwards by pushes, the memmove is done before the push, etc. This makes a DSA master with RX checksumming offload to print stack traces with the infamous 'hw csum failure' message. So remove the dsa_8021q_remove_header function and replace it with something that actually works with inet checksumming. Fixes: d461933638ae ("net: dsa: tag_8021q: Create helper function for removing VLAN header") Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- include/linux/dsa/8021q.h | 7 ------- net/dsa/tag_8021q.c | 43 --------------------------------------- net/dsa/tag_sja1105.c | 19 ++++++++--------- 3 files changed, 9 insertions(+), 60 deletions(-) diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 0aa803c451a3..c620d9139c28 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -28,8 +28,6 @@ int dsa_8021q_rx_switch_id(u16 vid); int dsa_8021q_rx_source_port(u16 vid); -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb); - #else int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, @@ -64,11 +62,6 @@ int dsa_8021q_rx_source_port(u16 vid) return 0; } -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb) -{ - return NULL; -} - #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */ #endif /* _NET_DSA_8021Q_H */ diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 2fb6c26294b5..b97ad93d1c1a 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -298,47 +298,4 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, } EXPORT_SYMBOL_GPL(dsa_8021q_xmit); -/* In the DSA packet_type handler, skb->data points in the middle of the VLAN - * tag, after tpid and before tci. This is because so far, ETH_HLEN - * (DMAC, SMAC, EtherType) bytes were pulled. - * There are 2 bytes of VLAN tag left in skb->data, and upper - * layers expect the 'real' EtherType to be consumed as well. 
- * Coincidentally, a VLAN header is also of the same size as - * the number of bytes that need to be pulled. - * - * skb_mac_header skb->data - * | | - * v v - * | | | | | | | | | | | | | | | | | | | - * +-----------------------+-----------------------+-------+-------+-------+ - * | Destination MAC | Source MAC | TPID | TCI | EType | - * +-----------------------+-----------------------+-------+-------+-------+ - * ^ | | - * |<--VLAN_HLEN-->to <---VLAN_HLEN---> - * from | - * >>>>>>> v - * >>>>>>> | | | | | | | | | | | | | | | - * >>>>>>> +-----------------------+-----------------------+-------+ - * >>>>>>> | Destination MAC | Source MAC | EType | - * +-----------------------+-----------------------+-------+ - * ^ ^ - * (now part of | | - * skb->head) skb_mac_header skb->data - */ -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb) -{ - u8 *from = skb_mac_header(skb); - u8 *dest = from + VLAN_HLEN; - - memmove(dest, from, ETH_HLEN - VLAN_HLEN); - skb_pull(skb, VLAN_HLEN); - skb_push(skb, ETH_HLEN); - skb_reset_mac_header(skb); - skb_reset_mac_len(skb); - skb_pull_rcsum(skb, ETH_HLEN); - - return skb; -} -EXPORT_SYMBOL_GPL(dsa_8021q_remove_header); - MODULE_LICENSE("GPL v2"); diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index 5366ea430349..d553bf36bd41 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -250,14 +250,14 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, { struct sja1105_meta meta = {0}; int source_port, switch_id; - struct vlan_ethhdr *hdr; + struct ethhdr *hdr; u16 tpid, vid, tci; bool is_link_local; bool is_tagged; bool is_meta; - hdr = vlan_eth_hdr(skb); - tpid = ntohs(hdr->h_vlan_proto); + hdr = eth_hdr(skb); + tpid = ntohs(hdr->h_proto); is_tagged = (tpid == ETH_P_SJA1105); is_link_local = sja1105_is_link_local(skb); is_meta = sja1105_is_meta_frame(skb); @@ -266,7 +266,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, if (is_tagged) { /* Normal traffic path. */ - tci = ntohs(hdr->h_vlan_TCI); + skb_push_rcsum(skb, ETH_HLEN); + __skb_vlan_pop(skb, &tci); + skb_pull_rcsum(skb, ETH_HLEN); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + vid = tci & VLAN_VID_MASK; source_port = dsa_8021q_rx_source_port(vid); switch_id = dsa_8021q_rx_switch_id(vid); @@ -295,12 +300,6 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, return NULL; } - /* Delete/overwrite fake VLAN header, DSA expects to not find - * it there, see dsa_switch_rcv: skb_push(skb, ETH_HLEN). - */ - if (is_tagged) - skb = dsa_8021q_remove_header(skb); - return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local, is_meta); } From 50e0d28d3808146cc19b0d5564ef4ba9e5bf3846 Mon Sep 17 00:00:00 2001 From: Raju Rangoju Date: Tue, 24 Mar 2020 17:10:00 +0530 Subject: [PATCH 132/138] cxgb4/ptp: pass the sign of offset delta in FW CMD cxgb4_ptp_fineadjtime() doesn't pass the signedness of offset delta in FW_PTP_CMD. Fix it by passing correct sign. Signed-off-by: Raju Rangoju Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c index 58a039c3224a..af1f40cbccc8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c @@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta) FW_PTP_CMD_PORTID_V(0)); c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c.u.ts.sc = FW_PTP_SC_ADJ_FTIME; + c.u.ts.sign = (delta < 0) ? 1 : 0; + if (delta < 0) + delta = -delta; c.u.ts.tm = cpu_to_be64(delta); err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); From c312c7818b86b663d32ec5d4b512abf06b23899a Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 24 Mar 2020 16:10:10 +0000 Subject: [PATCH 133/138] net: phy: mdio-bcm-unimac: Fix clock handling The DT binding for this PHY describes an *optional* clock property. Due to a bug in the error handling logic, we are actually ignoring this clock *all* of the time so far. Fix this by using devm_clk_get_optional() to handle this clock properly. Fixes: b78ac6ecd1b6b ("net: phy: mdio-bcm-unimac: Allow configuring MDIO clock divider") Signed-off-by: Andre Przywara Reviewed-by: Andrew Lunn Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio-bcm-unimac.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 4a28fb29adaa..fbd36891ee64 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -242,11 +242,9 @@ static int unimac_mdio_probe(struct platform_device *pdev) return -ENOMEM; } - priv->clk = devm_clk_get(&pdev->dev, NULL); - if (PTR_ERR(priv->clk) == -EPROBE_DEFER) + priv->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); - else - priv->clk = NULL; ret = clk_prepare_enable(priv->clk); if (ret) From f13bc68131b0c0d67a77fb43444e109828a983bf Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 24 Mar 2020 20:58:29 +0100 Subject: [PATCH 134/138] r8169: re-enable MSI on RTL8168c The original change fixed an issue on RTL8168b by mimicking the vendor driver behavior to disable MSI on chip versions before RTL8168d. This however now caused an issue on a system with RTL8168c, see [0]. Therefore leave MSI disabled on RTL8168b, but re-enable it on RTL8168c. [0] https://bugzilla.redhat.com/show_bug.cgi?id=1792839 Fixes: 003bd5b4a7b4 ("r8169: don't use MSI before RTL8168d") Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index a2168a14794c..a9bdafd15a35 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5194,7 +5194,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); rtl_lock_config_regs(tp); /* fall through */ - case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_07 ... 
RTL_GIGA_MAC_VER_17: flags = PCI_IRQ_LEGACY; break; default: From 919a23e9d6ccf8b30e6b4c7aa36f8904c99d2dd7 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 25 Mar 2020 16:07:01 +0800 Subject: [PATCH 135/138] selftests/net: add missing tests to Makefile Find some tests are missed in Makefile by running: for file in $(ls *.sh); do grep -q $file Makefile || echo $file; done Signed-off-by: Hangbin Liu Reviewed-by: David Ahern Signed-off-by: David S. Miller --- tools/testing/selftests/net/Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 287ae916ec0b..4c1bd03ffa1c 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -11,7 +11,9 @@ TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh -TEST_PROGS += fin_ack_lat.sh +TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh +TEST_PROGS += altnames.sh icmp_redirect.sh ip6_gre_headroom.sh +TEST_PROGS += route_localnet.sh TEST_PROGS_EXTENDED := in_netns.sh TEST_GEN_FILES = socket nettest TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any From c085dbfb1cfcf74e2ef2ef435291e7e63f046d6a Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 25 Mar 2020 16:41:01 +0800 Subject: [PATCH 136/138] selftests/net/forwarding: define libs as TEST_PROGS_EXTENDED The lib files should not be defined as TEST_PROGS, or we will run them in run_kselftest.sh. Also remove ethtool_lib.sh exec permission. Fixes: 81573b18f26d ("selftests/net/forwarding: add Makefile to install tests") Signed-off-by: Hangbin Liu Signed-off-by: David S. 
Miller --- .../testing/selftests/net/forwarding/Makefile | 31 ++++++++++--------- .../selftests/net/forwarding/ethtool_lib.sh | 0 2 files changed, 16 insertions(+), 15 deletions(-) mode change 100755 => 100644 tools/testing/selftests/net/forwarding/ethtool_lib.sh diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile index 44616103508b..250fbb2d1625 100644 --- a/tools/testing/selftests/net/forwarding/Makefile +++ b/tools/testing/selftests/net/forwarding/Makefile @@ -5,11 +5,7 @@ TEST_PROGS = bridge_igmp.sh \ bridge_sticky_fdb.sh \ bridge_vlan_aware.sh \ bridge_vlan_unaware.sh \ - devlink_lib.sh \ - ethtool_lib.sh \ ethtool.sh \ - fib_offload_lib.sh \ - forwarding.config.sample \ gre_inner_v4_multipath.sh \ gre_inner_v6_multipath.sh \ gre_multipath.sh \ @@ -21,8 +17,6 @@ TEST_PROGS = bridge_igmp.sh \ ipip_hier_gre_key.sh \ ipip_hier_gre_keys.sh \ ipip_hier_gre.sh \ - ipip_lib.sh \ - lib.sh \ loopback.sh \ mirror_gre_bound.sh \ mirror_gre_bridge_1d.sh \ @@ -32,15 +26,11 @@ TEST_PROGS = bridge_igmp.sh \ mirror_gre_changes.sh \ mirror_gre_flower.sh \ mirror_gre_lag_lacp.sh \ - mirror_gre_lib.sh \ mirror_gre_neigh.sh \ mirror_gre_nh.sh \ mirror_gre.sh \ - mirror_gre_topo_lib.sh \ mirror_gre_vlan_bridge_1q.sh \ mirror_gre_vlan.sh \ - mirror_lib.sh \ - mirror_topo_lib.sh \ mirror_vlan.sh \ router_bridge.sh \ router_bridge_vlan.sh \ @@ -50,17 +40,12 @@ TEST_PROGS = bridge_igmp.sh \ router_multipath.sh \ router.sh \ router_vid_1.sh \ - sch_ets_core.sh \ sch_ets.sh \ - sch_ets_tests.sh \ - sch_tbf_core.sh \ - sch_tbf_etsprio.sh \ sch_tbf_ets.sh \ sch_tbf_prio.sh \ sch_tbf_root.sh \ tc_actions.sh \ tc_chains.sh \ - tc_common.sh \ tc_flower_router.sh \ tc_flower.sh \ tc_shblocks.sh \ @@ -72,4 +57,20 @@ TEST_PROGS = bridge_igmp.sh \ vxlan_bridge_1q.sh \ vxlan_symmetric.sh +TEST_PROGS_EXTENDED := devlink_lib.sh \ + ethtool_lib.sh \ + fib_offload_lib.sh \ + forwarding.config.sample \ + ipip_lib.sh \ + lib.sh \ + mirror_gre_lib.sh \ + mirror_gre_topo_lib.sh \ + mirror_lib.sh \ + mirror_topo_lib.sh \ + sch_ets_core.sh \ + sch_ets_tests.sh \ + sch_tbf_core.sh \ + sch_tbf_etsprio.sh \ + tc_common.sh + include ../../lib.mk diff --git a/tools/testing/selftests/net/forwarding/ethtool_lib.sh b/tools/testing/selftests/net/forwarding/ethtool_lib.sh old mode 100755 new mode 100644 From 428c491332bca498c8eb2127669af51506c346c7 Mon Sep 17 00:00:00 2001 From: "Guilherme G. Piccoli" Date: Fri, 20 Mar 2020 09:55:34 -0300 Subject: [PATCH 137/138] net: ena: Add PCI shutdown handler to allow safe kexec Currently ENA only provides the PCI remove() handler, used during rmmod for example. This is not called on shutdown/kexec path; we are potentially creating a failure scenario on kexec: (a) Kexec is triggered, no shutdown() / remove() handler is called for ENA; instead pci_device_shutdown() clears the master bit of the PCI device, stopping all DMA transactions; (b) Kexec reboot happens and the device gets enabled again, likely having its FW with that DMA transaction buffered; then it may trigger the (now invalid) memory operation in the new kernel, corrupting kernel memory area. This patch aims to prevent this, by implementing a shutdown() handler quite similar to the remove() one - the difference being the handling of the netdev, which is unregistered on remove(), but following the convention observed in other drivers, it's only detached on shutdown(). 
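As a rough sketch of that remove/shutdown split (a generic pseudo-driver, not the actual ena code; it assumes the netdev was stored as PCI drvdata in probe, and all device-specific teardown is elided):

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* One teardown helper shared by .remove and .shutdown; the two paths
 * differ only in how the netdev is handled. */
static void foo_shutoff(struct pci_dev *pdev, bool shutdown)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();
	/* ... stop queues, quiesce DMA, free device resources ... */
	if (shutdown) {
		/* reboot/kexec: keep the netdev allocated, just detach it */
		netif_device_detach(netdev);
		dev_close(netdev);
		rtnl_unlock();
	} else {
		rtnl_unlock();
		unregister_netdev(netdev);
		free_netdev(netdev);
	}
}

static void foo_remove(struct pci_dev *pdev)
{
	foo_shutoff(pdev, false);
}

static void foo_shutdown(struct pci_dev *pdev)
{
	foo_shutoff(pdev, true);
}

static struct pci_driver foo_pci_driver = {
	/* ... .name, .id_table, .probe ... */
	.remove   = foo_remove,
	.shutdown = foo_shutdown,
};

The point of the split is that shutdown() runs late in reboot/kexec, where unregistering the whole netdev is unnecessary; detaching and closing it is enough to stop DMA before the PCI core clears bus mastering.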
This prevents an odd issue in AWS Nitro instances, in which after the 2nd kexec the next one will fail with an initrd corruption, caused by a wild DMA write to invalid kernel memory. The lspci output for the adapter present in my instance is: 00:05.0 Ethernet controller [0200]: Amazon.com, Inc. Elastic Network Adapter (ENA) [1d0f:ec20] Suggested-by: Gavin Shan Signed-off-by: Guilherme G. Piccoli Acked-by: Sameeh Jubran Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 51 ++++++++++++++++---- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 4647d7656761..cada6e7e30f4 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -4336,13 +4336,15 @@ err_disable_device: /*****************************************************************************/ -/* ena_remove - Device Removal Routine +/* __ena_shutoff - Helper used in both PCI remove/shutdown routines * @pdev: PCI device information struct + * @shutdown: Is it a shutdown operation? If false, means it is a removal * - * ena_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. + * __ena_shutoff is a helper routine that does the real work on shutdown and + * removal paths; the difference between those paths is with regards to whether + * dettach or unregister the netdevice. */ -static void ena_remove(struct pci_dev *pdev) +static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) { struct ena_adapter *adapter = pci_get_drvdata(pdev); struct ena_com_dev *ena_dev; @@ -4361,13 +4363,17 @@ static void ena_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->reset_task); - rtnl_lock(); + rtnl_lock(); /* lock released inside the below if-else block */ ena_destroy_device(adapter, true); - rtnl_unlock(); - - unregister_netdev(netdev); - - free_netdev(netdev); + if (shutdown) { + netif_device_detach(netdev); + dev_close(netdev); + rtnl_unlock(); + } else { + rtnl_unlock(); + unregister_netdev(netdev); + free_netdev(netdev); + } ena_com_rss_destroy(ena_dev); @@ -4382,6 +4388,30 @@ static void ena_remove(struct pci_dev *pdev) vfree(ena_dev); } +/* ena_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ena_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. + */ + +static void ena_remove(struct pci_dev *pdev) +{ + __ena_shutoff(pdev, false); +} + +/* ena_shutdown - Device Shutdown Routine + * @pdev: PCI device information struct + * + * ena_shutdown is called by the PCI subsystem to alert the driver that + * a shutdown/reboot (or kexec) is happening and device must be disabled. 
+ */ + +static void ena_shutdown(struct pci_dev *pdev) +{ + __ena_shutoff(pdev, true); +} + #ifdef CONFIG_PM /* ena_suspend - PM suspend callback * @pdev: PCI device information struct @@ -4431,6 +4461,7 @@ static struct pci_driver ena_pci_driver = { .id_table = ena_pci_tbl, .probe = ena_probe, .remove = ena_remove, + .shutdown = ena_shutdown, #ifdef CONFIG_PM .suspend = ena_suspend, .resume = ena_resume, From 2c64605b590edadb3fb46d1ec6badb49e940b479 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 25 Mar 2020 13:47:18 +0100 Subject: [PATCH 138/138] net: Fix CONFIG_NET_CLS_ACT=n and CONFIG_NFT_FWD_NETDEV={y, m} build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit net/netfilter/nft_fwd_netdev.c: In function ‘nft_fwd_netdev_eval’: net/netfilter/nft_fwd_netdev.c:32:10: error: ‘struct sk_buff’ has no member named ‘tc_redirected’ pkt->skb->tc_redirected = 1; ^~ net/netfilter/nft_fwd_netdev.c:33:10: error: ‘struct sk_buff’ has no member named ‘tc_from_ingress’ pkt->skb->tc_from_ingress = 1; ^~ To avoid a direct dependency with tc actions from netfilter, wrap the redirect bits around CONFIG_NET_REDIRECT and move helpers to include/linux/skbuff.h. Turn on this toggle from the ifb driver, the only existing client of these bits in the tree. This patch adds skb_set_redirected() that sets on the redirected bit on the skbuff, it specifies if the packet was redirect from ingress and resets the timestamp (timestamp reset was originally missing in the netfilter bugfix). Fixes: bcfabee1afd99484 ("netfilter: nft_fwd_netdev: allow to redirect to ifb via ingress") Reported-by: noreply@ellerman.id.au Reported-by: Geert Uytterhoeven Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- drivers/net/Kconfig | 1 + drivers/net/ifb.c | 6 +++--- drivers/net/wireguard/queueing.h | 2 +- include/linux/skbuff.h | 36 ++++++++++++++++++++++++++++---- include/net/sch_generic.h | 16 -------------- net/Kconfig | 3 +++ net/core/dev.c | 4 ++-- net/core/pktgen.c | 2 +- net/netfilter/nft_fwd_netdev.c | 5 ++--- net/sched/act_mirred.c | 6 ++---- 10 files changed, 47 insertions(+), 34 deletions(-) diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 25a8f9387d5a..db8884ad6d40 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -149,6 +149,7 @@ config NET_FC config IFB tristate "Intermediate Functional Block support" depends on NET_CLS_ACT + select NET_REDIRECT ---help--- This is an intermediate driver that allows sharing of resources. 
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 242b9b0943f8..7fe306e76281 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -75,7 +75,7 @@ static void ifb_ri_tasklet(unsigned long _txp) } while ((skb = __skb_dequeue(&txp->tq)) != NULL) { - skb->tc_redirected = 0; + skb->redirected = 0; skb->tc_skip_classify = 1; u64_stats_update_begin(&txp->tsync); @@ -96,7 +96,7 @@ static void ifb_ri_tasklet(unsigned long _txp) rcu_read_unlock(); skb->skb_iif = txp->dev->ifindex; - if (!skb->tc_from_ingress) { + if (!skb->from_ingress) { dev_queue_xmit(skb); } else { skb_pull_rcsum(skb, skb->mac_len); @@ -243,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) txp->rx_bytes += skb->len; u64_stats_update_end(&txp->rsync); - if (!skb->tc_redirected || !skb->skb_iif) { + if (!skb->redirected || !skb->skb_iif) { dev_kfree_skb(skb); dev->stats.rx_dropped++; return NETDEV_TX_OK; diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index cf1e0e2376d8..3432232afe06 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -100,8 +100,8 @@ static inline void wg_reset_packet(struct sk_buff *skb) skb->dev = NULL; #ifdef CONFIG_NET_SCHED skb->tc_index = 0; - skb_reset_tc(skb); #endif + skb_reset_redirect(skb); skb->hdr_len = skb_headroom(skb); skb_reset_mac_header(skb); skb_reset_network_header(skb); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5b50278c4bc8..e59620234415 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -645,8 +645,8 @@ typedef unsigned char *sk_buff_data_t; * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware * @tc_skip_classify: do not classify packet. set by IFB device * @tc_at_ingress: used within tc_classify to distinguish in/egress - * @tc_redirected: packet was redirected by a tc action - * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect + * @redirected: packet was redirected by packet classifier + * @from_ingress: packet was redirected from the ingress path * @peeked: this packet has been seen already, so stats have been * done for it, don't do them again * @nf_trace: netfilter packet trace flag @@ -848,8 +848,10 @@ struct sk_buff { #ifdef CONFIG_NET_CLS_ACT __u8 tc_skip_classify:1; __u8 tc_at_ingress:1; - __u8 tc_redirected:1; - __u8 tc_from_ingress:1; +#endif +#ifdef CONFIG_NET_REDIRECT + __u8 redirected:1; + __u8 from_ingress:1; #endif #ifdef CONFIG_TLS_DEVICE __u8 decrypted:1; @@ -4579,5 +4581,31 @@ static inline __wsum lco_csum(struct sk_buff *skb) return csum_partial(l4_hdr, csum_start - l4_hdr, partial); } +static inline bool skb_is_redirected(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_REDIRECT + return skb->redirected; +#else + return false; +#endif +} + +static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) +{ +#ifdef CONFIG_NET_REDIRECT + skb->redirected = 1; + skb->from_ingress = from_ingress; + if (skb->from_ingress) + skb->tstamp = 0; +#endif +} + +static inline void skb_reset_redirect(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_REDIRECT + skb->redirected = 0; +#endif +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 151208704ed2..c30f914867e6 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -675,22 +675,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); int skb_do_redirect(struct sk_buff *); -static inline void 
skb_reset_tc(struct sk_buff *skb) -{ -#ifdef CONFIG_NET_CLS_ACT - skb->tc_redirected = 0; -#endif -} - -static inline bool skb_is_tc_redirected(const struct sk_buff *skb) -{ -#ifdef CONFIG_NET_CLS_ACT - return skb->tc_redirected; -#else - return false; -#endif -} - static inline bool skb_at_tc_ingress(const struct sk_buff *skb) { #ifdef CONFIG_NET_CLS_ACT diff --git a/net/Kconfig b/net/Kconfig index 2eeb0e55f7c9..df8d8c9bd021 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -52,6 +52,9 @@ config NET_INGRESS config NET_EGRESS bool +config NET_REDIRECT + bool + config SKB_EXTENSIONS bool diff --git a/net/core/dev.c b/net/core/dev.c index 402a986659cf..500bba8874b0 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4516,7 +4516,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, /* Reinjected packets coming from act_mirred or similar should * not get XDP generic processing. */ - if (skb_is_tc_redirected(skb)) + if (skb_is_redirected(skb)) return XDP_PASS; /* XDP packets must be linear and must have sufficient headroom @@ -5063,7 +5063,7 @@ skip_taps: goto out; } #endif - skb_reset_tc(skb); + skb_reset_redirect(skb); skip_classify: if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) goto drop; diff --git a/net/core/pktgen.c b/net/core/pktgen.c index acc849df60b5..d0641bba6b81 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3362,7 +3362,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) /* skb was 'freed' by stack, so clean few * bits and reuse it */ - skb_reset_tc(skb); + skb_reset_redirect(skb); } while (--burst > 0); goto out; /* Skips xmit_mode M_START_XMIT */ } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) { diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c index 74f050ba6bad..3087e23297db 100644 --- a/net/netfilter/nft_fwd_netdev.c +++ b/net/netfilter/nft_fwd_netdev.c @@ -28,9 +28,8 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr, struct nft_fwd_netdev *priv = nft_expr_priv(expr); int oif = regs->data[priv->sreg_dev]; - /* These are used by ifb only. */ - pkt->skb->tc_redirected = 1; - pkt->skb->tc_from_ingress = 1; + /* This is used by ifb only. */ + skb_set_redirected(pkt->skb, true); nf_fwd_netdev_egress(pkt, oif); regs->verdict.code = NF_STOLEN; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 1ad300e6dbc0..83dd82fc9f40 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -284,10 +284,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, /* mirror is always swallowed */ if (is_redirect) { - skb2->tc_redirected = 1; - skb2->tc_from_ingress = skb2->tc_at_ingress; - if (skb2->tc_from_ingress) - skb2->tstamp = 0; + skb_set_redirected(skb2, skb2->tc_at_ingress); + /* let's the caller reinsert the packet, if possible */ if (use_reinsert) { res->ingress = want_ingress;