net: rfs: fix crash in get_rps_cpus()

Commit 567e4b7973 ("net: rfs: add hash collision detection") had one
mistake:

RPS_NO_CPU is no longer the marker for invalid cpu in set_rps_cpu()
and get_rps_cpu(), as @next_cpu was the result of an AND with
rps_cpu_mask

This bug showed up on a host with 72 cpus:
next_cpu was 0x7f, and the code was trying to access percpu data of a
non-existent cpu.

In a follow-up patch, we might get rid of compares against nr_cpu_ids,
if we init the tables with 0. It is silly to test for a very unlikely
condition that exists only shortly after table initialization, as
we got rid of rps_reset_sock_flow() and similar functions that were
writing this RPS_NO_CPU magic value at flow dismantle: when the table is
old enough, it never contains this value anymore.

Fixes: 567e4b7973 ("net: rfs: add hash collision detection")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <tom@herbertland.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Eric Dumazet 2015-04-25 09:35:24 -07:00 committed by David S. Miller
parent 7cdbc6f74f
commit a31196b07f
2 changed files with 7 additions and 7 deletions

View File

@@ -282,7 +282,7 @@ following is true:
 - The current CPU's queue head counter >= the recorded tail counter
   value in rps_dev_flow[i]
-- The current CPU is unset (equal to RPS_NO_CPU)
+- The current CPU is unset (>= nr_cpu_ids)
 - The current CPU is offline
 After this check, the packet is sent to the (possibly updated) current

View File

@@ -3079,7 +3079,7 @@ static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	    struct rps_dev_flow *rflow, u16 next_cpu)
 {
-	if (next_cpu != RPS_NO_CPU) {
+	if (next_cpu < nr_cpu_ids) {
 #ifdef CONFIG_RFS_ACCEL
 		struct netdev_rx_queue *rxqueue;
 		struct rps_dev_flow_table *flow_table;
@@ -3184,7 +3184,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		 * If the desired CPU (where last recvmsg was done) is
 		 * different from current CPU (one in the rx-queue flow
 		 * table entry), switch if one of the following holds:
-		 *   - Current CPU is unset (equal to RPS_NO_CPU).
+		 *   - Current CPU is unset (>= nr_cpu_ids).
 		 *   - Current CPU is offline.
 		 *   - The current CPU's queue tail has advanced beyond the
 		 *     last packet that was enqueued using this table entry.
@@ -3192,14 +3192,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		 *     have been dequeued, thus preserving in order delivery.
 		 */
 		if (unlikely(tcpu != next_cpu) &&
-		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
+		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
 		      rflow->last_qtail)) >= 0)) {
 			tcpu = next_cpu;
 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
 		}

-		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
+		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
 			*rflowp = rflow;
 			cpu = tcpu;
 			goto done;
@@ -3240,14 +3240,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
 	struct rps_dev_flow_table *flow_table;
 	struct rps_dev_flow *rflow;
 	bool expire = true;
-	int cpu;
+	unsigned int cpu;

 	rcu_read_lock();
 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
 	if (flow_table && flow_id <= flow_table->mask) {
 		rflow = &flow_table->flows[flow_id];
 		cpu = ACCESS_ONCE(rflow->cpu);
-		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
 			   rflow->last_qtail) <
 		     (int)(10 * flow_table->mask)))