rxrpc: Fix net namespace cleanup
In rxrpc_destroy_all_calls(), there are two phases: (1) make sure the
->calls list is empty, emitting error messages if not, and (2) wait for the
RCU cleanup to happen on outstanding calls (ie. ->nr_calls becomes 0).
To avoid taking the call_lock unnecessarily, the function prechecks ->calls
and returns immediately if the list is empty - but this is wrong: it still
needs to do the second phase and wait for ->nr_calls to become 0.
Without this, the rxrpc_net struct may get deallocated before we get to the
RCU cleanup for the last calls. This can lead to:
Slab corruption (Not tainted): kmalloc-16k start=ffff88802b178000, len=16384
050: 6b 6b 6b 6b 6b 6b 6b 6b 61 6b 6b 6b 6b 6b 6b 6b kkkkkkkkakkkkkkk
Note the "61" at offset 0x58. This corresponds to the ->nr_calls member of
struct rxrpc_net (which is >9k in size, and thus allocated out of the 16k
slab).
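For context, the phase-(2) wait pairs with the per-call RCU cleanup, which
decrements ->nr_calls and wakes the waiter; if the destroy function returns
before that wait, the rxrpc_net can be freed while the cleanup still writes
to it, which is exactly the stray write seen above. Below is a minimal sketch
of that pairing, assuming the usual wait_var_event()/wake_up_var() pattern -
the example_* names, the call->rxnet back-pointer and the body of the free
callback are illustrative, not copied from net/rxrpc.

/* Waiter side: sketch of the tail of rxrpc_destroy_all_calls(), which must
 * not return until every call's RCU cleanup has dropped its count.
 */
static void example_wait_for_calls(struct rxrpc_net *rxnet)
{
	atomic_dec(&rxnet->nr_calls);	/* drop the count held by the netns itself */
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}

/* Waker side: assumed shape of the per-call RCU free callback - it releases
 * the call and then touches ->nr_calls in the rxrpc_net, so that struct must
 * still be live at this point.
 */
static void example_rcu_free_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
	struct rxrpc_net *rxnet = call->rxnet;	/* assumed back-pointer */

	kmem_cache_free(rxrpc_call_jar, call);
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);	/* wakes the waiter above */
}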
Fix this by flipping the condition on the if-statement, putting the locked
section inside the if-body and dropping the return from there. The
function will then always go on to wait for the RCU cleanup on outstanding
calls.
Fixes: 2baec2c3f8 ("rxrpc: Support network namespacing")
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b13023421b
parent b145745fc8
@@ -604,30 +604,30 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 
 	_enter("");
 
-	if (list_empty(&rxnet->calls))
-		return;
+	if (!list_empty(&rxnet->calls)) {
+		write_lock(&rxnet->call_lock);
 
-	write_lock(&rxnet->call_lock);
+		while (!list_empty(&rxnet->calls)) {
+			call = list_entry(rxnet->calls.next,
+					  struct rxrpc_call, link);
+			_debug("Zapping call %p", call);
 
-	while (!list_empty(&rxnet->calls)) {
-		call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
-		_debug("Zapping call %p", call);
+			rxrpc_see_call(call);
+			list_del_init(&call->link);
 
-		rxrpc_see_call(call);
-		list_del_init(&call->link);
+			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+			       call, atomic_read(&call->usage),
+			       rxrpc_call_states[call->state],
+			       call->flags, call->events);
 
-		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
-		       call, atomic_read(&call->usage),
-		       rxrpc_call_states[call->state],
-		       call->flags, call->events);
+			write_unlock(&rxnet->call_lock);
+			cond_resched();
+			write_lock(&rxnet->call_lock);
+		}
 
-		write_unlock(&rxnet->call_lock);
-		cond_resched();
-		write_lock(&rxnet->call_lock);
-	}
+		write_unlock(&rxnet->call_lock);
+	}
 
-	write_unlock(&rxnet->call_lock);
-
 	atomic_dec(&rxnet->nr_calls);
 	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
 }
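The struct goes away at all because rxrpc_destroy_all_calls() runs from the
pernet exit path, and the net core frees the per-net area holding struct
rxrpc_net (the >9k object in the 16k slab above) as soon as that path
returns. A rough sketch of the ordering, assuming the standard
pernet_operations pattern - the example_* names are illustrative; only
rxrpc_destroy_all_calls() appears in the patch itself.

/* Illustrative pernet teardown: once ->exit returns, the net core releases
 * the per-net storage that ->size asked it to allocate - here, the struct
 * rxrpc_net containing ->nr_calls - so the destroy function must have
 * finished waiting before it returns.
 */
static unsigned int example_net_id;

static void example_exit_net(struct net *net)
{
	struct rxrpc_net *rxnet = net_generic(net, example_net_id);

	rxrpc_destroy_all_calls(rxnet);
	/* ... remaining per-net teardown ... */
}

static struct pernet_operations example_net_ops = {
	.exit = example_exit_net,
	.id   = &example_net_id,
	.size = sizeof(struct rxrpc_net),	/* allocated and freed by the net core */
};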