tracing/syscalls: protect thread flag toggling from races
Impact: fix syscall tracer enable/disable race. The current thread flag toggling is racy, as shown in the following scenario: - task A is the last user of syscall tracing; it clears TIF_SYSCALL_FTRACE on each task - at the same time, task B starts syscall tracing: refcount == 0, so it sets TIF_SYSCALL_FTRACE on each task. The effect of this mix-up is unpredictable. So this fix adds a mutex around {start,stop}_ftrace_syscalls(). Reported-by: Andrew Morton <akpm@linux-foundation.org> Reported-by: Ingo Molnar <mingo@elte.hu> LKML-Reference: <1237151439-6755-3-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
6404434525
commit
5be71b61f1
|
@ -5,7 +5,11 @@
|
||||||
#include "trace_output.h"
|
#include "trace_output.h"
|
||||||
#include "trace.h"
|
#include "trace.h"
|
||||||
|
|
||||||
static atomic_t refcount;
|
/* Keep a counter of the syscall tracing users */
|
||||||
|
static int refcount;
|
||||||
|
|
||||||
|
/* Prevent from races on thread flags toggling */
|
||||||
|
static DEFINE_MUTEX(syscall_trace_lock);
|
||||||
|
|
||||||
/* Option to display the parameters types */
|
/* Option to display the parameters types */
|
||||||
enum {
|
enum {
|
||||||
|
@ -96,9 +100,11 @@ void start_ftrace_syscalls(void)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
struct task_struct *g, *t;
|
struct task_struct *g, *t;
|
||||||
|
|
||||||
|
mutex_lock(&syscall_trace_lock);
|
||||||
|
|
||||||
/* Don't enable the flag on the tasks twice */
|
/* Don't enable the flag on the tasks twice */
|
||||||
if (atomic_inc_return(&refcount) != 1)
|
if (++refcount != 1)
|
||||||
return;
|
goto unlock;
|
||||||
|
|
||||||
arch_init_ftrace_syscalls();
|
arch_init_ftrace_syscalls();
|
||||||
read_lock_irqsave(&tasklist_lock, flags);
|
read_lock_irqsave(&tasklist_lock, flags);
|
||||||
|
@ -108,6 +114,9 @@ void start_ftrace_syscalls(void)
|
||||||
} while_each_thread(g, t);
|
} while_each_thread(g, t);
|
||||||
|
|
||||||
read_unlock_irqrestore(&tasklist_lock, flags);
|
read_unlock_irqrestore(&tasklist_lock, flags);
|
||||||
|
|
||||||
|
unlock:
|
||||||
|
mutex_unlock(&syscall_trace_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
void stop_ftrace_syscalls(void)
|
void stop_ftrace_syscalls(void)
|
||||||
|
@ -115,9 +124,11 @@ void stop_ftrace_syscalls(void)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
struct task_struct *g, *t;
|
struct task_struct *g, *t;
|
||||||
|
|
||||||
|
mutex_lock(&syscall_trace_lock);
|
||||||
|
|
||||||
/* There are perhaps still some users */
|
/* There are perhaps still some users */
|
||||||
if (atomic_dec_return(&refcount))
|
if (--refcount)
|
||||||
return;
|
goto unlock;
|
||||||
|
|
||||||
read_lock_irqsave(&tasklist_lock, flags);
|
read_lock_irqsave(&tasklist_lock, flags);
|
||||||
|
|
||||||
|
@ -126,6 +137,9 @@ void stop_ftrace_syscalls(void)
|
||||||
} while_each_thread(g, t);
|
} while_each_thread(g, t);
|
||||||
|
|
||||||
read_unlock_irqrestore(&tasklist_lock, flags);
|
read_unlock_irqrestore(&tasklist_lock, flags);
|
||||||
|
|
||||||
|
unlock:
|
||||||
|
mutex_unlock(&syscall_trace_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ftrace_syscall_enter(struct pt_regs *regs)
|
void ftrace_syscall_enter(struct pt_regs *regs)
|
||||||
|
|
Loading…
Reference in New Issue