runtime: prevent deadlock when profiling signal arrives in stack scan
Precise stack scan needs to unwind the stack. When it is unwinding the
stack, if a profiling signal arrives, which also does a traceback, it may
deadlock in dl_iterate_phdr. Prevent this deadlock by setting
runtime_in_callers before the traceback.

Reviewed-on: https://go-review.googlesource.com/c/155766

From-SVN: r267457
parent 50bec22834
commit e20bfbd18e
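For reference, below is a minimal standalone sketch of the guard pattern this change applies. It is not libgo code: GCC atomic builtins stand in for runtime_xadd/runtime_atomicload, and the names in_callers, already_in_callers, scan_stack, frame_callback and on_profile_signal are illustrative stand-ins for __go_runtime_in_callers, alreadyInCallers, scanstackwithmap, scanstackwithmap_callback and the SIGPROF path. The stack scan brackets _Unwind_Backtrace with the counter; a profiling path that would otherwise collect its own backtrace checks the counter first and bails out, so dl_iterate_phdr is never re-entered.

/* Standalone sketch of the deadlock-avoidance pattern; assumes GCC/clang
   atomic builtins and a libgcc unwinder, not the libgo runtime.  */

#include <stdint.h>
#include <stdio.h>
#include <unwind.h>

/* Stand-in for __go_runtime_in_callers: non-zero while an unwind or
   backtrace is in progress.  */
static uint32_t in_callers;

/* Stand-in for alreadyInCallers().  */
static int
already_in_callers (void)
{
  return __atomic_load_n (&in_callers, __ATOMIC_ACQUIRE) > 0;
}

/* Placeholder frame callback standing in for scanstackwithmap_callback.  */
static _Unwind_Reason_Code
frame_callback (struct _Unwind_Context *ctx, void *arg)
{
  (void) arg;
  printf ("frame pc %p\n", (void *) _Unwind_GetIP (ctx));
  return _URC_NO_REASON;
}

/* Stand-in for scanstackwithmap: bracket the unwind with the counter,
   as the patch does around _Unwind_Backtrace.  */
static int
scan_stack (void *arg)
{
  _Unwind_Reason_Code code;

  __atomic_fetch_add (&in_callers, 1, __ATOMIC_ACQ_REL);
  code = _Unwind_Backtrace (frame_callback, arg);
  __atomic_fetch_sub (&in_callers, 1, __ATOMIC_ACQ_REL);
  return code == _URC_END_OF_STACK;
}

/* A profiling path skips its own traceback while the counter is set,
   instead of re-entering dl_iterate_phdr and deadlocking.  */
static void
on_profile_signal (void)
{
  if (already_in_callers ())
    return;
  /* ... collect a backtrace here ... */
}

int
main (void)
{
  scan_stack (NULL);
  on_profile_signal ();
  return 0;
}

Making the counter non-static (the runtime_in_callers to __go_runtime_in_callers rename in the hunks below, plus the extern declaration added to the runtime header) is what lets the stack-scan code share the same flag that the traceback path in go-callers.c already maintains.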
@@ -1,4 +1,4 @@
-d9a30434440469c640a120fe7132057f5644d38c
+0e482bef69d73b9381dbc543e200a1fe57275e81
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
@@ -16,7 +16,7 @@
    older versions of glibc when a SIGPROF signal arrives while
    collecting a backtrace.  */
 
-static uint32 runtime_in_callers;
+uint32 __go_runtime_in_callers;
 
 /* Argument passed to callback function.  */
 
@@ -185,7 +185,7 @@ bool alreadyInCallers(void)
 bool
 alreadyInCallers()
 {
-  return runtime_atomicload(&runtime_in_callers) > 0;
+  return runtime_atomicload(&__go_runtime_in_callers) > 0;
 }
 
 /* Gather caller PC's.  */
@@ -203,9 +203,9 @@ runtime_callers (int32 skip, Location *locbuf, int32 m, bool keep_thunks)
   data.max = m;
   data.keep_thunks = keep_thunks;
   state = __go_get_backtrace_state ();
-  runtime_xadd (&runtime_in_callers, 1);
+  runtime_xadd (&__go_runtime_in_callers, 1);
   backtrace_full (state, 0, callback, error_callback, &data);
-  runtime_xadd (&runtime_in_callers, -1);
+  runtime_xadd (&__go_runtime_in_callers, -1);
 
   /* For some reason GCC sometimes loses the name of a thunk function
      at the top of the stack.  If we are skipping thunks, skip that
@@ -792,7 +792,9 @@ bool
 scanstackwithmap (void *gcw)
 {
   _Unwind_Reason_Code code;
+  runtime_xadd (&__go_runtime_in_callers, 1);
   code = _Unwind_Backtrace (scanstackwithmap_callback, gcw);
+  runtime_xadd (&__go_runtime_in_callers, -1);
   return code == _URC_END_OF_STACK;
 }
 
@@ -515,3 +515,9 @@ bool runtime_usestackmaps;
 
 bool probestackmaps(void)
   __asm__("runtime.probestackmaps");
+
+// This is set to non-zero when calling backtrace_full.  This is used
+// to avoid getting hanging on a recursive lock in dl_iterate_phdr on
+// older versions of glibc when a SIGPROF signal arrives while
+// collecting a backtrace.
+extern uint32 __go_runtime_in_callers;