// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>

#include "config.h"

#ifdef HAVE_DL_ITERATE_PHDR
#include <link.h>
#endif

#include "runtime.h"
#include "arch.h"
#include "defs.h"
#include "malloc.h"
#include "go-type.h"

#ifdef USING_SPLIT_STACK

/* FIXME: These are not declared anywhere.  */

extern void __splitstack_getcontext(void *context[10]);

extern void __splitstack_setcontext(void *context[10]);

extern void *__splitstack_makecontext(size_t, void *context[10], size_t *);

extern void * __splitstack_resetcontext(void *context[10], size_t *);

extern void *__splitstack_find(void *, void *, size_t *, void **, void **,
			       void **);

extern void __splitstack_block_signals (int *, int *);

extern void __splitstack_block_signals_context (void *context[10], int *,
						int *);

#endif
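
// The __splitstack_* functions declared above are the split-stack support
// routines that the compiler's runtime library (libgcc's generic morestack
// code) provides when -fsplit-stack is in use.  No installed header declares
// them, hence the declarations and the FIXME here.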

#ifndef PTHREAD_STACK_MIN
# define PTHREAD_STACK_MIN 8192
#endif

#if defined(USING_SPLIT_STACK) && defined(LINKER_SUPPORTS_SPLIT_STACK)
# define StackMin PTHREAD_STACK_MIN
#else
# define StackMin ((sizeof(char *) < 8) ? 2 * 1024 * 1024 : 4 * 1024 * 1024)
#endif

uintptr runtime_stacks_sys;

static void gtraceback(G*);

#ifdef __rtems__
#define __thread
#endif

static __thread G *g;

#ifndef SETCONTEXT_CLOBBERS_TLS

static inline void
initcontext(void)
{
}

static inline void
fixcontext(ucontext_t *c __attribute__ ((unused)))
{
}

#else

# if defined(__x86_64__) && defined(__sun__)

// x86_64 Solaris 10 and 11 have a bug: setcontext switches the %fs
// register to that of the thread which called getcontext.  The effect
// is that the address of all __thread variables changes.  This bug
// also affects pthread_self() and pthread_getspecific.  We work
// around it by clobbering the context field directly to keep %fs the
// same.

static __thread greg_t fs;

static inline void
initcontext(void)
{
	ucontext_t c;

	getcontext(&c);
	fs = c.uc_mcontext.gregs[REG_FSBASE];
}

static inline void
fixcontext(ucontext_t* c)
{
	c->uc_mcontext.gregs[REG_FSBASE] = fs;
}

# elif defined(__NetBSD__)

// NetBSD has a bug: setcontext clobbers tlsbase, we need to save
// and restore it ourselves.

static __thread __greg_t tlsbase;

static inline void
initcontext(void)
{
	ucontext_t c;

	getcontext(&c);
	tlsbase = c.uc_mcontext._mc_tlsbase;
}

static inline void
fixcontext(ucontext_t* c)
{
	c->uc_mcontext._mc_tlsbase = tlsbase;
}

# elif defined(__sparc__)

static inline void
initcontext(void)
{
}

static inline void
fixcontext(ucontext_t *c)
{
	/* ??? Using
	     register unsigned long thread __asm__("%g7");
	     c->uc_mcontext.gregs[REG_G7] = thread;
	   results in
	     error: variable ‘thread’ might be clobbered by ‘longjmp’ or ‘vfork’ [-Werror=clobbered]
	   which ought to be false, as %g7 is a fixed register.  */

	if (sizeof (c->uc_mcontext.gregs[REG_G7]) == 8)
		asm ("stx %%g7, %0" : "=m"(c->uc_mcontext.gregs[REG_G7]));
	else
		asm ("st %%g7, %0" : "=m"(c->uc_mcontext.gregs[REG_G7]));
}

# else

#  error unknown case for SETCONTEXT_CLOBBERS_TLS

# endif

#endif

// ucontext_arg returns a properly aligned ucontext_t value.  On some
// systems a ucontext_t value must be aligned to a 16-byte boundary.
// The g structure that has fields of type ucontext_t is defined in
// Go, and Go has no simple way to align a field to such a boundary.
// So we make the field larger in runtime2.go and pick an appropriate
// offset within the field here.
static ucontext_t*
ucontext_arg(void** go_ucontext)
{
	uintptr_t p = (uintptr_t)go_ucontext;
	size_t align = __alignof__(ucontext_t);
	if(align > 16) {
		// We only ensured space for up to a 16 byte alignment
		// in libgo/go/runtime/runtime2.go.
		runtime_throw("required alignment of ucontext_t too large");
	}
	p = (p + align - 1) &~ (uintptr_t)(align - 1);
	return (ucontext_t*)p;
}
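
// For example, the context switches below never touch a g's context field
// directly; they always go through ucontext_arg first:
//
//	fixcontext(ucontext_arg(&newg->context[0]));
//	setcontext(ucontext_arg(&newg->context[0]));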

// We can not always refer to the TLS variables directly.  The
// compiler will call tls_get_addr to get the address of the variable,
// and it may hold it in a register across a call to schedule.  When
// we get back from the call we may be running in a different thread,
// in which case the register now points to the TLS variable for a
// different thread.  We use non-inlinable functions to avoid this
// when necessary.

G* runtime_g(void) __attribute__ ((noinline, no_split_stack));

G*
runtime_g(void)
{
	return g;
}

M* runtime_m(void) __attribute__ ((noinline, no_split_stack));

M*
runtime_m(void)
{
	if(g == nil)
		return nil;
	return g->m;
}

// Set g.
void
runtime_setg(G* gp)
{
	g = gp;
}

// Start a new thread.
static void
runtime_newosproc(M *mp)
{
	pthread_attr_t attr;
	sigset_t clear, old;
	pthread_t tid;
	int tries;
	int ret;

	if(pthread_attr_init(&attr) != 0)
		runtime_throw("pthread_attr_init");
	if(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
		runtime_throw("pthread_attr_setdetachstate");

	// Block signals during pthread_create so that the new thread
	// starts with signals disabled.  It will enable them in minit.
	sigfillset(&clear);

#ifdef SIGTRAP
	// Blocking SIGTRAP reportedly breaks gdb on Alpha GNU/Linux.
	sigdelset(&clear, SIGTRAP);
#endif

	sigemptyset(&old);
	pthread_sigmask(SIG_BLOCK, &clear, &old);

	// Retry pthread_create a few times on transient EAGAIN failures,
	// with a short, growing delay, before giving up.
	for (tries = 0; tries < 20; tries++) {
		ret = pthread_create(&tid, &attr, runtime_mstart, mp);
		if (ret != EAGAIN) {
			break;
		}
		runtime_usleep((tries + 1) * 1000); // Milliseconds.
	}

	pthread_sigmask(SIG_SETMASK, &old, nil);

	if (ret != 0) {
		runtime_printf("pthread_create failed: %d\n", ret);
		runtime_throw("pthread_create");
	}
}
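
// A new goroutine's context is created by makeGContext below with kickoff as
// its entry point (via makecontext), so the first runtime_gogo to a new g
// starts executing here.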

// First function run by a new goroutine.  This replaces gogocall.
static void
kickoff(void)
{
	void (*fn)(void*);
	void *param;

	if(g->traceback != nil)
		gtraceback(g);

	fn = (void (*)(void*))(g->entry);
	param = g->param;
	g->entry = nil;
	g->param = nil;
	fn(param);
	runtime_goexit1();
}

// Switch context to a different goroutine.  This is like longjmp.
void runtime_gogo(G*) __attribute__ ((noinline));
void
runtime_gogo(G* newg)
{
#ifdef USING_SPLIT_STACK
	__splitstack_setcontext(&newg->stackcontext[0]);
#endif
	g = newg;
	newg->fromgogo = true;
	fixcontext(ucontext_arg(&newg->context[0]));
	setcontext(ucontext_arg(&newg->context[0]));
	runtime_throw("gogo setcontext returned");
}

// Save context and call fn passing g as a parameter.  This is like
// setjmp.  Because getcontext always returns 0, unlike setjmp, we use
// g->fromgogo as a code.  It will be true if we got here via
// setcontext.  g == nil the first time this is called in a new m.
void runtime_mcall(void (*)(G*)) __attribute__ ((noinline));
void
runtime_mcall(void (*pfn)(G*))
{
	M *mp;
	G *gp;
#ifndef USING_SPLIT_STACK
	void *afterregs;
#endif

	// Ensure that all registers are on the stack for the garbage
	// collector.
	__builtin_unwind_init();

	gp = g;
	mp = gp->m;
	if(gp == mp->g0)
		runtime_throw("runtime: mcall called on m->g0 stack");

	if(gp != nil) {

#ifdef USING_SPLIT_STACK
		__splitstack_getcontext(&g->stackcontext[0]);
#else
		// We have to point to an address on the stack that is
		// below the saved registers.
		gp->gcnextsp = &afterregs;
#endif
		gp->fromgogo = false;
		getcontext(ucontext_arg(&gp->context[0]));

		// When we return from getcontext, we may be running
		// in a new thread.  That means that g may have
		// changed.  It is a global variable, so we will
		// reload it, but the address of g may be cached in
		// our local stack frame, and that address may be
		// wrong.  Call the function to reload the value for
		// this thread.
		gp = runtime_g();
		mp = gp->m;

		if(gp->traceback != nil)
			gtraceback(gp);
	}
	if (gp == nil || !gp->fromgogo) {
#ifdef USING_SPLIT_STACK
		__splitstack_setcontext(&mp->g0->stackcontext[0]);
#endif
		mp->g0->entry = (byte*)pfn;
		mp->g0->param = gp;

		// It's OK to set g directly here because this case
		// can not occur if we got here via a setcontext to
		// the getcontext call just above.
		g = mp->g0;

		fixcontext(ucontext_arg(&mp->g0->context[0]));
		setcontext(ucontext_arg(&mp->g0->context[0]));
		runtime_throw("runtime: mcall function returned");
	}
}

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at http://golang.org/s/go11sched.

enum
{
	// Number of goroutine ids to grab from runtime_sched->goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
	GoidCacheBatch = 16,
};
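
// In this gccgo port most of the scheduler proper (schedule, execute, startm,
// the run queues, and so on) is implemented in Go in libgo/go/runtime; the
// declarations below just import those Go functions under their assembler
// names so the C parts of the runtime can call them.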

extern Sched* runtime_getsched() __asm__ (GOSYM_PREFIX "runtime.getsched");
extern bool* runtime_getCgoHasExtraM()
	__asm__ (GOSYM_PREFIX "runtime.getCgoHasExtraM");
extern P** runtime_getAllP()
	__asm__ (GOSYM_PREFIX "runtime.getAllP");
extern G* allocg(void)
	__asm__ (GOSYM_PREFIX "runtime.allocg");
extern bool needaddgcproc(void)
	__asm__ (GOSYM_PREFIX "runtime.needaddgcproc");
extern void startm(P*, bool)
	__asm__(GOSYM_PREFIX "runtime.startm");
extern void newm(void(*)(void), P*)
	__asm__(GOSYM_PREFIX "runtime.newm");

Sched*	runtime_sched;
M	runtime_m0;
G	runtime_g0;	// idle goroutine for m0
G*	runtime_lastg;
P**	runtime_allp;
int8*	runtime_goos;
int32	runtime_ncpu;
bool	runtime_precisestack;

bool	runtime_isarchive;

void* runtime_mstart(void*);
static void exitsyscall0(G*);
static void park0(G*);
static void goexit0(G*);
static bool exitsyscallfast(void);

extern void setncpu(int32)
	__asm__(GOSYM_PREFIX "runtime.setncpu");
extern void setpagesize(uintptr_t)
	__asm__(GOSYM_PREFIX "runtime.setpagesize");
extern void allgadd(G*)
	__asm__(GOSYM_PREFIX "runtime.allgadd");
extern void mcommoninit(M*)
	__asm__(GOSYM_PREFIX "runtime.mcommoninit");
extern void stopm(void)
	__asm__(GOSYM_PREFIX "runtime.stopm");
extern void handoffp(P*)
	__asm__(GOSYM_PREFIX "runtime.handoffp");
extern void wakep(void)
	__asm__(GOSYM_PREFIX "runtime.wakep");
extern void stoplockedm(void)
	__asm__(GOSYM_PREFIX "runtime.stoplockedm");
extern void schedule(void)
	__asm__(GOSYM_PREFIX "runtime.schedule");
extern void execute(G*, bool)
	__asm__(GOSYM_PREFIX "runtime.execute");
extern void gfput(P*, G*)
	__asm__(GOSYM_PREFIX "runtime.gfput");
extern G* gfget(P*)
	__asm__(GOSYM_PREFIX "runtime.gfget");
extern void procresize(int32)
	__asm__(GOSYM_PREFIX "runtime.procresize");
extern void acquirep(P*)
	__asm__(GOSYM_PREFIX "runtime.acquirep");
extern P* releasep(void)
	__asm__(GOSYM_PREFIX "runtime.releasep");
extern void incidlelocked(int32)
	__asm__(GOSYM_PREFIX "runtime.incidlelocked");
extern void checkdead(void)
	__asm__(GOSYM_PREFIX "runtime.checkdead");
extern void sysmon(void)
	__asm__(GOSYM_PREFIX "runtime.sysmon");
extern void mput(M*)
	__asm__(GOSYM_PREFIX "runtime.mput");
extern M* mget(void)
	__asm__(GOSYM_PREFIX "runtime.mget");
extern void globrunqput(G*)
	__asm__(GOSYM_PREFIX "runtime.globrunqput");
extern P* pidleget(void)
	__asm__(GOSYM_PREFIX "runtime.pidleget");
extern bool runqempty(P*)
	__asm__(GOSYM_PREFIX "runtime.runqempty");
extern void runqput(P*, G*, bool)
	__asm__(GOSYM_PREFIX "runtime.runqput");

bool runtime_isstarted;

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime_mstart
//
// The new G calls runtime_main.
void
runtime_schedinit(void)
{
	M *m;
	int32 n, procs;
	String s;
	const byte *p;
	Eface i;

	setncpu(runtime_ncpu);
	setpagesize(getpagesize());
	runtime_sched = runtime_getsched();

	m = &runtime_m0;
	g = &runtime_g0;
	m->g0 = g;
	m->curg = g;
	g->m = m;

	initcontext();

	runtime_sched->maxmcount = 10000;
	runtime_precisestack = 0;

	// runtime_symtabinit();
	runtime_mallocinit();
	mcommoninit(m);
	runtime_alginit(); // maps must not be used before this call

	// Initialize the itable value for newErrorCString,
	// so that the next time it gets called, possibly
	// in a fault during a garbage collection, it will not
	// need to allocate memory.
	runtime_newErrorCString(0, &i);

	// Initialize the cached gotraceback value, since
	// gotraceback calls getenv, which mallocs on Plan 9.
	runtime_gotraceback(nil);

	runtime_goargs();
	runtime_goenvs();
	runtime_parsedebugvars();

	runtime_sched->lastpoll = runtime_nanotime();
	procs = 1;
	s = runtime_getenv("GOMAXPROCS");
	p = s.str;
	if(p != nil && (n = runtime_atoi(p, s.len)) > 0) {
		if(n > _MaxGomaxprocs)
			n = _MaxGomaxprocs;
		procs = n;
	}
	runtime_allp = runtime_getAllP();
	procresize(procs);

	// Can not enable GC until all roots are registered.
	// mstats()->enablegc = 1;
}

extern void main_init(void) __asm__ (GOSYM_PREFIX "__go_init_main");
extern void main_main(void) __asm__ (GOSYM_PREFIX "main.main");

// Used to determine the field alignment.

struct field_align
{
	char c;
	Hchan *p;
};
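
// initDone is the deferred function installed by runtime_main below: once
// main_init has run, it undoes the LockOSThread performed during
// initialization.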
static void
initDone(void *arg __attribute__ ((unused))) {
	runtime_unlockOSThread();
};

// The main goroutine.
// Note: C frames in general are not copyable during stack growth, for two reasons:
//   1) We don't know where in a frame to find pointers to other stack locations.
//   2) There's no guarantee that globals or heap values do not point into the frame.
//
// The C frame for runtime.main is copyable, because:
//   1) There are no pointers to other stack locations in the frame
//      (d.fn points at a global, d.link is nil, d.argp is -1).
//   2) The only pointer into this frame is from the defer chain,
//      which is explicitly handled during stack copying.
void
runtime_main(void* dummy __attribute__((unused)))
{
	Defer d;
	_Bool frame;

	newm(sysmon, nil);

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization.  Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	runtime_lockOSThread();

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	d.pfn = (uintptr)(void*)initDone;
	d.link = g->_defer;
	d.arg = (void*)-1;
	d._panic = g->_panic;
	d.retaddr = 0;
	d.makefunccanrecover = 0;
	d.frame = &frame;
	d.special = true;
	g->_defer = &d;

	if(g->m != &runtime_m0)
		runtime_throw("runtime_main not on m0");
	__go_go(runtime_MHeap_Scavenger, nil);

	makeMainInitDone();

	_cgo_notify_runtime_init_done();

	main_init();

	closeMainInitDone();

	if(g->_defer != &d || (void*)d.pfn != initDone)
		runtime_throw("runtime: bad defer entry after init");
	g->_defer = d.link;
	runtime_unlockOSThread();

	// For gccgo we have to wait until after main is initialized
	// to enable GC, because initializing main registers the GC
	// roots.
	mstats()->enablegc = 1;

	if(runtime_isarchive) {
		// This is not a complete program, but is instead a
		// library built using -buildmode=c-archive or
		// c-shared.  Now that we are initialized, there is
		// nothing further to do.
		return;
	}

	main_main();

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit.  See issue 3934.
	if(runtime_panicking())
		runtime_park(nil, nil, "panicwait");

	runtime_exit(0);
	for(;;)
		*(int32*)0 = 0;
}

void getTraceback(G*, G*) __asm__(GOSYM_PREFIX "runtime.getTraceback");

// getTraceback stores a traceback of gp in the g's traceback field
// and then returns to me.  We expect that gp's traceback is not nil.
// It works by saving me's current context, and checking gp's traceback field.
// If gp's traceback field is not nil, it starts running gp.
// In places where we call getcontext, we check the traceback field.
// If it is not nil, we collect a traceback, and then return to the
// goroutine stored in the traceback field, which is me.
void getTraceback(G* me, G* gp)
{
#ifdef USING_SPLIT_STACK
	__splitstack_getcontext(&me->stackcontext[0]);
#endif
	getcontext(ucontext_arg(&me->context[0]));

	if (gp->traceback != nil) {
		runtime_gogo(gp);
	}
}

// Do a stack trace of gp, and then restore the context by switching back
// to traceback->gp, the goroutine that requested the trace.

static void
gtraceback(G* gp)
{
	Traceback* traceback;

	traceback = gp->traceback;
	gp->traceback = nil;
	if(gp->m != nil)
		runtime_throw("gtraceback: m is not nil");
	gp->m = traceback->gp->m;
	traceback->c = runtime_callers(1, traceback->locbuf,
		sizeof traceback->locbuf / sizeof traceback->locbuf[0], false);
	gp->m = nil;
	runtime_gogo(traceback->gp);
}

// Called to start an M.
void*
runtime_mstart(void* mp)
{
	M *m;
	G *gp;

	m = (M*)mp;
	g = m->g0;
	g->m = m;
	gp = g;

	initcontext();

	gp->entry = nil;
	gp->param = nil;

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
#ifdef USING_SPLIT_STACK
	__splitstack_getcontext(&g->stackcontext[0]);
#else
	gp->gcinitialsp = &mp;
	// Setting gcstacksize to 0 is a marker meaning that gcinitialsp
	// is the top of the stack, not the bottom.
	gp->gcstacksize = 0;
	gp->gcnextsp = &mp;
#endif
	getcontext(ucontext_arg(&gp->context[0]));

	if(gp->traceback != nil)
		gtraceback(gp);

	if(gp->entry != nil) {
		// Got here from mcall.
		void (*pfn)(G*) = (void (*)(G*))gp->entry;
		G* gp1 = (G*)gp->param;
		gp->entry = nil;
		gp->param = nil;
		pfn(gp1);
		*(int*)0x21 = 0x21;
	}
	runtime_minit();

#ifdef USING_SPLIT_STACK
	{
		int dont_block_signals = 0;
		__splitstack_block_signals(&dont_block_signals, nil);
	}
#endif

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if(m == &runtime_m0) {
		if(runtime_iscgo) {
			bool* cgoHasExtraM = runtime_getCgoHasExtraM();
			if(!*cgoHasExtraM) {
				*cgoHasExtraM = true;
				runtime_newextram();
			}
		}
		runtime_initsig(false);
	}

	if(m->mstartfn)
		((void (*)(void))m->mstartfn)();

	if(m->helpgc) {
		m->helpgc = 0;
		stopm();
	} else if(m != &runtime_m0) {
		acquirep((P*)m->nextp);
		m->nextp = 0;
	}
	schedule();

	// TODO(brainman): This point is never reached, because scheduler
	// does not release os threads at the moment.  But once this path
	// is enabled, we must remove our seh here.

	return nil;
}
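
// CgoThreadStart appears to mirror the thread-start record used on the
// C/cgo side when launching a new OS thread; nothing in this part of the
// file references it directly.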
typedef struct CgoThreadStart CgoThreadStart;
struct CgoThreadStart
{
	M *m;
	G *g;
	uintptr *tls;
	void (*fn)(void);
};

M* runtime_allocm(P*, bool, byte**, uintptr*)
	__asm__(GOSYM_PREFIX "runtime.allocm");

// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
M*
runtime_allocm(P *p, bool allocatestack, byte** ret_g0_stack, uintptr* ret_g0_stacksize)
{
	M *mp;

	g->m->locks++;  // disable GC because it can be called from sysmon
	if(g->m->p == 0)
		acquirep(p);  // temporarily borrow p for mallocs in this function
#if 0
	if(mtype == nil) {
		Eface e;
		runtime_gc_m_ptr(&e);
		mtype = ((const PtrType*)e.__type_descriptor)->__element_type;
	}
#endif

	mp = runtime_mal(sizeof *mp);
	mcommoninit(mp);
	mp->g0 = runtime_malg(allocatestack, false, ret_g0_stack, ret_g0_stacksize);
	mp->g0->m = mp;

	if(p == (P*)g->m->p)
		releasep();
	g->m->locks--;

	return mp;
}

void setGContext(void) __asm__ (GOSYM_PREFIX "runtime.setGContext");

// setGContext sets up a new goroutine context for the current g.
void
setGContext()
{
	int val;
	G *gp;

	initcontext();
	gp = g;
	gp->entry = nil;
	gp->param = nil;
#ifdef USING_SPLIT_STACK
	__splitstack_getcontext(&gp->stackcontext[0]);
	val = 0;
	__splitstack_block_signals(&val, nil);
#else
	gp->gcinitialsp = &val;
	gp->gcstack = nil;
	gp->gcstacksize = 0;
	gp->gcnextsp = &val;
#endif
	getcontext(ucontext_arg(&gp->context[0]));

	if(gp->entry != nil) {
		// Got here from mcall.
		void (*pfn)(G*) = (void (*)(G*))gp->entry;
		G* gp1 = (G*)gp->param;
		gp->entry = nil;
		gp->param = nil;
		pfn(gp1);
		*(int*)0x22 = 0x22;
	}
}

void makeGContext(G*, byte*, uintptr)
	__asm__(GOSYM_PREFIX "runtime.makeGContext");

// makeGContext makes a new context for a g.
void
makeGContext(G* gp, byte* sp, uintptr spsize) {
	ucontext_t *uc;

	uc = ucontext_arg(&gp->context[0]);
	getcontext(uc);
	uc->uc_stack.ss_sp = sp;
	uc->uc_stack.ss_size = (size_t)spsize;
	makecontext(uc, kickoff, 0);
}
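
// makeGContext is presumably called from the Go side of the runtime once a
// new goroutine's stack has been allocated; after it runs, the first
// runtime_gogo to that g enters kickoff above.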

// Create a new m.  It will start off with a call to fn, or else the scheduler.
void
newm(void(*fn)(void), P *p)
{
	M *mp;

	mp = runtime_allocm(p, false, nil, nil);
	mp->nextp = (uintptr)p;
	mp->mstartfn = (uintptr)(void*)fn;

	runtime_newosproc(mp);
}
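
// mspinning is used by startm below as the mstartfn of a newly created M:
// runtime_mstart runs it on the new thread, marking the M as spinning before
// it enters the scheduler.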
static void
mspinning(void)
{
	g->m->spinning = true;
}

// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
void
startm(P *p, bool spinning)
{
	M *mp;
	void (*fn)(void);

	runtime_lock(&runtime_sched->lock);
	if(p == nil) {
		p = pidleget();
		if(p == nil) {
			runtime_unlock(&runtime_sched->lock);
			if(spinning)
				runtime_xadd(&runtime_sched->nmspinning, -1);
			return;
		}
	}
	mp = mget();
	runtime_unlock(&runtime_sched->lock);
	if(mp == nil) {
		fn = nil;
		if(spinning)
			fn = mspinning;
		newm(fn, p);
		return;
	}
	if(mp->spinning)
		runtime_throw("startm: m is spinning");
	if(mp->nextp)
		runtime_throw("startm: m has p");
	if(spinning && !runqempty(p)) {
		runtime_throw("startm: p has runnable gs");
	}
	mp->spinning = spinning;
	mp->nextp = (uintptr)p;
	runtime_notewakeup(&mp->park);
}

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
void
runtime_park(bool(*unlockf)(G*, void*), void *lock, const char *reason)
{
	if(g->atomicstatus != _Grunning)
		runtime_throw("bad g status");
	g->m->waitlock = lock;
	g->m->waitunlockf = unlockf;
	g->waitreason = runtime_gostringnocopy((const byte*)reason);
	runtime_mcall(park0);
}
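
// gopark is the Go-callable counterpart of runtime_park: it takes a Go
// FuncVal and String rather than C function and string pointers, but parks
// the goroutine the same way via park0.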
void gopark(FuncVal *, void *, String, byte, int)
	__asm__ ("runtime.gopark");

void
gopark(FuncVal *unlockf, void *lock, String reason,
       byte traceEv __attribute__ ((unused)),
       int traceskip __attribute__ ((unused)))
{
	if(g->atomicstatus != _Grunning)
		runtime_throw("bad g status");
	g->m->waitlock = lock;
	g->m->waitunlockf = unlockf == nil ? nil : (void*)unlockf->fn;
	g->waitreason = reason;
	runtime_mcall(park0);
}
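
// parkunlock is the unlock callback used by runtime_parkunlock and
// goparkunlock below: it releases the lock and, by returning true, leaves the
// goroutine parked.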
static bool
parkunlock(G *gp, void *lock)
{
	USED(gp);
	runtime_unlock(lock);
	return true;
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling runtime_ready(gp).
void
runtime_parkunlock(Lock *lock, const char *reason)
{
	runtime_park(parkunlock, lock, reason);
}
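
// goparkunlock is the Go-callable counterpart of runtime_parkunlock; the
// trace arguments are accepted for interface compatibility but unused here.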
void goparkunlock(Lock *, String, byte, int)
	__asm__ (GOSYM_PREFIX "runtime.goparkunlock");

void
goparkunlock(Lock *lock, String reason, byte traceEv __attribute__ ((unused)),
	     int traceskip __attribute__ ((unused)))
{
	if(g->atomicstatus != _Grunning)
		runtime_throw("bad g status");
	g->m->waitlock = lock;
	g->m->waitunlockf = parkunlock;
	g->waitreason = reason;
	runtime_mcall(park0);
}

// runtime_park continuation on g0.
static void
park0(G *gp)
{
	M *m;
	bool ok;

	m = g->m;
	gp->atomicstatus = _Gwaiting;
	gp->m = nil;
	m->curg = nil;
	if(m->waitunlockf) {
		ok = ((bool (*)(G*, void*))m->waitunlockf)(gp, m->waitlock);
		m->waitunlockf = nil;
		m->waitlock = nil;
		if(!ok) {
			gp->atomicstatus = _Grunnable;
			execute(gp, true);  // Schedule it back, never returns.
		}
	}
	if(m->lockedg) {
		stoplockedm();
		execute(gp, true);  // Never returns.
	}
	schedule();
}

// Scheduler yield.
void
runtime_gosched(void)
{
	if(g->atomicstatus != _Grunning)
		runtime_throw("bad g status");
	runtime_mcall(runtime_gosched0);
}

// runtime_gosched continuation on g0.
void
runtime_gosched0(G *gp)
{
	M *m;

	m = g->m;
	gp->atomicstatus = _Grunnable;
	gp->m = nil;
	m->curg = nil;
	runtime_lock(&runtime_sched->lock);
	globrunqput(gp);
	runtime_unlock(&runtime_sched->lock);
	if(m->lockedg) {
		stoplockedm();
		execute(gp, true);  // Never returns.
	}
	schedule();
}

// Finishes execution of the current goroutine.
// Need to mark it as nosplit, because it runs with sp > stackbase (as runtime_lessstack).
// Since it does not return it does not matter.  But if it is preempted
// at the split stack check, GC will complain about inconsistent sp.
void runtime_goexit1(void) __attribute__ ((noinline));
void
runtime_goexit1(void)
{
	if(g->atomicstatus != _Grunning)
		runtime_throw("bad g status");
	runtime_mcall(goexit0);
}

// runtime_goexit1 continuation on g0.
static void
goexit0(G *gp)
{
	M *m;

	m = g->m;
	gp->atomicstatus = _Gdead;
	gp->entry = nil;
	gp->m = nil;
	gp->lockedm = nil;
	gp->paniconfault = 0;
	gp->_defer = nil;  // should be true already but just in case.
	gp->_panic = nil;  // non-nil for Goexit during panic.  points at stack-allocated data.
	gp->writebuf.__values = nil;
	gp->writebuf.__count = 0;
	gp->writebuf.__capacity = 0;
	gp->waitreason = runtime_gostringnocopy(nil);
	gp->param = nil;
	m->curg->m = nil;
	m->curg = nil;
	m->lockedg = nil;
	if(m->locked & ~_LockExternal) {
		runtime_printf("invalid m->locked = %d\n", m->locked);
		runtime_throw("internal lockOSThread error");
	}
	m->locked = 0;
	gfput((P*)m->p, gp);
	schedule();
}

// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime_gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.

void runtime_entersyscall(int32) __attribute__ ((no_split_stack));
static void doentersyscall(uintptr, uintptr)
	__attribute__ ((no_split_stack, noinline));

void
runtime_entersyscall(int32 dummy __attribute__ ((unused)))
{
	// Save the registers in the g structure so that any pointers
	// held in registers will be seen by the garbage collector.
	getcontext(ucontext_arg(&g->gcregs[0]));

	// Do the work in a separate function, so that this function
	// doesn't save any registers on its own stack.  If this
	// function does save any registers, we might store the wrong
	// value in the call to getcontext.
	//
	// FIXME: This assumes that we do not need to save any
	// callee-saved registers to access the TLS variable g.  We
	// don't want to put the ucontext_t on the stack because it is
	// large and we can not split the stack here.
	doentersyscall((uintptr)runtime_getcallerpc(&dummy),
		       (uintptr)runtime_getcallersp(&dummy));
}
static void
|
2016-11-10 23:53:23 +01:00
|
|
|
|
doentersyscall(uintptr pc, uintptr sp)
|
2013-07-16 08:54:42 +02:00
|
|
|
|
{
|
2016-08-30 23:07:47 +02:00
|
|
|
|
// Disable preemption because during this function g is in _Gsyscall status,
|
2013-11-06 20:49:01 +01:00
|
|
|
|
// but it can have an inconsistent g->sched; do not let the GC observe it.
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->m->locks++;
|
2013-07-16 08:54:42 +02:00
|
|
|
|
|
2013-11-06 20:49:01 +01:00
|
|
|
|
// Leave SP around for GC and traceback.
|
2013-07-16 08:54:42 +02:00
|
|
|
|
#ifdef USING_SPLIT_STACK
|
2016-09-20 18:48:19 +02:00
|
|
|
|
{
|
|
|
|
|
size_t gcstacksize;
|
|
|
|
|
g->gcstack = __splitstack_find(nil, nil, &gcstacksize,
|
|
|
|
|
&g->gcnextsegment, &g->gcnextsp,
|
|
|
|
|
&g->gcinitialsp);
|
|
|
|
|
g->gcstacksize = (uintptr)gcstacksize;
|
|
|
|
|
}
|
2013-07-16 08:54:42 +02:00
|
|
|
|
#else
|
2013-07-16 17:44:54 +02:00
|
|
|
|
{
|
2014-06-13 15:50:13 +02:00
|
|
|
|
void *v;
|
2013-07-16 17:44:54 +02:00
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->gcnextsp = (byte *) &v;
|
2013-07-16 17:44:54 +02:00
|
|
|
|
}
|
2013-07-16 08:54:42 +02:00
|
|
|
|
#endif
|
|
|
|
|
|
2016-11-10 23:53:23 +01:00
|
|
|
|
g->syscallsp = sp;
|
|
|
|
|
g->syscallpc = pc;
|
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->atomicstatus = _Gsyscall;
|
2013-07-16 08:54:42 +02:00
|
|
|
|
|
2016-11-18 18:48:29 +01:00
|
|
|
|
if(runtime_atomicload(&runtime_sched->sysmonwait)) { // TODO: fast atomic
|
|
|
|
|
runtime_lock(&runtime_sched->lock);
|
|
|
|
|
if(runtime_atomicload(&runtime_sched->sysmonwait)) {
|
|
|
|
|
runtime_atomicstore(&runtime_sched->sysmonwait, 0);
|
|
|
|
|
runtime_notewakeup(&runtime_sched->sysmonnote);
|
2013-07-16 08:54:42 +02:00
|
|
|
|
}
|
2016-11-18 18:48:29 +01:00
|
|
|
|
runtime_unlock(&runtime_sched->lock);
|
2013-07-16 08:54:42 +02:00
|
|
|
|
}
|
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
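// Release the mcache and detach the P from this M, marking it
// _Psyscall so that sysmon can retake it, or a pending GC stop can
// claim it, if the system call turns out to be slow.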
g->m->mcache = nil;
|
|
|
|
|
((P*)(g->m->p))->m = 0;
|
|
|
|
|
runtime_atomicstore(&((P*)g->m->p)->status, _Psyscall);
|
2016-11-18 18:48:29 +01:00
|
|
|
|
if(runtime_atomicload(&runtime_sched->gcwaiting)) {
|
|
|
|
|
runtime_lock(&runtime_sched->lock);
|
|
|
|
|
if (runtime_sched->stopwait > 0 && runtime_cas(&((P*)g->m->p)->status, _Psyscall, _Pgcstop)) {
|
|
|
|
|
if(--runtime_sched->stopwait == 0)
|
|
|
|
|
runtime_notewakeup(&runtime_sched->stopnote);
|
2013-07-16 08:54:42 +02:00
|
|
|
|
}
|
2016-11-18 18:48:29 +01:00
|
|
|
|
runtime_unlock(&runtime_sched->lock);
|
2013-07-16 08:54:42 +02:00
|
|
|
|
}
|
2013-11-06 20:49:01 +01:00
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->m->locks--;
|
2013-07-16 08:54:42 +02:00
|
|
|
|
}
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2013-07-16 08:54:42 +02:00
|
|
|
|
// The same as runtime_entersyscall(), but with a hint that the syscall is blocking.
|
2011-11-28 06:45:49 +01:00
|
|
|
|
void
|
2016-09-30 15:45:08 +02:00
|
|
|
|
runtime_entersyscallblock(int32 dummy __attribute__ ((unused)))
|
2011-11-28 06:45:49 +01:00
|
|
|
|
{
|
2013-07-16 08:54:42 +02:00
|
|
|
|
P *p;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->m->locks++; // see comment in entersyscall
|
2012-03-06 18:57:23 +01:00
|
|
|
|
|
2013-11-06 20:49:01 +01:00
|
|
|
|
// Leave SP around for GC and traceback.
|
2011-11-28 06:45:49 +01:00
|
|
|
|
#ifdef USING_SPLIT_STACK
|
2016-09-20 18:48:19 +02:00
|
|
|
|
{
|
|
|
|
|
size_t gcstacksize;
|
|
|
|
|
g->gcstack = __splitstack_find(nil, nil, &gcstacksize,
|
|
|
|
|
&g->gcnextsegment, &g->gcnextsp,
|
|
|
|
|
&g->gcinitialsp);
|
|
|
|
|
g->gcstacksize = (uintptr)gcstacksize;
|
|
|
|
|
}
|
2011-11-28 06:45:49 +01:00
|
|
|
|
#else
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->gcnextsp = (byte *) &p;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
// Save the registers in the g structure so that any pointers
|
|
|
|
|
// held in registers will be seen by the garbage collector.
|
2016-09-09 15:31:49 +02:00
|
|
|
|
getcontext(ucontext_arg(&g->gcregs[0]));
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2016-11-10 23:53:23 +01:00
|
|
|
|
g->syscallpc = (uintptr)runtime_getcallerpc(&dummy);
|
|
|
|
|
g->syscallsp = (uintptr)runtime_getcallersp(&dummy);
|
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->atomicstatus = _Gsyscall;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2013-07-16 08:54:42 +02:00
|
|
|
|
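// The syscall is expected to block, so release the P right away and
// hand it off, rather than leaving it in _Psyscall for sysmon to
// retake later.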
p = releasep();
|
|
|
|
|
handoffp(p);
|
|
|
|
|
if(g->isbackground) // do not consider blocked scavenger for deadlock detection
|
2013-11-06 20:49:01 +01:00
|
|
|
|
incidlelocked(1);
|
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->m->locks--;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// The goroutine g exited its system call.
|
|
|
|
|
// Arrange for it to run on a cpu again.
|
|
|
|
|
// This is called only from the go syscall library, not
|
|
|
|
|
// from the low-level system calls used by the runtime.
|
|
|
|
|
void
|
2016-09-30 15:45:08 +02:00
|
|
|
|
runtime_exitsyscall(int32 dummy __attribute__ ((unused)))
|
2011-11-28 06:45:49 +01:00
|
|
|
|
{
|
|
|
|
|
G *gp;
|
2013-07-16 08:54:42 +02:00
|
|
|
|
|
2011-11-28 06:45:49 +01:00
|
|
|
|
gp = g;
|
2016-08-30 23:07:47 +02:00
|
|
|
|
gp->m->locks++; // see comment in entersyscall
|
|
|
|
|
|
2013-11-06 20:49:01 +01:00
|
|
|
|
if(gp->isbackground) // do not consider blocked scavenger for deadlock detection
|
|
|
|
|
incidlelocked(-1);
|
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
gp->waitsince = 0;
|
2013-11-06 20:49:01 +01:00
|
|
|
|
if(exitsyscallfast()) {
|
2011-11-28 06:45:49 +01:00
|
|
|
|
// There's a cpu for us, so we can run.
|
2016-08-30 23:07:47 +02:00
|
|
|
|
((P*)gp->m->p)->syscalltick++;
|
|
|
|
|
gp->atomicstatus = _Grunning;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
// Garbage collector isn't running (since we are),
|
2013-07-16 08:54:42 +02:00
|
|
|
|
// so okay to clear gcstack and gcsp.
|
2011-11-28 06:45:49 +01:00
|
|
|
|
#ifdef USING_SPLIT_STACK
|
|
|
|
|
gp->gcstack = nil;
|
|
|
|
|
#endif
|
2016-08-30 23:07:47 +02:00
|
|
|
|
gp->gcnextsp = nil;
|
|
|
|
|
runtime_memclr(&gp->gcregs[0], sizeof gp->gcregs);
|
2016-11-10 23:53:23 +01:00
|
|
|
|
gp->syscallsp = 0;
|
2016-08-30 23:07:47 +02:00
|
|
|
|
gp->m->locks--;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
gp->m->locks--;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2013-07-16 08:54:42 +02:00
|
|
|
|
// Call the scheduler.
|
|
|
|
|
runtime_mcall(exitsyscall0);
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2013-07-16 08:54:42 +02:00
|
|
|
|
// Scheduler returned, so we're allowed to run now.
|
2011-11-28 06:45:49 +01:00
|
|
|
|
// Delete the gcstack information that we left for
|
|
|
|
|
// the garbage collector during the system call.
|
|
|
|
|
// Must wait until now because until gosched returns
|
|
|
|
|
// we don't know for sure that the garbage collector
|
|
|
|
|
// is not running.
|
|
|
|
|
#ifdef USING_SPLIT_STACK
|
|
|
|
|
gp->gcstack = nil;
|
|
|
|
|
#endif
|
2016-08-30 23:07:47 +02:00
|
|
|
|
gp->gcnextsp = nil;
|
|
|
|
|
runtime_memclr(&gp->gcregs[0], sizeof gp->gcregs);
|
2013-11-15 18:20:25 +01:00
|
|
|
|
|
2016-11-10 23:53:23 +01:00
|
|
|
|
gp->syscallsp = 0;
|
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
// Note that this gp->m might be different than the earlier
|
|
|
|
|
// gp->m after returning from runtime_mcall.
|
|
|
|
|
((P*)gp->m->p)->syscalltick++;
|
2013-11-06 20:49:01 +01:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
exitsyscallfast(void)
|
|
|
|
|
{
|
2016-08-30 23:07:47 +02:00
|
|
|
|
G *gp;
|
2013-11-06 20:49:01 +01:00
|
|
|
|
P *p;
|
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
gp = g;
|
|
|
|
|
|
2013-11-06 20:49:01 +01:00
|
|
|
|
// Freezetheworld sets stopwait but does not retake P's.
|
2016-11-18 18:48:29 +01:00
|
|
|
|
if(runtime_sched->stopwait) {
|
2016-08-30 23:07:47 +02:00
|
|
|
|
gp->m->p = 0;
|
2013-11-06 20:49:01 +01:00
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Try to re-acquire the last P.
|
2016-08-30 23:07:47 +02:00
|
|
|
|
if(gp->m->p && ((P*)gp->m->p)->status == _Psyscall && runtime_cas(&((P*)gp->m->p)->status, _Psyscall, _Prunning)) {
|
2013-11-06 20:49:01 +01:00
|
|
|
|
// There's a cpu for us, so we can run.
|
2016-08-30 23:07:47 +02:00
|
|
|
|
gp->m->mcache = ((P*)gp->m->p)->mcache;
|
|
|
|
|
((P*)gp->m->p)->m = (uintptr)gp->m;
|
2013-11-06 20:49:01 +01:00
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
// Try to get any other idle P.
|
2016-08-30 23:07:47 +02:00
|
|
|
|
gp->m->p = 0;
|
2016-11-18 18:48:29 +01:00
|
|
|
|
if(runtime_sched->pidle) {
|
|
|
|
|
runtime_lock(&runtime_sched->lock);
|
2013-11-06 20:49:01 +01:00
|
|
|
|
p = pidleget();
|
2016-11-18 18:48:29 +01:00
|
|
|
|
if(p && runtime_atomicload(&runtime_sched->sysmonwait)) {
|
|
|
|
|
runtime_atomicstore(&runtime_sched->sysmonwait, 0);
|
|
|
|
|
runtime_notewakeup(&runtime_sched->sysmonnote);
|
2013-11-06 20:49:01 +01:00
|
|
|
|
}
|
2016-11-18 18:48:29 +01:00
|
|
|
|
runtime_unlock(&runtime_sched->lock);
|
2013-11-06 20:49:01 +01:00
|
|
|
|
if(p) {
|
|
|
|
|
acquirep(p);
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return false;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
}
|
|
|
|
|
|
2013-07-16 08:54:42 +02:00
|
|
|
|
// runtime_exitsyscall slow path on g0.
|
|
|
|
|
// Failed to acquire P, enqueue gp as runnable.
|
|
|
|
|
static void
|
|
|
|
|
exitsyscall0(G *gp)
|
|
|
|
|
{
|
2016-08-30 23:07:47 +02:00
|
|
|
|
M *m;
|
2013-07-16 08:54:42 +02:00
|
|
|
|
P *p;
|
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
m = g->m;
|
|
|
|
|
gp->atomicstatus = _Grunnable;
|
2013-07-16 08:54:42 +02:00
|
|
|
|
gp->m = nil;
|
|
|
|
|
m->curg = nil;
|
2016-11-18 18:48:29 +01:00
|
|
|
|
runtime_lock(&runtime_sched->lock);
|
2013-07-16 08:54:42 +02:00
|
|
|
|
p = pidleget();
|
|
|
|
|
if(p == nil)
|
|
|
|
|
globrunqput(gp);
|
2016-11-18 18:48:29 +01:00
|
|
|
|
else if(runtime_atomicload(&runtime_sched->sysmonwait)) {
|
|
|
|
|
runtime_atomicstore(&runtime_sched->sysmonwait, 0);
|
|
|
|
|
runtime_notewakeup(&runtime_sched->sysmonnote);
|
2013-11-06 20:49:01 +01:00
|
|
|
|
}
|
2016-11-18 18:48:29 +01:00
|
|
|
|
runtime_unlock(&runtime_sched->lock);
|
2013-07-16 08:54:42 +02:00
|
|
|
|
if(p) {
|
|
|
|
|
acquirep(p);
|
2017-01-09 20:37:19 +01:00
|
|
|
|
execute(gp, false); // Never returns.
|
2013-07-16 08:54:42 +02:00
|
|
|
|
}
|
|
|
|
|
if(m->lockedg) {
|
|
|
|
|
// Wait until another thread schedules gp and so m again.
|
|
|
|
|
stoplockedm();
|
2017-01-09 20:37:19 +01:00
|
|
|
|
execute(gp, false); // Never returns.
|
2013-07-16 08:54:42 +02:00
|
|
|
|
}
|
|
|
|
|
stopm();
|
|
|
|
|
schedule(); // Never returns.
|
|
|
|
|
}
|
|
|
|
|
|
2016-09-30 15:45:08 +02:00
|
|
|
|
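// Go-visible entry points for the syscall package. The no_split_stack
// attribute keeps any stack split from happening between the Go caller
// and the entersyscall/exitsyscall bookkeeping above.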
void syscall_entersyscall(void)
|
|
|
|
|
__asm__(GOSYM_PREFIX "syscall.Entersyscall");
|
|
|
|
|
|
|
|
|
|
void syscall_entersyscall(void) __attribute__ ((no_split_stack));
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
syscall_entersyscall()
|
|
|
|
|
{
|
|
|
|
|
runtime_entersyscall(0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void syscall_exitsyscall(void)
|
|
|
|
|
__asm__(GOSYM_PREFIX "syscall.Exitsyscall");
|
|
|
|
|
|
|
|
|
|
void syscall_exitsyscall(void) __attribute__ ((no_split_stack));
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
syscall_exitsyscall()
|
|
|
|
|
{
|
|
|
|
|
runtime_exitsyscall(0);
|
|
|
|
|
}
|
|
|
|
|
|
2011-12-13 00:40:51 +01:00
|
|
|
|
// Allocate a new g. If allocatestack is true, also allocate a stack;
// when signalstack is set the stack is sized for a signal handler.
// The stack and its size are returned via ret_stack and ret_stacksize,
// either of which may be nil.
|
2011-11-28 06:45:49 +01:00
|
|
|
|
G*
|
2016-12-19 19:00:35 +01:00
|
|
|
|
runtime_malg(bool allocatestack, bool signalstack, byte** ret_stack, uintptr* ret_stacksize)
|
2011-11-28 06:45:49 +01:00
|
|
|
|
{
|
2016-12-19 19:00:35 +01:00
|
|
|
|
uintptr stacksize;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
G *newg;
|
2016-12-19 19:00:35 +01:00
|
|
|
|
byte* unused_stack;
|
|
|
|
|
uintptr unused_stacksize;
|
|
|
|
|
#if USING_SPLIT_STACK
|
|
|
|
|
int dont_block_signals = 0;
|
|
|
|
|
size_t ss_stacksize;
|
|
|
|
|
#endif
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2016-12-19 19:00:35 +01:00
|
|
|
|
if (ret_stack == nil) {
|
|
|
|
|
ret_stack = &unused_stack;
|
|
|
|
|
}
|
|
|
|
|
if (ret_stacksize == nil) {
|
|
|
|
|
ret_stacksize = &unused_stacksize;
|
|
|
|
|
}
|
2014-07-19 10:53:52 +02:00
|
|
|
|
newg = allocg();
|
2016-12-19 19:00:35 +01:00
|
|
|
|
if(allocatestack) {
|
|
|
|
|
stacksize = StackMin;
|
|
|
|
|
if(signalstack) {
|
|
|
|
|
stacksize = 32 * 1024; // OS X wants >= 8K, GNU/Linux >= 2K
|
|
|
|
|
#ifdef SIGSTKSZ
|
|
|
|
|
if(stacksize < SIGSTKSZ)
|
|
|
|
|
stacksize = SIGSTKSZ;
|
|
|
|
|
#endif
|
|
|
|
|
}
|
2011-12-21 23:24:47 +01:00
|
|
|
|
|
2016-12-19 19:00:35 +01:00
|
|
|
|
#if USING_SPLIT_STACK
|
2011-11-28 06:45:49 +01:00
|
|
|
|
*ret_stack = __splitstack_makecontext(stacksize,
|
2016-08-30 23:07:47 +02:00
|
|
|
|
&newg->stackcontext[0],
|
|
|
|
|
&ss_stacksize);
|
|
|
|
|
*ret_stacksize = (uintptr)ss_stacksize;
|
|
|
|
|
__splitstack_block_signals_context(&newg->stackcontext[0],
|
2011-12-21 23:24:47 +01:00
|
|
|
|
&dont_block_signals, nil);
|
2011-11-28 06:45:49 +01:00
|
|
|
|
#else
|
runtime: If no split stacks, allocate stacks using mmap on 64-bit systems.
When not using split stacks, libgo allocates large stacks for each
goroutine. On a 64-bit system, libgo allocates a maximum of 128G for
the Go heap, and allocates 4M for each stack. When the stacks are
allocated from the Go heap, the result is that a program can only create
32K goroutines, which is not enough for an active Go server. This patch
changes libgo to allocate the stacks using mmap directly, rather than
allocating them out of the Go heap. This change is only done for 64-bit
systems when not using split stacks. When using split stacks, the
stacks are allocated using mmap directly anyhow. On a 32-bit system,
there is no maximum size for the Go heap, or, rather, the maximum size
is the available address space anyhow.
Reviewed-on: https://go-review.googlesource.com/16531
From-SVN: r229636
2015-11-01 00:48:19 +01:00
|
|
|
|
// In 64-bit mode, the maximum Go allocation space is
|
|
|
|
|
// 128G. Our stack size is 4M, which only permits 32K
|
|
|
|
|
// goroutines. In order to not limit ourselves,
|
|
|
|
|
// allocate the stacks out of separate memory. In
|
|
|
|
|
// 32-bit mode, the Go allocation space is all of
|
|
|
|
|
// memory anyhow.
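// (128G of heap divided into 4M stacks is 128*1024/4 = 32768 stacks,
// hence the 32K limit mentioned above.)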
|
|
|
|
|
if(sizeof(void*) == 8) {
|
2016-10-13 17:24:50 +02:00
|
|
|
|
void *p = runtime_SysAlloc(stacksize, &mstats()->other_sys);
|
2015-11-01 00:48:19 +01:00
|
|
|
|
if(p == nil)
|
|
|
|
|
runtime_throw("runtime: cannot allocate memory for goroutine stack");
|
|
|
|
|
*ret_stack = (byte*)p;
|
|
|
|
|
} else {
|
|
|
|
|
*ret_stack = runtime_mallocgc(stacksize, 0, FlagNoProfiling|FlagNoGC);
|
|
|
|
|
runtime_xadd(&runtime_stacks_sys, stacksize);
|
|
|
|
|
}
|
2016-08-30 23:07:47 +02:00
|
|
|
|
*ret_stacksize = (uintptr)stacksize;
|
|
|
|
|
newg->gcinitialsp = *ret_stack;
|
|
|
|
|
newg->gcstacksize = (uintptr)stacksize;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
return newg;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
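// Create a new goroutine to run fn(arg). This is the runtime entry
// point used by compiled code for go statements; it returns the newly
// created g.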
G*
|
|
|
|
|
__go_go(void (*fn)(void*), void* arg)
|
|
|
|
|
{
|
|
|
|
|
byte *sp;
|
|
|
|
|
size_t spsize;
|
2012-05-15 20:56:48 +02:00
|
|
|
|
G *newg;
|
2014-06-07 00:37:27 +02:00
|
|
|
|
P *p;
|
2012-10-23 06:31:11 +02:00
|
|
|
|
|
2013-11-06 20:49:01 +01:00
|
|
|
|
//runtime_printf("newproc1 %p %p narg=%d nret=%d\n", fn->fn, argp, narg, nret);
|
2014-07-19 10:53:52 +02:00
|
|
|
|
if(fn == nil) {
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->m->throwing = -1; // do not dump full stacks
|
2014-07-19 10:53:52 +02:00
|
|
|
|
runtime_throw("go of nil func value");
|
|
|
|
|
}
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->m->locks++; // disable preemption because it can be holding p in a local var
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
p = (P*)g->m->p;
|
2014-06-07 00:37:27 +02:00
|
|
|
|
if((newg = gfget(p)) != nil) {
|
2011-11-28 06:45:49 +01:00
|
|
|
|
#ifdef USING_SPLIT_STACK
|
2011-12-21 23:24:47 +01:00
|
|
|
|
int dont_block_signals = 0;
|
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
sp = __splitstack_resetcontext(&newg->stackcontext[0],
|
2011-11-28 06:45:49 +01:00
|
|
|
|
&spsize);
|
2016-08-30 23:07:47 +02:00
|
|
|
|
__splitstack_block_signals_context(&newg->stackcontext[0],
|
2011-12-21 23:24:47 +01:00
|
|
|
|
&dont_block_signals, nil);
|
2011-11-28 06:45:49 +01:00
|
|
|
|
#else
|
2016-08-30 23:07:47 +02:00
|
|
|
|
sp = newg->gcinitialsp;
|
|
|
|
|
spsize = newg->gcstacksize;
|
2012-02-10 16:55:37 +01:00
|
|
|
|
if(spsize == 0)
|
|
|
|
|
runtime_throw("bad spsize in __go_go");
|
2016-08-30 23:07:47 +02:00
|
|
|
|
newg->gcnextsp = sp;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
#endif
|
2017-01-14 01:05:42 +01:00
|
|
|
|
newg->traceback = nil;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
} else {
|
2016-08-30 23:07:47 +02:00
|
|
|
|
uintptr malsize;
|
|
|
|
|
|
2016-12-19 19:00:35 +01:00
|
|
|
|
newg = runtime_malg(true, false, &sp, &malsize);
|
2016-08-30 23:07:47 +02:00
|
|
|
|
spsize = (size_t)malsize;
|
2017-01-03 23:58:48 +01:00
|
|
|
|
newg->atomicstatus = _Gdead;
|
2014-06-07 00:37:27 +02:00
|
|
|
|
allgadd(newg);
|
2011-11-28 06:45:49 +01:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
newg->entry = (byte*)fn;
|
|
|
|
|
newg->param = arg;
|
|
|
|
|
newg->gopc = (uintptr)__builtin_return_address(0);
|
2016-08-30 23:07:47 +02:00
|
|
|
|
newg->atomicstatus = _Grunnable;
|
2014-06-07 00:37:27 +02:00
|
|
|
|
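// Goroutine IDs come from a per-P cache that is refilled in batches
// of GoidCacheBatch from the global runtime_sched->goidgen counter,
// so most go statements avoid touching the shared counter.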
if(p->goidcache == p->goidcacheend) {
|
2016-11-18 18:48:29 +01:00
|
|
|
|
p->goidcache = runtime_xadd64(&runtime_sched->goidgen, GoidCacheBatch);
|
2014-06-07 00:37:27 +02:00
|
|
|
|
p->goidcacheend = p->goidcache + GoidCacheBatch;
|
|
|
|
|
}
|
|
|
|
|
newg->goid = p->goidcache++;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2016-12-19 19:00:35 +01:00
|
|
|
|
makeGContext(newg, sp, (uintptr)spsize);
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2017-01-09 20:37:19 +01:00
|
|
|
|
runqput(p, newg, true);
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2016-12-19 19:00:35 +01:00
|
|
|
|
if(runtime_atomicload(&runtime_sched->npidle) != 0 && runtime_atomicload(&runtime_sched->nmspinning) == 0 && fn != runtime_main) // TODO: fast atomic
|
|
|
|
|
wakep();
|
|
|
|
|
g->m->locks--;
|
|
|
|
|
return newg;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
}
|
|
|
|
|
|
2013-07-16 08:54:42 +02:00
|
|
|
|
void
|
|
|
|
|
runtime_Breakpoint(void)
|
|
|
|
|
{
|
|
|
|
|
runtime_breakpoint();
|
|
|
|
|
}
|
|
|
|
|
|
2013-01-24 20:44:23 +01:00
|
|
|
|
void runtime_Gosched (void) __asm__ (GOSYM_PREFIX "runtime.Gosched");
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
runtime_Gosched(void)
|
|
|
|
|
{
|
|
|
|
|
runtime_gosched();
|
|
|
|
|
}
|
|
|
|
|
|
2011-03-27 21:14:55 +02:00
|
|
|
|
static struct {
|
2016-10-14 15:36:35 +02:00
|
|
|
|
uint32 lock;
|
2011-03-27 21:14:55 +02:00
|
|
|
|
int32 hz;
|
|
|
|
|
} prof;
|
|
|
|
|
|
2014-06-07 00:37:27 +02:00
|
|
|
|
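// Dummy functions whose addresses are recorded as placeholder PCs in
// CPU profiles when a real traceback is not available (see
// runtime_sigprof below).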
static void System(void) {}
|
|
|
|
|
static void GC(void) {}
|
2013-11-06 20:49:01 +01:00
|
|
|
|
|
2011-12-13 00:40:51 +01:00
|
|
|
|
// Called if we receive a SIGPROF signal.
|
2011-03-27 21:14:55 +02:00
|
|
|
|
void
|
2012-07-26 03:57:04 +02:00
|
|
|
|
runtime_sigprof()
|
2011-03-27 21:14:55 +02:00
|
|
|
|
{
|
2016-08-30 23:07:47 +02:00
|
|
|
|
M *mp = g->m;
|
2013-01-30 23:24:40 +01:00
|
|
|
|
int32 n, i;
|
2013-11-06 20:49:01 +01:00
|
|
|
|
bool traceback;
|
2016-10-14 15:36:35 +02:00
|
|
|
|
uintptr pcbuf[TracebackMaxFrames];
|
|
|
|
|
Location locbuf[TracebackMaxFrames];
|
|
|
|
|
Slice stk;
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2016-10-14 15:36:35 +02:00
|
|
|
|
if(prof.hz == 0)
|
2011-03-27 21:14:55 +02:00
|
|
|
|
return;
|
2014-06-07 00:37:27 +02:00
|
|
|
|
|
|
|
|
|
if(mp == nil)
|
|
|
|
|
return;
|
|
|
|
|
|
2014-07-19 10:53:52 +02:00
|
|
|
|
// Profiling runs concurrently with GC, so it must not allocate.
|
|
|
|
|
mp->mallocing++;
|
|
|
|
|
|
2013-11-06 20:49:01 +01:00
|
|
|
|
traceback = true;
|
2014-06-07 00:37:27 +02:00
|
|
|
|
|
|
|
|
|
if(mp->mcache == nil)
|
2013-11-06 20:49:01 +01:00
|
|
|
|
traceback = false;
|
2014-06-07 00:37:27 +02:00
|
|
|
|
|
2013-11-06 20:49:01 +01:00
|
|
|
|
n = 0;
|
2013-12-01 02:40:16 +01:00
|
|
|
|
|
|
|
|
|
if(runtime_atomicload(&runtime_in_callers) > 0) {
|
|
|
|
|
// If SIGPROF arrived while already fetching runtime
|
|
|
|
|
// callers we can have trouble on older systems
|
|
|
|
|
// because the unwind library calls dl_iterate_phdr
|
|
|
|
|
// which was not recursive in the past.
|
|
|
|
|
traceback = false;
|
|
|
|
|
}
|
|
|
|
|
|
2013-11-06 20:49:01 +01:00
|
|
|
|
if(traceback) {
|
2016-10-14 15:36:35 +02:00
|
|
|
|
n = runtime_callers(0, locbuf, nelem(locbuf), false);
|
2013-11-06 20:49:01 +01:00
|
|
|
|
for(i = 0; i < n; i++)
|
2016-10-14 15:36:35 +02:00
|
|
|
|
pcbuf[i] = locbuf[i].pc;
|
2013-11-06 20:49:01 +01:00
|
|
|
|
}
|
2014-06-07 00:37:27 +02:00
|
|
|
|
if(!traceback || n <= 0) {
|
2013-11-06 20:49:01 +01:00
|
|
|
|
n = 2;
|
2016-10-14 15:36:35 +02:00
|
|
|
|
pcbuf[0] = (uintptr)runtime_getcallerpc(&n);
|
2014-06-07 00:37:27 +02:00
|
|
|
|
if(mp->gcing || mp->helpgc)
|
2016-10-14 15:36:35 +02:00
|
|
|
|
pcbuf[1] = (uintptr)GC;
|
2014-06-07 00:37:27 +02:00
|
|
|
|
else
|
2016-10-14 15:36:35 +02:00
|
|
|
|
pcbuf[1] = (uintptr)System;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (prof.hz != 0) {
|
|
|
|
|
stk.__values = &pcbuf[0];
|
|
|
|
|
stk.__count = n;
|
|
|
|
|
stk.__capacity = n;
|
|
|
|
|
|
|
|
|
|
// Simple cas-lock to coordinate with setcpuprofilerate.
|
|
|
|
|
while (!runtime_cas(&prof.lock, 0, 1)) {
|
|
|
|
|
runtime_osyield();
|
|
|
|
|
}
|
|
|
|
|
if (prof.hz != 0) {
|
|
|
|
|
runtime_cpuprofAdd(stk);
|
|
|
|
|
}
|
|
|
|
|
runtime_atomicstore(&prof.lock, 0);
|
2013-11-06 20:49:01 +01:00
|
|
|
|
}
|
2016-10-14 15:36:35 +02:00
|
|
|
|
|
2014-06-07 00:37:27 +02:00
|
|
|
|
mp->mallocing--;
|
2011-03-27 21:14:55 +02:00
|
|
|
|
}
|
|
|
|
|
|
2011-12-13 00:40:51 +01:00
|
|
|
|
// Arrange for the CPU profiler to record a traceback hz times a second.
|
2011-03-27 21:14:55 +02:00
|
|
|
|
void
|
2016-10-14 15:36:35 +02:00
|
|
|
|
runtime_setcpuprofilerate_m(int32 hz)
|
2011-03-27 21:14:55 +02:00
|
|
|
|
{
|
|
|
|
|
// Force sane arguments.
|
|
|
|
|
if(hz < 0)
|
|
|
|
|
hz = 0;
|
|
|
|
|
|
2013-11-06 20:49:01 +01:00
|
|
|
|
// Disable preemption, otherwise we can be rescheduled to another thread
|
|
|
|
|
// that has profiling enabled.
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->m->locks++;
|
2013-11-06 20:49:01 +01:00
|
|
|
|
|
|
|
|
|
// Stop profiler on this thread so that it is safe to lock prof.
|
2011-03-27 21:14:55 +02:00
|
|
|
|
// If a profiling signal came in while we had prof locked,
|
|
|
|
|
// it would deadlock.
|
|
|
|
|
runtime_resetcpuprofiler(0);
|
|
|
|
|
|
2016-10-14 15:36:35 +02:00
|
|
|
|
while (!runtime_cas(&prof.lock, 0, 1)) {
|
|
|
|
|
runtime_osyield();
|
|
|
|
|
}
|
2011-03-27 21:14:55 +02:00
|
|
|
|
prof.hz = hz;
|
2016-10-14 15:36:35 +02:00
|
|
|
|
runtime_atomicstore(&prof.lock, 0);
|
|
|
|
|
|
2016-11-18 18:48:29 +01:00
|
|
|
|
runtime_lock(&runtime_sched->lock);
|
|
|
|
|
runtime_sched->profilehz = hz;
|
|
|
|
|
runtime_unlock(&runtime_sched->lock);
|
2011-11-28 06:45:49 +01:00
|
|
|
|
|
2011-03-27 21:14:55 +02:00
|
|
|
|
if(hz != 0)
|
|
|
|
|
runtime_resetcpuprofiler(hz);
|
2013-11-06 20:49:01 +01:00
|
|
|
|
|
2016-08-30 23:07:47 +02:00
|
|
|
|
g->m->locks--;
|
2011-03-27 21:14:55 +02:00
|
|
|
|
}
|
2013-07-16 08:54:42 +02:00
|
|
|
|
|
2013-11-06 20:49:01 +01:00
|
|
|
|
// Return whether we are waiting for a GC. The gc toolchain uses
|
|
|
|
|
// preemption instead.
|
|
|
|
|
bool
|
|
|
|
|
runtime_gcwaiting(void)
|
|
|
|
|
{
|
2016-11-18 18:48:29 +01:00
|
|
|
|
return runtime_sched->gcwaiting;
|
2013-11-06 20:49:01 +01:00
|
|
|
|
}
|
2015-10-31 01:59:47 +01:00
|
|
|
|
|
|
|
|
|
// os_beforeExit is called from os.Exit(0).
|
|
|
|
|
//go:linkname os_beforeExit os.runtime_beforeExit
|
|
|
|
|
|
|
|
|
|
extern void os_beforeExit() __asm__ (GOSYM_PREFIX "os.runtime_beforeExit");
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
os_beforeExit()
|
|
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
|
2016-11-16 19:33:11 +01:00
|
|
|
|
intgo NumCPU(void) __asm__ (GOSYM_PREFIX "runtime.NumCPU");
|
|
|
|
|
|
|
|
|
|
intgo
|
|
|
|
|
NumCPU()
|
|
|
|
|
{
|
|
|
|
|
return (intgo)(runtime_ncpu);
|
|
|
|
|
}
|