runtime: copy Go 1.7 runtime semaphore code
This triggered a check in releaseSudog that g.param is not nil, because
libgo uses the param field when starting a goroutine.  Fixed by clearing
g->param in kickoff in proc.c.

Reviewed-on: https://go-review.googlesource.com/30951

From-SVN: r241067
parent 859e95abb8
commit 543f217b7a
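For reference, the releaseSudog check mentioned in the commit message is the one in
the upstream Go 1.7 runtime that requires the goroutine's param field to already be
nil when a sudog is returned to the pool; roughly (paraphrased from Go 1.7 proc.go,
not part of this diff):

	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}

libgo passes a goroutine's startup argument through g->param, so the new sema.go code
tripped this check until kickoff was changed (see the proc.c hunk below) to copy the
value to a local and clear g->param before calling the entry function.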
@@ -1,4 +1,4 @@
-d56717f8c434b3d6b753c027487681769e201e14
+7e4543d050339e113e6278fd442d940c0f1a5670
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
@@ -528,7 +528,6 @@ runtime_files = \
 	rdebug.c \
 	reflect.c \
 	runtime1.c \
-	sema.c \
 	sigqueue.c \
 	string.c \
 	time.c \
@@ -265,7 +265,7 @@ am__objects_6 = go-append.lo go-assert.lo go-assert-interface.lo \
 	$(am__objects_2) panic.lo parfor.lo print.lo proc.lo \
 	runtime.lo signal_unix.lo thread.lo $(am__objects_3) yield.lo \
 	$(am__objects_4) cpuprof.lo go-iface.lo lfstack.lo malloc.lo \
-	mprof.lo netpoll.lo rdebug.lo reflect.lo runtime1.lo sema.lo \
+	mprof.lo netpoll.lo rdebug.lo reflect.lo runtime1.lo \
 	sigqueue.lo string.lo time.lo $(am__objects_5)
 am_libgo_llgo_la_OBJECTS = $(am__objects_6)
 libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS)
@@ -930,7 +930,6 @@ runtime_files = \
 	rdebug.c \
 	reflect.c \
 	runtime1.c \
-	sema.c \
 	sigqueue.c \
 	string.c \
 	time.c \
@@ -1656,7 +1655,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rtems-task-variable-add.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/runtime.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/runtime1.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sema.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/signal_unix.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sigqueue.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/string.Plo@am__quote@
libgo/go/runtime/sema.go (new file, 358 lines)
@@ -0,0 +1,358 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Semaphore implementation exposed to Go.
// Intended use is provide a sleep and wakeup
// primitive that can be used in the contended case
// of other synchronization primitives.
// Thus it targets the same goal as Linux's futex,
// but it has much simpler semantics.
//
// That is, don't think of these as semaphores.
// Think of them as a way to implement sleep and wakeup
// such that every sleep is paired with a single wakeup,
// even if, due to races, the wakeup happens before the sleep.
//
// See Mullender and Cox, ``Semaphores in Plan 9,''
// http://swtch.com/semaphore.pdf

package runtime

// Export temporarily for gccgo's C code to call:
//go:linkname semacquire runtime.semacquire
//go:linkname semrelease runtime.semrelease

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Asynchronous semaphore for sync.Mutex.

type semaRoot struct {
	lock  mutex
	head  *sudog
	tail  *sudog
	nwait uint32 // Number of waiters. Read w/o the lock.
}

// Prime to not correlate with any user patterns.
const semTabSize = 251

var semtable [semTabSize]struct {
	root semaRoot
	pad  [sys.CacheLineSize - unsafe.Sizeof(semaRoot{})]byte
}

//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
func sync_runtime_Semacquire(addr *uint32) {
	semacquire(addr, true)
}

//go:linkname net_runtime_Semacquire net.runtime_Semacquire
func net_runtime_Semacquire(addr *uint32) {
	semacquire(addr, true)
}

//go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
func sync_runtime_Semrelease(addr *uint32) {
	semrelease(addr)
}

//go:linkname net_runtime_Semrelease net.runtime_Semrelease
func net_runtime_Semrelease(addr *uint32) {
	semrelease(addr)
}

func readyWithTime(s *sudog, traceskip int) {
	if s.releasetime != 0 {
		s.releasetime = cputicks()
	}
	goready(s.g, traceskip)
}

// Called from runtime.
func semacquire(addr *uint32, profile bool) {
	gp := getg()
	if gp != gp.m.curg {
		throw("semacquire not on the G stack")
	}

	// Easy case.
	if cansemacquire(addr) {
		return
	}

	// Harder case:
	//	increment waiter count
	//	try cansemacquire one more time, return if succeeded
	//	enqueue itself as a waiter
	//	sleep
	//	(waiter descriptor is dequeued by signaler)
	s := acquireSudog()
	root := semroot(addr)
	t0 := int64(0)
	s.releasetime = 0
	if profile && blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1
	}
	for {
		lock(&root.lock)
		// Add ourselves to nwait to disable "easy case" in semrelease.
		atomic.Xadd(&root.nwait, 1)
		// Check cansemacquire to avoid missed wakeup.
		if cansemacquire(addr) {
			atomic.Xadd(&root.nwait, -1)
			unlock(&root.lock)
			break
		}
		// Any semrelease after the cansemacquire knows we're waiting
		// (we set nwait above), so go to sleep.
		root.queue(addr, s)
		goparkunlock(&root.lock, "semacquire", traceEvGoBlockSync, 4)
		if cansemacquire(addr) {
			break
		}
	}
	if s.releasetime > 0 {
		blockevent(s.releasetime-t0, 3)
	}
	releaseSudog(s)
}

func semrelease(addr *uint32) {
	root := semroot(addr)
	atomic.Xadd(addr, 1)

	// Easy case: no waiters?
	// This check must happen after the xadd, to avoid a missed wakeup
	// (see loop in semacquire).
	if atomic.Load(&root.nwait) == 0 {
		return
	}

	// Harder case: search for a waiter and wake it.
	lock(&root.lock)
	if atomic.Load(&root.nwait) == 0 {
		// The count is already consumed by another goroutine,
		// so no need to wake up another goroutine.
		unlock(&root.lock)
		return
	}
	s := root.head
	for ; s != nil; s = s.next {
		if s.elem == unsafe.Pointer(addr) {
			atomic.Xadd(&root.nwait, -1)
			root.dequeue(s)
			break
		}
	}
	unlock(&root.lock)
	if s != nil {
		readyWithTime(s, 5)
	}
}

func semroot(addr *uint32) *semaRoot {
	return &semtable[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root
}

func cansemacquire(addr *uint32) bool {
	for {
		v := atomic.Load(addr)
		if v == 0 {
			return false
		}
		if atomic.Cas(addr, v, v-1) {
			return true
		}
	}
}

func (root *semaRoot) queue(addr *uint32, s *sudog) {
	s.g = getg()
	s.elem = unsafe.Pointer(addr)
	s.next = nil
	s.prev = root.tail
	if root.tail != nil {
		root.tail.next = s
	} else {
		root.head = s
	}
	root.tail = s
}

func (root *semaRoot) dequeue(s *sudog) {
	if s.next != nil {
		s.next.prev = s.prev
	} else {
		root.tail = s.prev
	}
	if s.prev != nil {
		s.prev.next = s.next
	} else {
		root.head = s.next
	}
	s.elem = nil
	s.next = nil
	s.prev = nil
}

// notifyList is a ticket-based notification list used to implement sync.Cond.
//
// It must be kept in sync with the sync package.
type notifyList struct {
	// wait is the ticket number of the next waiter. It is atomically
	// incremented outside the lock.
	wait uint32

	// notify is the ticket number of the next waiter to be notified. It can
	// be read outside the lock, but is only written to with lock held.
	//
	// Both wait & notify can wrap around, and such cases will be correctly
	// handled as long as their "unwrapped" difference is bounded by 2^31.
	// For this not to be the case, we'd need to have 2^31+ goroutines
	// blocked on the same condvar, which is currently not possible.
	notify uint32

	// List of parked waiters.
	lock mutex
	head *sudog
	tail *sudog
}

// less checks if a < b, considering a & b running counts that may overflow the
// 32-bit range, and that their "unwrapped" difference is always less than 2^31.
func less(a, b uint32) bool {
	return int32(a-b) < 0
}

// notifyListAdd adds the caller to a notify list such that it can receive
// notifications. The caller must eventually call notifyListWait to wait for
// such a notification, passing the returned ticket number.
//go:linkname notifyListAdd sync.runtime_notifyListAdd
func notifyListAdd(l *notifyList) uint32 {
	// This may be called concurrently, for example, when called from
	// sync.Cond.Wait while holding a RWMutex in read mode.
	return atomic.Xadd(&l.wait, 1) - 1
}

// notifyListWait waits for a notification. If one has been sent since
// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
//go:linkname notifyListWait sync.runtime_notifyListWait
func notifyListWait(l *notifyList, t uint32) {
	lock(&l.lock)

	// Return right away if this ticket has already been notified.
	if less(t, l.notify) {
		unlock(&l.lock)
		return
	}

	// Enqueue itself.
	s := acquireSudog()
	s.g = getg()
	s.ticket = t
	s.releasetime = 0
	t0 := int64(0)
	if blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1
	}
	if l.tail == nil {
		l.head = s
	} else {
		l.tail.next = s
	}
	l.tail = s
	goparkunlock(&l.lock, "semacquire", traceEvGoBlockCond, 3)
	if t0 != 0 {
		blockevent(s.releasetime-t0, 2)
	}
	releaseSudog(s)
}

// notifyListNotifyAll notifies all entries in the list.
//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func notifyListNotifyAll(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock.
	if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
		return
	}

	// Pull the list out into a local variable, waiters will be readied
	// outside the lock.
	lock(&l.lock)
	s := l.head
	l.head = nil
	l.tail = nil

	// Update the next ticket to be notified. We can set it to the current
	// value of wait because any previous waiters are already in the list
	// or will notice that they have already been notified when trying to
	// add themselves to the list.
	atomic.Store(&l.notify, atomic.Load(&l.wait))
	unlock(&l.lock)

	// Go through the local list and ready all waiters.
	for s != nil {
		next := s.next
		s.next = nil
		readyWithTime(s, 4)
		s = next
	}
}

// notifyListNotifyOne notifies one entry in the list.
//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func notifyListNotifyOne(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock at all.
	if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
		return
	}

	lock(&l.lock)

	// Re-check under the lock if we need to do anything.
	t := l.notify
	if t == atomic.Load(&l.wait) {
		unlock(&l.lock)
		return
	}

	// Update the next notify ticket number, and try to find the G that
	// needs to be notified. If it hasn't made it to the list yet we won't
	// find it, but it won't park itself once it sees the new notify number.
	atomic.Store(&l.notify, t+1)
	for p, s := (*sudog)(nil), l.head; s != nil; p, s = s, s.next {
		if s.ticket == t {
			n := s.next
			if p != nil {
				p.next = n
			} else {
				l.head = n
			}
			if n == nil {
				l.tail = p
			}
			unlock(&l.lock)
			s.next = nil
			readyWithTime(s, 4)
			return
		}
	}
	unlock(&l.lock)
}

//go:linkname notifyListCheck sync.runtime_notifyListCheck
func notifyListCheck(sz uintptr) {
	if sz != unsafe.Sizeof(notifyList{}) {
		print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n")
		throw("bad notifyList size")
	}
}
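The heart of the acquire path above is cansemacquire: the uint32 behind addr counts
pending wakeups, and an acquire succeeds only by atomically decrementing a non-zero
count; otherwise the caller parks. The counter protocol can be illustrated on its own
with the public sync/atomic package (a sketch for illustration only; all names below
are local to the example and do not appear in the runtime):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// cansemacquire mirrors the loop above, but uses sync/atomic so the
	// snippet can run outside the runtime.
	func cansemacquire(addr *uint32) bool {
		for {
			v := atomic.LoadUint32(addr)
			if v == 0 {
				return false // nothing to consume; a real caller would park
			}
			if atomic.CompareAndSwapUint32(addr, v, v-1) {
				return true // consumed one pending wakeup
			}
		}
	}

	func main() {
		var sema uint32
		fmt.Println(cansemacquire(&sema)) // false: no wakeup posted yet
		atomic.AddUint32(&sema, 1)        // semrelease's counter increment
		fmt.Println(cansemacquire(&sema)) // true: the posted wakeup is consumed
		fmt.Println(cansemacquire(&sema)) // false again: one wakeup pairs with one sleep
	}

Because the count is incremented before any waiter search in semrelease, a wakeup
posted before the corresponding sleep is not lost: the sleeper's retry of
cansemacquire after registering in nwait will observe it.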
@@ -246,12 +246,15 @@ static void
 kickoff(void)
 {
 	void (*fn)(void*);
+	void *param;
 
 	if(g->traceback != nil)
 		gtraceback(g);
 
 	fn = (void (*)(void*))(g->entry);
-	fn(g->param);
+	param = g->param;
+	g->param = nil;
+	fn(param);
 	runtime_goexit();
 }
 
@@ -552,8 +552,10 @@ void runtime_newErrorCString(const char*, Eface*)
 /*
  * wrapped for go users
  */
-void	runtime_semacquire(uint32 volatile *, bool);
-void	runtime_semrelease(uint32 volatile *);
+void	runtime_semacquire(uint32 volatile *, bool)
+  __asm__ (GOSYM_PREFIX "runtime.semacquire");
+void	runtime_semrelease(uint32 volatile *)
+  __asm__ (GOSYM_PREFIX "runtime.semrelease");
 int32	runtime_gomaxprocsfunc(int32 n);
 void	runtime_procyield(uint32)
   __asm__(GOSYM_PREFIX "runtime.procyield");
@@ -1,470 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Semaphore implementation exposed to Go.
// Intended use is provide a sleep and wakeup
// primitive that can be used in the contended case
// of other synchronization primitives.
// Thus it targets the same goal as Linux's futex,
// but it has much simpler semantics.
//
// That is, don't think of these as semaphores.
// Think of them as a way to implement sleep and wakeup
// such that every sleep is paired with a single wakeup,
// even if, due to races, the wakeup happens before the sleep.
//
// See Mullender and Cox, ``Semaphores in Plan 9,''
// http://swtch.com/semaphore.pdf

package sync
#include "runtime.h"
#include "arch.h"

typedef struct SemaWaiter SemaWaiter;
struct SemaWaiter
{
	uint32 volatile* addr;
	G* g;
	int64 releasetime;
	int32 nrelease; // -1 for acquire
	SemaWaiter* prev;
	SemaWaiter* next;
};

typedef struct SemaRoot SemaRoot;
struct SemaRoot
{
	Lock;
	SemaWaiter* head;
	SemaWaiter* tail;
	// Number of waiters. Read w/o the lock.
	uint32 volatile nwait;
};

// Prime to not correlate with any user patterns.
#define SEMTABLESZ 251

struct semtable
{
	SemaRoot;
	uint8 pad[CacheLineSize-sizeof(SemaRoot)];
};
static struct semtable semtable[SEMTABLESZ];

static SemaRoot*
semroot(uint32 volatile *addr)
{
	return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
}

static void
semqueue(SemaRoot *root, uint32 volatile *addr, SemaWaiter *s)
{
	s->g = runtime_g();
	s->addr = addr;
	s->next = nil;
	s->prev = root->tail;
	if(root->tail)
		root->tail->next = s;
	else
		root->head = s;
	root->tail = s;
}

static void
semdequeue(SemaRoot *root, SemaWaiter *s)
{
	if(s->next)
		s->next->prev = s->prev;
	else
		root->tail = s->prev;
	if(s->prev)
		s->prev->next = s->next;
	else
		root->head = s->next;
	s->prev = nil;
	s->next = nil;
}

static int32
cansemacquire(uint32 volatile *addr)
{
	uint32 v;

	while((v = runtime_atomicload(addr)) > 0)
		if(runtime_cas(addr, v, v-1))
			return 1;
	return 0;
}

static void readyWithTime(SudoG* s, int traceskip __attribute__ ((unused))) {
	if (s->releasetime != 0) {
		s->releasetime = runtime_cputicks();
	}
	runtime_ready(s->g);
}

void
runtime_semacquire(uint32 volatile *addr, bool profile)
{
	SemaWaiter s;	// Needs to be allocated on stack, otherwise garbage collector could deallocate it
	SemaRoot *root;
	int64 t0;

	// Easy case.
	if(cansemacquire(addr))
		return;

	// Harder case:
	//	increment waiter count
	//	try cansemacquire one more time, return if succeeded
	//	enqueue itself as a waiter
	//	sleep
	//	(waiter descriptor is dequeued by signaler)
	root = semroot(addr);
	t0 = 0;
	s.releasetime = 0;
	if(profile && runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		s.releasetime = -1;
	}
	for(;;) {

		runtime_lock(root);
		// Add ourselves to nwait to disable "easy case" in semrelease.
		runtime_xadd(&root->nwait, 1);
		// Check cansemacquire to avoid missed wakeup.
		if(cansemacquire(addr)) {
			runtime_xadd(&root->nwait, -1);
			runtime_unlock(root);
			return;
		}
		// Any semrelease after the cansemacquire knows we're waiting
		// (we set nwait above), so go to sleep.
		semqueue(root, addr, &s);
		runtime_parkunlock(root, "semacquire");
		if(cansemacquire(addr)) {
			if(t0)
				runtime_blockevent(s.releasetime - t0, 3);
			return;
		}
	}
}

void
runtime_semrelease(uint32 volatile *addr)
{
	SemaWaiter *s;
	SemaRoot *root;

	root = semroot(addr);
	runtime_xadd(addr, 1);

	// Easy case: no waiters?
	// This check must happen after the xadd, to avoid a missed wakeup
	// (see loop in semacquire).
	if(runtime_atomicload(&root->nwait) == 0)
		return;

	// Harder case: search for a waiter and wake it.
	runtime_lock(root);
	if(runtime_atomicload(&root->nwait) == 0) {
		// The count is already consumed by another goroutine,
		// so no need to wake up another goroutine.
		runtime_unlock(root);
		return;
	}
	for(s = root->head; s; s = s->next) {
		if(s->addr == addr) {
			runtime_xadd(&root->nwait, -1);
			semdequeue(root, s);
			break;
		}
	}
	runtime_unlock(root);
	if(s) {
		if(s->releasetime)
			s->releasetime = runtime_cputicks();
		runtime_ready(s->g);
	}
}

// TODO(dvyukov): move to netpoll.goc once it's used by all OSes.
void net_runtime_Semacquire(uint32 *addr)
  __asm__ (GOSYM_PREFIX "net.runtime_Semacquire");

void net_runtime_Semacquire(uint32 *addr)
{
	runtime_semacquire(addr, true);
}

void net_runtime_Semrelease(uint32 *addr)
  __asm__ (GOSYM_PREFIX "net.runtime_Semrelease");

void net_runtime_Semrelease(uint32 *addr)
{
	runtime_semrelease(addr);
}

func runtime_Semacquire(addr *uint32) {
	runtime_semacquire(addr, true);
}

func runtime_Semrelease(addr *uint32) {
	runtime_semrelease(addr);
}

typedef struct SyncSema SyncSema;
struct SyncSema
{
	Lock;
	SemaWaiter* head;
	SemaWaiter* tail;
};

func runtime_Syncsemcheck(size uintptr) {
	if(size != sizeof(SyncSema)) {
		runtime_printf("bad SyncSema size: sync:%D runtime:%D\n", (int64)size, (int64)sizeof(SyncSema));
		runtime_throw("bad SyncSema size");
	}
}

// Syncsemacquire waits for a pairing Syncsemrelease on the same semaphore s.
func runtime_Syncsemacquire(s *SyncSema) {
	SemaWaiter w, *wake;
	int64 t0;

	w.g = runtime_g();
	w.nrelease = -1;
	w.next = nil;
	w.releasetime = 0;
	t0 = 0;
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		w.releasetime = -1;
	}

	runtime_lock(s);
	if(s->head && s->head->nrelease > 0) {
		// have pending release, consume it
		wake = nil;
		s->head->nrelease--;
		if(s->head->nrelease == 0) {
			wake = s->head;
			s->head = wake->next;
			if(s->head == nil)
				s->tail = nil;
		}
		runtime_unlock(s);
		if(wake)
			runtime_ready(wake->g);
	} else {
		// enqueue itself
		if(s->tail == nil)
			s->head = &w;
		else
			s->tail->next = &w;
		s->tail = &w;
		runtime_parkunlock(s, "semacquire");
		if(t0)
			runtime_blockevent(w.releasetime - t0, 2);
	}
}

// Syncsemrelease waits for n pairing Syncsemacquire on the same semaphore s.
func runtime_Syncsemrelease(s *SyncSema, n uint32) {
	SemaWaiter w, *wake;

	w.g = runtime_g();
	w.nrelease = (int32)n;
	w.next = nil;
	w.releasetime = 0;

	runtime_lock(s);
	while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
		// have pending acquire, satisfy it
		wake = s->head;
		s->head = wake->next;
		if(s->head == nil)
			s->tail = nil;
		if(wake->releasetime)
			wake->releasetime = runtime_cputicks();
		runtime_ready(wake->g);
		w.nrelease--;
	}
	if(w.nrelease > 0) {
		// enqueue itself
		if(s->tail == nil)
			s->head = &w;
		else
			s->tail->next = &w;
		s->tail = &w;
		runtime_parkunlock(s, "semarelease");
	} else
		runtime_unlock(s);
}

// notifyList is a ticket-based notification list used to implement sync.Cond.
//
// It must be kept in sync with the sync package.
typedef struct {
	// wait is the ticket number of the next waiter. It is atomically
	// incremented outside the lock.
	uint32 wait;

	// notify is the ticket number of the next waiter to be notified. It can
	// be read outside the lock, but is only written to with lock held.
	//
	// Both wait & notify can wrap around, and such cases will be correctly
	// handled as long as their "unwrapped" difference is bounded by 2^31.
	// For this not to be the case, we'd need to have 2^31+ goroutines
	// blocked on the same condvar, which is currently not possible.
	uint32 notify;

	// List of parked waiters.
	Lock lock;
	SudoG* head;
	SudoG* tail;
} notifyList;

// less checks if a < b, considering a & b running counts that may overflow the
// 32-bit range, and that their "unwrapped" difference is always less than 2^31.
static bool less(uint32 a, uint32 b) {
	return (int32)(a-b) < 0;
}

// notifyListAdd adds the caller to a notify list such that it can receive
// notifications. The caller must eventually call notifyListWait to wait for
// such a notification, passing the returned ticket number.
//go:linkname notifyListAdd sync.runtime_notifyListAdd
func runtime_notifyListAdd(l *notifyList) (r uint32) {
	// This may be called concurrently, for example, when called from
	// sync.Cond.Wait while holding a RWMutex in read mode.
	r = runtime_xadd(&l->wait, 1) - 1;
}

// notifyListWait waits for a notification. If one has been sent since
// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
//go:linkname notifyListWait sync.runtime_notifyListWait
func runtime_notifyListWait(l *notifyList, t uint32) {
	SudoG s;
	int64 t0;

	runtime_lock(&l->lock);

	// Return right away if this ticket has already been notified.
	if (less(t, l->notify)) {
		runtime_unlock(&l->lock);
		return;
	}

	// Enqueue itself.
	runtime_memclr(&s, sizeof(s));
	s.g = runtime_g();
	s.ticket = t;
	s.releasetime = 0;
	t0 = 0;
	if (runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		s.releasetime = -1;
	}
	if (l->tail == nil) {
		l->head = &s;
	} else {
		l->tail->next = &s;
	}
	l->tail = &s;
	runtime_parkunlock(&l->lock, "semacquire");
	if (t0 != 0) {
		runtime_blockevent(s.releasetime-t0, 2);
	}
}

// notifyListNotifyAll notifies all entries in the list.
//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func runtime_notifyListNotifyAll(l *notifyList) {
	SudoG *s;

	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock.
	if (runtime_atomicload(&l->wait) == runtime_atomicload(&l->notify)) {
		return;
	}

	// Pull the list out into a local variable, waiters will be readied
	// outside the lock.
	runtime_lock(&l->lock);
	s = l->head;
	l->head = nil;
	l->tail = nil;

	// Update the next ticket to be notified. We can set it to the current
	// value of wait because any previous waiters are already in the list
	// or will notice that they have already been notified when trying to
	// add themselves to the list.
	runtime_atomicstore(&l->notify, runtime_atomicload(&l->wait));
	runtime_unlock(&l->lock);

	// Go through the local list and ready all waiters.
	while (s != nil) {
		SudoG* next = s->next;
		s->next = nil;
		readyWithTime(s, 4);
		s = next;
	}
}

// notifyListNotifyOne notifies one entry in the list.
//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func runtime_notifyListNotifyOne(l *notifyList) {
	uint32 t;
	SudoG *p;
	SudoG *s;

	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock at all.
	if (runtime_atomicload(&l->wait) == runtime_atomicload(&l->notify)) {
		return;
	}

	runtime_lock(&l->lock);

	// Re-check under the lock if we need to do anything.
	t = l->notify;
	if (t == runtime_atomicload(&l->wait)) {
		runtime_unlock(&l->lock);
		return;
	}

	// Update the next notify ticket number, and try to find the G that
	// needs to be notified. If it hasn't made it to the list yet we won't
	// find it, but it won't park itself once it sees the new notify number.
	runtime_atomicstore(&l->notify, t+1);
	for (p = nil, s = l->head; s != nil; p = s, s = s->next) {
		if (s->ticket == t) {
			SudoG *n = s->next;
			if (p != nil) {
				p->next = n;
			} else {
				l->head = n;
			}
			if (n == nil) {
				l->tail = p;
			}
			runtime_unlock(&l->lock);
			s->next = nil;
			readyWithTime(s, 4);
			return;
		}
	}
	runtime_unlock(&l->lock);
}

//go:linkname notifyListCheck sync.runtime_notifyListCheck
func runtime_notifyListCheck(sz uintptr) {
	if (sz != sizeof(notifyList)) {
		runtime_printf("runtime: bad notifyList size\n");
		runtime_throw("bad notifyList size");
	}
}
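Both the new Go file and the removed C file implement the notifyList entry points
behind sync.Cond (runtime_notifyListAdd, runtime_notifyListWait,
runtime_notifyListNotifyOne/NotifyAll). At the user level the ticket mechanism backs
ordinary condition-variable code such as the following (standard library usage, shown
only for context; not part of this change):

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		mu := sync.Mutex{}
		cond := sync.NewCond(&mu)
		ready := false

		go func() {
			mu.Lock()
			ready = true
			mu.Unlock()
			cond.Signal() // ends up in runtime_notifyListNotifyOne
		}()

		mu.Lock()
		for !ready {
			cond.Wait() // runtime_notifyListAdd + runtime_notifyListWait under the hood
		}
		mu.Unlock()
		fmt.Println("ready:", ready)
	}

Cond.Wait takes a ticket before releasing the caller's lock, which is why a
notification sent between the unlock and the park is not lost: the waiter either finds
its ticket already notified or is found in the list by the notifier.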