// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Time-related runtime and pieces of package time.

package time

#include <sys/time.h>

#include "runtime.h"
#include "defs.h"
#include "arch.h"
#include "malloc.h"
#include "race.h"

enum {
	debug = 0,
};

static Timers timers;
static void addtimer(Timer*);
static void dumptimers(const char*);

// nacl fake time support.
int64 runtime_timens;

// Package time APIs.
// Godoc uses the comments in package time, not these.

// time.now is implemented in assembly.

// runtimeNano returns the current value of the runtime clock in nanoseconds.
func runtimeNano() (ns int64) {
	ns = runtime_nanotime();
}

// Sleep puts the current goroutine to sleep for at least ns nanoseconds.
func Sleep(ns int64) {
	runtime_tsleep(ns, "sleep");
}

// startTimer adds t to the timer heap.
func startTimer(t *Timer) {
	if(raceenabled)
		runtime_racerelease(t);
	runtime_addtimer(t);
}

// stopTimer removes t from the timer heap if it is there.
// It returns true if t was removed, false if t wasn't even there.
func stopTimer(t *Timer) (stopped bool) {
	stopped = runtime_deltimer(t);
}
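
// The func blocks above are the Go-visible half of this file: the .goc
// build step (goc2c, assuming the usual libgo build) rewrites each one as a
// C function exported to package time, so Sleep, startTimer, and stopTimer
// are thin wrappers around the runtime_* helpers defined below.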

// C runtime.

int64 runtime_unixnanotime(void)
{
	struct time_now_ret r;

	r = now();
	return r.sec*1000000000 + r.nsec;
}

static void timerproc(void*);
static void siftup(int32);
static void siftdown(int32);

// Ready the goroutine e.data.
static void
ready(int64 now, Eface e)
{
	USED(now);

	runtime_ready(e.__object);
}

static FuncVal readyv = {(void(*)(void))ready};
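
// Timer callbacks are carried as a FuncVal plus an Eface argument: when a
// timer fires, timerproc installs the FuncVal as the current closure and
// calls fv->fn(now, arg).  For sleeping goroutines the callback is ready()
// above, with the parked G stored in arg.__object, so firing the timer
// simply marks that goroutine runnable again.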

// Put the current goroutine to sleep for ns nanoseconds.
void
runtime_tsleep(int64 ns, const char *reason)
{
	G* g;
	Timer t;

	g = runtime_g();

	if(ns <= 0)
		return;

	t.when = runtime_nanotime() + ns;
	t.period = 0;
	t.fv = &readyv;
	t.arg.__object = g;
	runtime_lock(&timers);
	addtimer(&t);
	runtime_parkunlock(&timers, reason);
}
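
// The Timer used by runtime_tsleep lives on the sleeping goroutine's stack.
// That is safe because the goroutine does not return from
// runtime_parkunlock until ready() has run, and by then timerproc has
// already popped the one-shot timer from the heap, so the timers heap no
// longer points at the stack slot once the goroutine resumes.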

void
runtime_addtimer(Timer *t)
{
	runtime_lock(&timers);
	addtimer(t);
	runtime_unlock(&timers);
}

// Add a timer to the heap and start or kick the timer proc
// if the new timer is earlier than any of the others.
static void
addtimer(Timer *t)
{
	int32 n;
	Timer **nt;

	// when must never be negative; otherwise timerproc will overflow
	// during its delta calculation and never expire other timers.
	if(t->when < 0)
		t->when = (int64)((1ULL<<63)-1);

	if(timers.len >= timers.cap) {
		// Grow slice.
		n = 16;
		if(n <= timers.cap)
			n = timers.cap*3 / 2;
		nt = runtime_malloc(n*sizeof nt[0]);
		runtime_memmove(nt, timers.t, timers.len*sizeof nt[0]);
		runtime_free(timers.t);
		timers.t = nt;
		timers.cap = n;
	}
	t->i = timers.len++;
	timers.t[t->i] = t;
	siftup(t->i);
	if(t->i == 0) {
		// siftup moved to top: new earliest deadline.
		if(timers.sleeping) {
			timers.sleeping = false;
			runtime_notewakeup(&timers.waitnote);
		}
		if(timers.rescheduling) {
			timers.rescheduling = false;
			runtime_ready(timers.timerproc);
		}
	}
	if(timers.timerproc == nil) {
		timers.timerproc = __go_go(timerproc, nil);
		timers.timerproc->issystem = true;
	}
	if(debug)
		dumptimers("addtimer");
}
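
// addtimer notes: the backing array starts at 16 entries and grows by 3/2
// once full.  A new timer only needs to wake the timer goroutine when
// siftup leaves it at index 0, i.e. it is the new earliest deadline; in
// that case the wakeup goes through waitnote if timerproc is in a timed
// sleep, or through runtime_ready if it parked because the heap was empty.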

// Used to force a dereference before the lock is acquired.
static int32 gi;

// Delete timer t from the heap.
// Do not need to update the timerproc:
// if it wakes up early, no big deal.
bool
runtime_deltimer(Timer *t)
{
	int32 i;

	// Dereference t so that any panic happens before the lock is held.
	// Discard result, because t might be moving in the heap.
	i = t->i;
	gi = i;

	runtime_lock(&timers);

	// t may not be registered anymore and may have
	// a bogus i (typically 0, if generated by Go).
	// Verify it before proceeding.
	i = t->i;
	if(i < 0 || i >= timers.len || timers.t[i] != t) {
		runtime_unlock(&timers);
		return false;
	}

	timers.len--;
	if(i == timers.len) {
		timers.t[i] = nil;
	} else {
		timers.t[i] = timers.t[timers.len];
		timers.t[timers.len] = nil;
		timers.t[i]->i = i;
		siftup(i);
		siftdown(i);
	}
	if(debug)
		dumptimers("deltimer");
	runtime_unlock(&timers);
	return true;
}
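
// The gi trick above: reading t->i before taking the lock makes a nil or
// invalid t fault outside the critical section instead of while holding
// timers; storing the value into the global gi (presumably) keeps the
// compiler from discarding the read as dead code.  The value itself is
// never used.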

// Timerproc runs the time-driven events.
// It sleeps until the next event in the timers heap.
// If addtimer inserts a new earlier event, addtimer
// wakes timerproc early.
static void
timerproc(void* dummy __attribute__ ((unused)))
{
	int64 delta, now;
	Timer *t;
	FuncVal *fv;
	void (*f)(int64, Eface);
	Eface arg;

	for(;;) {
		runtime_lock(&timers);
		timers.sleeping = false;
		now = runtime_nanotime();
		for(;;) {
			if(timers.len == 0) {
				delta = -1;
				break;
			}
			t = timers.t[0];
			delta = t->when - now;
			if(delta > 0)
				break;
			if(t->period > 0) {
				// leave in heap but adjust next time to fire
				t->when += t->period * (1 + -delta/t->period);
				siftdown(0);
			} else {
				// remove from heap
				timers.t[0] = timers.t[--timers.len];
				timers.t[0]->i = 0;
				siftdown(0);
				t->i = -1; // mark as removed
			}
			fv = t->fv;
			f = (void*)t->fv->fn;
			arg = t->arg;
			runtime_unlock(&timers);
			if(raceenabled)
				runtime_raceacquire(t);
			__go_set_closure(fv);
			f(now, arg);

			// clear f and arg to avoid leak while sleeping for next timer
			f = nil;
			USED(f);
			arg.__type_descriptor = nil;
			arg.__object = nil;
			USED(&arg);

			runtime_lock(&timers);
		}
		if(delta < 0) {
			// No timers left - put goroutine to sleep.
			timers.rescheduling = true;
			runtime_g()->isbackground = true;
			runtime_parkunlock(&timers, "timer goroutine (idle)");
			runtime_g()->isbackground = false;
			continue;
		}
		// At least one timer pending. Sleep until then.
		timers.sleeping = true;
		runtime_noteclear(&timers.waitnote);
		runtime_unlock(&timers);
		runtime_notetsleepg(&timers.waitnote, delta);
	}
}
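
// Periodic timers are re-armed in place.  With delta = t->when - now <= 0,
// the update t->when += t->period * (1 + -delta/t->period) skips every
// missed period in one step rather than firing once per missed tick.
// Worked example: period = 100ns and delta = -250ns gives
// when += 100 * (1 + 2) = 300, so the new deadline is 50ns in the future.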

// heap maintenance algorithms.
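
// The timer heap is a quaternary (4-ary) min-heap on Timer.when, stored in
// the flat array timers.t: the parent of index i is (i-1)/4 and its
// children are i*4+1 through i*4+4.  siftdown below examines the children
// in two pairs (c, c+1 and c3, c3+1) to find the smallest of the four.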

static void
siftup(int32 i)
{
	int32 p;
	int64 when;
	Timer **t, *tmp;

	t = timers.t;
	when = t[i]->when;
	tmp = t[i];
	while(i > 0) {
		p = (i-1)/4; // parent
		if(when >= t[p]->when)
			break;
		t[i] = t[p];
		t[i]->i = i;
		t[p] = tmp;
		tmp->i = p;
		i = p;
	}
}

static void
siftdown(int32 i)
{
	int32 c, c3, len;
	int64 when, w, w3;
	Timer **t, *tmp;

	t = timers.t;
	len = timers.len;
	when = t[i]->when;
	tmp = t[i];
	for(;;) {
		c = i*4 + 1; // left child
		c3 = c + 2; // mid child
		if(c >= len) {
			break;
		}
		w = t[c]->when;
		if(c+1 < len && t[c+1]->when < w) {
			w = t[c+1]->when;
			c++;
		}
		if(c3 < len) {
			w3 = t[c3]->when;
			if(c3+1 < len && t[c3+1]->when < w3) {
				w3 = t[c3+1]->when;
				c3++;
			}
			if(w3 < w) {
				w = w3;
				c = c3;
			}
		}
		if(w >= when)
			break;
		t[i] = t[c];
		t[i]->i = i;
		t[c] = tmp;
		tmp->i = c;
		i = c;
	}
}

static void
dumptimers(const char *msg)
{
	Timer *t;
	int32 i;

	runtime_printf("timers: %s\n", msg);
	for(i = 0; i < timers.len; i++) {
		t = timers.t[i];
		runtime_printf("\t%d\t%p:\ti %d when %D period %D fn %p\n",
			i, t, t->i, t->when, t->period, t->fv->fn);
	}
	runtime_printf("\n");
}
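
// Register the timers struct as a garbage-collection root so that pending
// Timer objects reachable only through timers.t (and their fv closures and
// arg values) are kept alive while they wait to fire.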

void
runtime_time_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
{
	enqueue1(wbufp, (Obj){(byte*)&timers, sizeof timers, 0});
}