Renamed what's left of rust_dom to rust_scheduler

Eric Holk 2011-06-28 12:15:41 -07:00 committed by Graydon Hoare
parent 02f6645fca
commit 657e5a2bd5
23 changed files with 303 additions and 321 deletions
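The change is essentially a mechanical rename: the rust_dom type becomes rust_scheduler, and the dom pointer that tasks and other runtime structures carry becomes sched. A minimal stand-alone sketch of that pattern, using simplified hypothetical types rather than the real runtime classes:

    // A minimal, hypothetical illustration of the rename pattern only; the
    // real runtime types carry many more fields and methods.
    #include <cstdio>

    struct rust_scheduler;                       // was: struct rust_dom;

    struct rust_task {
        rust_scheduler *sched;                   // was: rust_dom *dom;
        rust_task(rust_scheduler *s) : sched(s) {}
    };

    struct rust_scheduler {
        const char *name;
        rust_scheduler(const char *n) : name(n) {}
        rust_task *create_task(const char *task_name) {
            std::printf("scheduler %s creating task %s\n", name, task_name);
            return new rust_task(this);
        }
    };

    int main() {
        rust_scheduler sched("main");            // was: rust_dom dom("main");
        rust_task *root = sched.create_task("root");
        delete root;
        return 0;
    }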

View File

@ -10,7 +10,7 @@ RUNTIME_CS := rt/sync/timer.cpp \
rt/rust_run_program.cpp \
rt/rust_crate_cache.cpp \
rt/rust_comm.cpp \
rt/rust_dom.cpp \
rt/rust_scheduler.cpp \
rt/rust_task.cpp \
rt/rust_task_list.cpp \
rt/rust_proxy.cpp \
@ -37,7 +37,7 @@ RUNTIME_HDR := rt/globals.h \
rt/rust_util.h \
rt/rust_chan.h \
rt/rust_port.h \
rt/rust_dom.h \
rt/rust_scheduler.h \
rt/rust_task.h \
rt/rust_task_list.h \
rt/rust_proxy.h \

View File

@ -5,7 +5,7 @@
#include "rust_internal.h"
circular_buffer::circular_buffer(rust_task *task, size_t unit_sz) :
dom(task->dom),
sched(task->sched),
task(task),
unit_sz(unit_sz),
_buffer_sz(initial_size()),
@ -13,26 +13,26 @@ circular_buffer::circular_buffer(rust_task *task, size_t unit_sz) :
_unread(0),
_buffer((uint8_t *)task->malloc(_buffer_sz)) {
A(dom, unit_sz, "Unit size must be larger than zero.");
A(sched, unit_sz, "Unit size must be larger than zero.");
DLOG(dom, mem, "new circular_buffer(buffer_sz=%d, unread=%d)"
DLOG(sched, mem, "new circular_buffer(buffer_sz=%d, unread=%d)"
"-> circular_buffer=0x%" PRIxPTR,
_buffer_sz, _unread, this);
A(dom, _buffer, "Failed to allocate buffer.");
A(sched, _buffer, "Failed to allocate buffer.");
}
circular_buffer::~circular_buffer() {
DLOG(dom, mem, "~circular_buffer 0x%" PRIxPTR, this);
I(dom, _buffer);
W(dom, _unread == 0,
DLOG(sched, mem, "~circular_buffer 0x%" PRIxPTR, this);
I(sched, _buffer);
W(sched, _unread == 0,
"freeing circular_buffer with %d unread bytes", _unread);
task->free(_buffer);
}
size_t
circular_buffer::initial_size() {
I(dom, unit_sz > 0);
I(sched, unit_sz > 0);
return INITIAL_CIRCULAR_BUFFER_SIZE_IN_UNITS * unit_sz;
}
@ -41,8 +41,8 @@ circular_buffer::initial_size() {
*/
void
circular_buffer::transfer(void *dst) {
I(dom, dst);
I(dom, _unread <= _buffer_sz);
I(sched, dst);
I(sched, _unread <= _buffer_sz);
uint8_t *ptr = (uint8_t *) dst;
@ -54,13 +54,13 @@ circular_buffer::transfer(void *dst) {
} else {
head_sz = _buffer_sz - _next;
}
I(dom, _next + head_sz <= _buffer_sz);
I(sched, _next + head_sz <= _buffer_sz);
memcpy(ptr, _buffer + _next, head_sz);
// Then copy any other items from the beginning of the buffer
I(dom, _unread >= head_sz);
I(sched, _unread >= head_sz);
size_t tail_sz = _unread - head_sz;
I(dom, head_sz + tail_sz <= _buffer_sz);
I(sched, head_sz + tail_sz <= _buffer_sz);
memcpy(ptr + head_sz, _buffer, tail_sz);
}
@ -70,37 +70,37 @@ circular_buffer::transfer(void *dst) {
*/
void
circular_buffer::enqueue(void *src) {
I(dom, src);
I(dom, _unread <= _buffer_sz);
I(dom, _buffer);
I(sched, src);
I(sched, _unread <= _buffer_sz);
I(sched, _buffer);
// Grow if necessary.
if (_unread == _buffer_sz) {
grow();
}
DLOG(dom, mem, "circular_buffer enqueue "
DLOG(sched, mem, "circular_buffer enqueue "
"unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
_unread, _next, _buffer_sz, unit_sz);
I(dom, _unread < _buffer_sz);
I(dom, _unread + unit_sz <= _buffer_sz);
I(sched, _unread < _buffer_sz);
I(sched, _unread + unit_sz <= _buffer_sz);
// Copy data
size_t dst_idx = _next + _unread;
I(dom, dst_idx >= _buffer_sz || dst_idx + unit_sz <= _buffer_sz);
I(sched, dst_idx >= _buffer_sz || dst_idx + unit_sz <= _buffer_sz);
if (dst_idx >= _buffer_sz) {
dst_idx -= _buffer_sz;
I(dom, _next >= unit_sz);
I(dom, dst_idx <= _next - unit_sz);
I(sched, _next >= unit_sz);
I(sched, dst_idx <= _next - unit_sz);
}
I(dom, dst_idx + unit_sz <= _buffer_sz);
I(sched, dst_idx + unit_sz <= _buffer_sz);
memcpy(&_buffer[dst_idx], src, unit_sz);
_unread += unit_sz;
DLOG(dom, mem, "circular_buffer pushed data at index: %d", dst_idx);
DLOG(sched, mem, "circular_buffer pushed data at index: %d", dst_idx);
}
/**
@ -110,21 +110,21 @@ circular_buffer::enqueue(void *src) {
*/
void
circular_buffer::dequeue(void *dst) {
I(dom, unit_sz > 0);
I(dom, _unread >= unit_sz);
I(dom, _unread <= _buffer_sz);
I(dom, _buffer);
I(sched, unit_sz > 0);
I(sched, _unread >= unit_sz);
I(sched, _unread <= _buffer_sz);
I(sched, _buffer);
DLOG(dom, mem,
DLOG(sched, mem,
"circular_buffer dequeue "
"unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
_unread, _next, _buffer_sz, unit_sz);
I(dom, _next + unit_sz <= _buffer_sz);
I(sched, _next + unit_sz <= _buffer_sz);
if (dst != NULL) {
memcpy(dst, &_buffer[_next], unit_sz);
}
DLOG(dom, mem, "shifted data from index %d", _next);
DLOG(sched, mem, "shifted data from index %d", _next);
_unread -= unit_sz;
_next += unit_sz;
if (_next == _buffer_sz) {
@ -140,8 +140,8 @@ circular_buffer::dequeue(void *dst) {
void
circular_buffer::grow() {
size_t new_buffer_sz = _buffer_sz * 2;
I(dom, new_buffer_sz <= MAX_CIRCULAR_BUFFER_SIZE);
DLOG(dom, mem, "circular_buffer is growing to %d bytes", new_buffer_sz);
I(sched, new_buffer_sz <= MAX_CIRCULAR_BUFFER_SIZE);
DLOG(sched, mem, "circular_buffer is growing to %d bytes", new_buffer_sz);
void *new_buffer = task->malloc(new_buffer_sz);
transfer(new_buffer);
task->free(_buffer);
@ -153,8 +153,8 @@ circular_buffer::grow() {
void
circular_buffer::shrink() {
size_t new_buffer_sz = _buffer_sz / 2;
I(dom, initial_size() <= new_buffer_sz);
DLOG(dom, mem, "circular_buffer is shrinking to %d bytes", new_buffer_sz);
I(sched, initial_size() <= new_buffer_sz);
DLOG(sched, mem, "circular_buffer is shrinking to %d bytes", new_buffer_sz);
void *new_buffer = task->malloc(new_buffer_sz);
transfer(new_buffer);
task->free(_buffer);
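Throughout the circular_buffer code above (and the rest of the diff) the assertion-style macros I, A, and W take the owning scheduler as their first argument, so most of the churn is the one-word dom-to-sched substitution at those call sites. A heavily simplified, hypothetical sketch of that convention; the real macros live in the runtime headers and route through the scheduler's logging machinery, while these stand-ins just print and abort:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct rust_scheduler { const char *name; };

    // Hypothetical stand-ins: I() checks an invariant, A() asserts with a
    // message, W() only warns. Each takes the scheduler first, mirroring the
    // call sites in the diff.
    #define I(sched, expr)                                              \
        do {                                                            \
            if (!(expr)) {                                              \
                std::fprintf(stderr, "%s: invariant failed: %s\n",      \
                             (sched)->name, #expr);                     \
                std::abort();                                           \
            }                                                           \
        } while (0)

    #define A(sched, expr, msg)                                         \
        do {                                                            \
            if (!(expr)) {                                              \
                std::fprintf(stderr, "%s: assertion failed: %s\n",      \
                             (sched)->name, (msg));                     \
                std::abort();                                           \
            }                                                           \
        } while (0)

    #define W(sched, expr, msg)                                         \
        do {                                                            \
            if (!(expr))                                                \
                std::fprintf(stderr, "%s: warning: %s\n",               \
                             (sched)->name, (msg));                     \
        } while (0)

    int main() {
        rust_scheduler main_sched = { "main" };
        rust_scheduler *sched = &main_sched;
        size_t unit_sz = 8;
        A(sched, unit_sz > 0, "Unit size must be larger than zero.");
        I(sched, unit_sz <= 64);
        W(sched, unit_sz == 16, "unexpected unit size");  // prints a warning
        return 0;
    }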

View File

@ -10,7 +10,7 @@ circular_buffer : public task_owned<circular_buffer> {
static const size_t INITIAL_CIRCULAR_BUFFER_SIZE_IN_UNITS = 8;
static const size_t MAX_CIRCULAR_BUFFER_SIZE = 1 << 24;
rust_dom *dom;
rust_scheduler *sched;
public:
rust_task *task;

View File

@ -39,13 +39,13 @@ command_line_args : public kernel_owned<command_line_args>
size_t vec_fill = sizeof(rust_str *) * argc;
size_t vec_alloc = next_power_of_two(sizeof(rust_vec) + vec_fill);
void *mem = kernel->malloc(vec_alloc);
args = new (mem) rust_vec(task->dom, vec_alloc, 0, NULL);
args = new (mem) rust_vec(task->sched, vec_alloc, 0, NULL);
rust_str **strs = (rust_str**) &args->data[0];
for (int i = 0; i < argc; ++i) {
size_t str_fill = strlen(argv[i]) + 1;
size_t str_alloc = next_power_of_two(sizeof(rust_str) + str_fill);
mem = kernel->malloc(str_alloc);
strs[i] = new (mem) rust_str(task->dom, str_alloc, str_fill,
strs[i] = new (mem) rust_str(task->sched, str_alloc, str_fill,
(uint8_t const *)argv[i]);
}
args->fill = vec_fill;
@ -98,21 +98,21 @@ rust_start(uintptr_t main_fn, int argc, char **argv, void* crate_map) {
rust_srv *srv = new rust_srv();
rust_kernel *kernel = new rust_kernel(srv);
kernel->start();
rust_dom *dom = kernel->get_domain();
rust_scheduler *sched = kernel->get_scheduler();
command_line_args *args
= new (kernel) command_line_args(dom->root_task, argc, argv);
= new (kernel) command_line_args(sched->root_task, argc, argv);
DLOG(dom, dom, "startup: %d args in 0x%" PRIxPTR,
DLOG(sched, dom, "startup: %d args in 0x%" PRIxPTR,
args->argc, (uintptr_t)args->args);
for (int i = 0; i < args->argc; i++) {
DLOG(dom, dom, "startup: arg[%d] = '%s'", i, args->argv[i]);
DLOG(sched, dom, "startup: arg[%d] = '%s'", i, args->argv[i]);
}
dom->root_task->start(main_fn, (uintptr_t)args->args);
sched->root_task->start(main_fn, (uintptr_t)args->args);
int num_threads = get_num_threads();
DLOG(dom, dom, "Using %d worker threads.", num_threads);
DLOG(sched, dom, "Using %d worker threads.", num_threads);
int ret = kernel->start_task_threads(num_threads);
delete args;
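The startup path above now asks the kernel for a scheduler instead of a domain: get_scheduler() (formerly get_domain()) hands back the rust_scheduler whose root task is started and whose main loop the worker threads drive. For orientation, a compact, hypothetical stand-alone sketch of that renamed surface, with stubbed types rather than the real runtime:

    // Hypothetical stubs mirroring the renamed kernel/scheduler surface used
    // above; the real rust_kernel spawns OS threads and owns message queues.
    #include <cstdio>

    struct rust_scheduler {
        int rval;
        rust_scheduler() : rval(0) {}
        int start_main_loop(int id) {
            std::printf("scheduler main loop running on thread %d\n", id);
            return rval;
        }
    };

    struct rust_kernel {
        rust_scheduler *sched;                          // was: rust_dom *dom;
        rust_kernel() : sched(new rust_scheduler()) {}
        ~rust_kernel() { delete sched; }
        // was: get_domain()
        rust_scheduler *get_scheduler() const { return sched; }
        int start_task_threads(int num_threads) {
            // The real kernel starts worker threads; this stub runs inline.
            for (int i = 1; i < num_threads; ++i)
                sched->start_main_loop(i);
            return sched->start_main_loop(0);
        }
    };

    int main() {
        rust_kernel kernel;
        rust_scheduler *sched = kernel.get_scheduler(); // renamed accessor
        (void)sched;
        return kernel.start_task_threads(2);
    }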

View File

@ -9,7 +9,7 @@
extern "C" CDECL rust_str*
last_os_error(rust_task *task) {
rust_dom *dom = task->dom;
rust_scheduler *sched = task->sched;
LOG(task, task, "last_os_error()");
#if defined(__WIN32__)
@ -47,7 +47,7 @@ last_os_error(rust_task *task) {
task->fail(1);
return NULL;
}
rust_str *st = new (mem) rust_str(dom, alloc, fill, (const uint8_t *)buf);
rust_str *st = new (mem) rust_str(sched, alloc, fill, (const uint8_t *)buf);
#ifdef __WIN32__
LocalFree((HLOCAL)buf);
@ -57,7 +57,7 @@ last_os_error(rust_task *task) {
extern "C" CDECL rust_str *
rust_getcwd(rust_task *task) {
rust_dom *dom = task->dom;
rust_scheduler *sched = task->sched;
LOG(task, task, "rust_getcwd()");
char cbuf[BUF_BYTES];
@ -80,7 +80,7 @@ rust_getcwd(rust_task *task) {
}
rust_str *st;
st = new (mem) rust_str(dom, alloc, fill, (const uint8_t *)cbuf);
st = new (mem) rust_str(sched, alloc, fill, (const uint8_t *)cbuf);
return st;
}
@ -124,7 +124,7 @@ unsupervise(rust_task *task) {
extern "C" CDECL rust_vec*
vec_alloc(rust_task *task, type_desc *t, type_desc *elem_t, size_t n_elts)
{
rust_dom *dom = task->dom;
rust_scheduler *sched = task->sched;
LOG(task, mem, "vec_alloc %" PRIdPTR " elements of size %" PRIdPTR,
n_elts, elem_t->size);
size_t fill = n_elts * elem_t->size;
@ -134,7 +134,7 @@ vec_alloc(rust_task *task, type_desc *t, type_desc *elem_t, size_t n_elts)
task->fail(4);
return NULL;
}
rust_vec *vec = new (mem) rust_vec(dom, alloc, 0, NULL);
rust_vec *vec = new (mem) rust_vec(sched, alloc, 0, NULL);
return vec;
}
@ -198,11 +198,11 @@ vec_alloc_with_data(rust_task *task,
size_t elt_size,
void *d)
{
rust_dom *dom = task->dom;
rust_scheduler *sched = task->sched;
size_t alloc = next_power_of_two(sizeof(rust_vec) + (n_elts * elt_size));
void *mem = task->malloc(alloc, memory_region::LOCAL);
if (!mem) return NULL;
return new (mem) rust_vec(dom, alloc, fill * elt_size, (uint8_t*)d);
return new (mem) rust_vec(sched, alloc, fill * elt_size, (uint8_t*)d);
}
extern "C" CDECL rust_vec*
@ -355,13 +355,13 @@ str_from_buf(rust_task *task, char *buf, unsigned int len) {
extern "C" CDECL void *
rand_new(rust_task *task)
{
rust_dom *dom = task->dom;
rust_scheduler *sched = task->sched;
randctx *rctx = (randctx *) task->malloc(sizeof(randctx));
if (!rctx) {
task->fail(1);
return NULL;
}
isaac_init(dom, rctx);
isaac_init(sched, rctx);
return rctx;
}

View File

@ -22,7 +22,7 @@ rust_chan::rust_chan(rust_task *task,
rust_chan::~rust_chan() {
LOG(task, comm, "del rust_chan(task=0x%" PRIxPTR ")", (uintptr_t) this);
A(task->dom, is_associated() == false,
A(task->sched, is_associated() == false,
"Channel must be disassociated before being freed.");
--task->ref_count;
}
@ -49,7 +49,7 @@ bool rust_chan::is_associated() {
* Unlink this channel from its associated port.
*/
void rust_chan::disassociate() {
A(task->dom, is_associated(), "Channel must be associated with a port.");
A(task->sched, is_associated(), "Channel must be associated with a port.");
if (port->is_proxy() == false) {
LOG(task, task,
@ -69,14 +69,14 @@ void rust_chan::disassociate() {
void rust_chan::send(void *sptr) {
buffer.enqueue(sptr);
rust_dom *dom = task->dom;
rust_scheduler *sched = task->sched;
if (!is_associated()) {
W(dom, is_associated(),
W(sched, is_associated(),
"rust_chan::transmit with no associated port.");
return;
}
A(dom, !buffer.is_empty(),
A(sched, !buffer.is_empty(),
"rust_chan::transmit with nothing to send.");
if (port->is_proxy()) {
@ -86,7 +86,7 @@ void rust_chan::send(void *sptr) {
} else {
rust_port *target_port = port->referent();
if (target_port->task->blocked_on(target_port)) {
DLOG(dom, comm, "dequeued in rendezvous_ptr");
DLOG(sched, comm, "dequeued in rendezvous_ptr");
buffer.dequeue(target_port->task->rendezvous_ptr);
target_port->task->rendezvous_ptr = 0;
target_port->task->wakeup(target_port);

View File

@ -7,16 +7,16 @@ rust_crate_cache::get_type_desc(size_t size,
size_t n_descs,
type_desc const **descs)
{
I(dom, n_descs > 1);
I(sched, n_descs > 1);
type_desc *td = NULL;
size_t keysz = n_descs * sizeof(type_desc*);
HASH_FIND(hh, this->type_descs, descs, keysz, td);
if (td) {
DLOG(dom, cache, "rust_crate_cache::get_type_desc hit");
DLOG(sched, cache, "rust_crate_cache::get_type_desc hit");
return td;
}
DLOG(dom, cache, "rust_crate_cache::get_type_desc miss");
td = (type_desc*) dom->kernel->malloc(sizeof(type_desc) + keysz);
DLOG(sched, cache, "rust_crate_cache::get_type_desc miss");
td = (type_desc*) sched->kernel->malloc(sizeof(type_desc) + keysz);
if (!td)
return NULL;
// By convention, desc 0 is the root descriptor.
@ -27,7 +27,7 @@ rust_crate_cache::get_type_desc(size_t size,
td->size = size;
td->align = align;
for (size_t i = 0; i < n_descs; ++i) {
DLOG(dom, cache,
DLOG(sched, cache,
"rust_crate_cache::descs[%" PRIdPTR "] = 0x%" PRIxPTR,
i, descs[i]);
td->descs[i] = descs[i];
@ -38,22 +38,22 @@ rust_crate_cache::get_type_desc(size_t size,
return td;
}
rust_crate_cache::rust_crate_cache(rust_dom *dom)
rust_crate_cache::rust_crate_cache(rust_scheduler *sched)
: type_descs(NULL),
dom(dom),
sched(sched),
idx(0)
{
}
void
rust_crate_cache::flush() {
DLOG(dom, cache, "rust_crate_cache::flush()");
DLOG(sched, cache, "rust_crate_cache::flush()");
while (type_descs) {
type_desc *d = type_descs;
HASH_DEL(type_descs, d);
DLOG(dom, mem, "rust_crate_cache::flush() tydesc %" PRIxPTR, d);
dom->kernel->free(d);
DLOG(sched, mem, "rust_crate_cache::flush() tydesc %" PRIxPTR, d);
sched->kernel->free(d);
}
}

View File

@ -50,7 +50,7 @@ extern "C" {
#include "sync/lock_and_signal.h"
#include "sync/lock_free_queue.h"
struct rust_dom;
struct rust_scheduler;
struct rust_task;
class rust_log;
class rust_port;
@ -174,7 +174,7 @@ public:
#include "rust_proxy.h"
#include "rust_kernel.h"
#include "rust_message.h"
#include "rust_dom.h"
#include "rust_scheduler.h"
struct rust_timer {
// FIXME: This will probably eventually need replacement
@ -183,7 +183,7 @@ struct rust_timer {
// For now it's just the most basic "thread that can interrupt
// its associated domain-thread" device, so that we have
// *some* form of task-preemption.
rust_dom *dom;
rust_scheduler *sched;
uintptr_t exit_flag;
#if defined(__WIN32__)
@ -193,7 +193,7 @@ struct rust_timer {
pthread_t thread;
#endif
rust_timer(rust_dom *dom);
rust_timer(rust_scheduler *sched);
~rust_timer();
};

View File

@ -13,55 +13,55 @@ rust_kernel::rust_kernel(rust_srv *srv) :
_srv(srv),
_interrupt_kernel_loop(FALSE)
{
dom = create_domain("main");
sched = create_scheduler("main");
}
rust_dom *
rust_kernel::create_domain(const char *name) {
rust_scheduler *
rust_kernel::create_scheduler(const char *name) {
_kernel_lock.lock();
rust_message_queue *message_queue =
new (this) rust_message_queue(_srv, this);
rust_srv *srv = _srv->clone();
rust_dom *dom =
new (this) rust_dom(this, message_queue, srv, name);
rust_handle<rust_dom> *handle = internal_get_dom_handle(dom);
rust_scheduler *sched =
new (this) rust_scheduler(this, message_queue, srv, name);
rust_handle<rust_scheduler> *handle = internal_get_sched_handle(sched);
message_queue->associate(handle);
message_queues.append(message_queue);
KLOG("created domain: " PTR ", name: %s, index: %d",
dom, name, dom->list_index);
KLOG("created scheduler: " PTR ", name: %s, index: %d",
sched, name, sched->list_index);
_kernel_lock.signal_all();
_kernel_lock.unlock();
return dom;
return sched;
}
void
rust_kernel::destroy_domain() {
rust_kernel::destroy_scheduler() {
_kernel_lock.lock();
KLOG("deleting domain: " PTR ", name: %s, index: %d",
dom, dom->name, dom->list_index);
dom->message_queue->disassociate();
rust_srv *srv = dom->srv;
delete dom;
KLOG("deleting scheduler: " PTR ", name: %s, index: %d",
sched, sched->name, sched->list_index);
sched->message_queue->disassociate();
rust_srv *srv = sched->srv;
delete sched;
delete srv;
_kernel_lock.signal_all();
_kernel_lock.unlock();
}
rust_handle<rust_dom> *
rust_kernel::internal_get_dom_handle(rust_dom *dom) {
rust_handle<rust_dom> *handle = NULL;
if (_dom_handles.get(dom, &handle) == false) {
rust_handle<rust_scheduler> *
rust_kernel::internal_get_sched_handle(rust_scheduler *sched) {
rust_handle<rust_scheduler> *handle = NULL;
if (_sched_handles.get(sched, &handle) == false) {
handle =
new (this) rust_handle<rust_dom>(this, dom->message_queue, dom);
_dom_handles.put(dom, handle);
new (this) rust_handle<rust_scheduler>(this, sched->message_queue, sched);
_sched_handles.put(sched, handle);
}
return handle;
}
rust_handle<rust_dom> *
rust_kernel::get_dom_handle(rust_dom *dom) {
rust_handle<rust_scheduler> *
rust_kernel::get_sched_handle(rust_scheduler *sched) {
_kernel_lock.lock();
rust_handle<rust_dom> *handle = internal_get_dom_handle(dom);
rust_handle<rust_scheduler> *handle = internal_get_sched_handle(sched);
_kernel_lock.unlock();
return handle;
}
@ -72,7 +72,7 @@ rust_kernel::get_task_handle(rust_task *task) {
rust_handle<rust_task> *handle = NULL;
if (_task_handles.get(task, &handle) == false) {
handle =
new (this) rust_handle<rust_task>(this, task->dom->message_queue,
new (this) rust_handle<rust_task>(this, task->sched->message_queue,
task);
_task_handles.put(task, handle);
}
@ -87,7 +87,7 @@ rust_kernel::get_port_handle(rust_port *port) {
if (_port_handles.get(port, &handle) == false) {
handle =
new (this) rust_handle<rust_port>(this,
port->task->dom->message_queue,
port->task->sched->message_queue,
port);
_port_handles.put(port, handle);
}
@ -96,9 +96,8 @@ rust_kernel::get_port_handle(rust_port *port) {
}
void
rust_kernel::log_all_domain_state() {
KLOG("log_all_domain_state");
dom->log_state();
rust_kernel::log_all_scheduler_state() {
sched->log_state();
}
/**
@ -159,7 +158,7 @@ rust_kernel::terminate_kernel_loop() {
}
rust_kernel::~rust_kernel() {
destroy_domain();
destroy_scheduler();
terminate_kernel_loop();
@ -175,8 +174,8 @@ rust_kernel::~rust_kernel() {
KLOG("..task handles freed");
free_handles(_port_handles);
KLOG("..port handles freed");
free_handles(_dom_handles);
KLOG("..dom handles freed");
free_handles(_sched_handles);
KLOG("..sched handles freed");
KLOG("freeing queues");
@ -235,14 +234,14 @@ int rust_kernel::start_task_threads(int num_threads)
threads.push(thread);
}
dom->start_main_loop(0);
sched->start_main_loop(0);
while(threads.pop(&thread)) {
thread->join();
delete thread;
}
return dom->rval;
return sched->rval;
}
#ifdef __WIN32__
@ -257,9 +256,9 @@ rust_kernel::win32_require(LPCTSTR fn, BOOL ok) {
NULL, err,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPTSTR) &buf, 0, NULL );
DLOG_ERR(dom, dom, "%s failed with error %ld: %s", fn, err, buf);
DLOG_ERR(sched, dom, "%s failed with error %ld: %s", fn, err, buf);
LocalFree((HLOCAL)buf);
I(dom, ok);
I(sched, ok);
}
}
#endif
@ -271,7 +270,7 @@ rust_task_thread::rust_task_thread(int id, rust_kernel *owner)
void rust_task_thread::run()
{
owner->dom->start_main_loop(id);
owner->sched->start_main_loop(id);
}
//

View File

@ -52,7 +52,7 @@ class rust_kernel : public rust_thread {
*/
hash_map<rust_task *, rust_handle<rust_task> *> _task_handles;
hash_map<rust_port *, rust_handle<rust_port> *> _port_handles;
hash_map<rust_dom *, rust_handle<rust_dom> *> _dom_handles;
hash_map<rust_scheduler *, rust_handle<rust_scheduler> *> _sched_handles;
template<class T> void free_handles(hash_map<T*, rust_handle<T>* > &map);
@ -65,15 +65,16 @@ class rust_kernel : public rust_thread {
void terminate_kernel_loop();
void pump_message_queues();
rust_handle<rust_dom> *internal_get_dom_handle(rust_dom *dom);
rust_handle<rust_scheduler> *
internal_get_sched_handle(rust_scheduler *sched);
rust_dom *create_domain(const char *name);
void destroy_domain();
rust_scheduler *create_scheduler(const char *name);
void destroy_scheduler();
array_list<rust_task_thread *> threads;
public:
rust_dom *dom;
rust_scheduler *sched;
lock_and_signal scheduler_lock;
/**
@ -85,7 +86,7 @@ public:
*/
indexed_list<rust_message_queue> message_queues;
rust_handle<rust_dom> *get_dom_handle(rust_dom *dom);
rust_handle<rust_scheduler> *get_sched_handle(rust_scheduler *sched);
rust_handle<rust_task> *get_task_handle(rust_task *task);
rust_handle<rust_port> *get_port_handle(rust_port *port);
@ -103,7 +104,7 @@ public:
void
notify_message_enqueued(rust_message_queue *queue, rust_message *message);
void log_all_domain_state();
void log_all_scheduler_state();
void log(uint32_t level, char const *fmt, ...);
virtual ~rust_kernel();
@ -111,7 +112,7 @@ public:
void free(void *mem);
// FIXME: this should go away
inline rust_dom *get_domain() const { return dom; }
inline rust_scheduler *get_scheduler() const { return sched; }
int start_task_threads(int num_threads);

View File

@ -23,9 +23,9 @@ static const char * _foreground_colors[] = { "[37m",
static lock_and_signal _log_lock;
static uint32_t _last_thread_id;
rust_log::rust_log(rust_srv *srv, rust_dom *dom) :
rust_log::rust_log(rust_srv *srv, rust_scheduler *sched) :
_srv(srv),
_dom(dom),
_sched(sched),
_use_colors(getenv("RUST_COLOR_LOG")) {
}
@ -104,12 +104,12 @@ rust_log::trace_ln(rust_task *task, uint32_t level, char *message) {
uint32_t thread_id = hash((uint32_t) pthread_self());
#endif
char prefix[BUF_BYTES] = "";
if (_dom && _dom->name) {
if (_sched && _sched->name) {
append_string(prefix, "%04" PRIxPTR ":%.10s:",
thread_id, _dom->name);
thread_id, _sched->name);
} else {
append_string(prefix, "%04" PRIxPTR ":0x%08" PRIxPTR ":",
thread_id, (uintptr_t) _dom);
thread_id, (uintptr_t) _sched);
}
if (task) {
if (task->name) {

View File

@ -1,3 +1,4 @@
// -*- c++ -*-
#ifndef RUST_LOG_H
#define RUST_LOG_H
@ -5,30 +6,30 @@ const uint32_t log_err = 0;
const uint32_t log_note = 1;
#define LOG(task, field, ...) \
DLOG_LVL(log_note, task, task->dom, field, __VA_ARGS__)
DLOG_LVL(log_note, task, task->sched, field, __VA_ARGS__)
#define LOG_ERR(task, field, ...) \
DLOG_LVL(log_err, task, task->dom, field, __VA_ARGS__)
#define DLOG(dom, field, ...) \
DLOG_LVL(log_note, NULL, dom, field, __VA_ARGS__)
#define DLOG_ERR(dom, field, ...) \
DLOG_LVL(log_err, NULL, dom, field, __VA_ARGS__)
#define LOGPTR(dom, msg, ptrval) \
DLOG_LVL(log_note, NULL, dom, mem, "%s 0x%" PRIxPTR, msg, ptrval)
#define DLOG_LVL(lvl, task, dom, field, ...) \
DLOG_LVL(log_err, task, task->sched, field, __VA_ARGS__)
#define DLOG(sched, field, ...) \
DLOG_LVL(log_note, NULL, sched, field, __VA_ARGS__)
#define DLOG_ERR(sched, field, ...) \
DLOG_LVL(log_err, NULL, sched, field, __VA_ARGS__)
#define LOGPTR(sched, msg, ptrval) \
DLOG_LVL(log_note, NULL, sched, mem, "%s 0x%" PRIxPTR, msg, ptrval)
#define DLOG_LVL(lvl, task, sched, field, ...) \
do { \
rust_dom* _d_ = dom; \
rust_scheduler* _d_ = sched; \
if (log_rt_##field >= lvl && _d_->log_lvl >= lvl) { \
_d_->log(task, lvl, __VA_ARGS__); \
} \
} while (0)
struct rust_dom;
struct rust_scheduler;
struct rust_task;
class rust_log {
public:
rust_log(rust_srv *srv, rust_dom *dom);
rust_log(rust_srv *srv, rust_scheduler *sched);
virtual ~rust_log();
enum ansi_color {
@ -53,7 +54,7 @@ public:
private:
rust_srv *_srv;
rust_dom *_dom;
rust_scheduler *_sched;
bool _use_labels;
bool _use_colors;
void trace_ln(rust_task *task, char *message);
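The renamed DLOG/DLOG_LVL pair behaves exactly as before: the macro's scheduler argument is bound to a local rust_scheduler* and checked against both the per-field compile-time level and the scheduler's runtime log level before anything is logged. A minimal stand-alone sketch, with hypothetical log_rt_mem and log_lvl values standing in for the runtime's real configuration:

    #include <cstdarg>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins for the runtime's log configuration.
    const uint32_t log_err = 0;
    const uint32_t log_note = 1;
    const uint32_t log_rt_mem = 1;     // per-field compile-time log level

    struct rust_scheduler {
        uint32_t log_lvl;              // per-scheduler runtime log level
        void log(void *task, uint32_t lvl, const char *fmt, ...) {
            (void)task; (void)lvl;
            va_list ap;
            va_start(ap, fmt);
            std::vprintf(fmt, ap);
            va_end(ap);
            std::printf("\n");
        }
    };

    // Same shape as the renamed pair above: the scheduler argument is bound
    // to a local and both log levels are checked before logging.
    #define DLOG_LVL(lvl, task, sched, field, ...)                  \
        do {                                                        \
            rust_scheduler *_d_ = sched;                            \
            if (log_rt_##field >= lvl && _d_->log_lvl >= lvl) {     \
                _d_->log(task, lvl, __VA_ARGS__);                   \
            }                                                       \
        } while (0)

    #define DLOG(sched, field, ...) \
        DLOG_LVL(log_note, NULL, sched, field, __VA_ARGS__)

    int main() {
        rust_scheduler sched = { log_note };
        DLOG(&sched, mem, "circular_buffer enqueue unread: %d", 42);
        return 0;
    }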

View File

@ -112,7 +112,7 @@ void data_message::kernel_process() {
rust_message_queue::rust_message_queue(rust_srv *srv, rust_kernel *kernel)
: region(srv, true),
kernel(kernel),
dom_handle(NULL) {
sched_handle(NULL) {
// Nop.
}

View File

@ -93,26 +93,26 @@ class rust_message_queue : public lock_free_queue<rust_message*>,
public:
memory_region region;
rust_kernel *kernel;
rust_handle<rust_dom> *dom_handle;
rust_handle<rust_scheduler> *sched_handle;
int32_t list_index;
rust_message_queue(rust_srv *srv, rust_kernel *kernel);
void associate(rust_handle<rust_dom> *dom_handle) {
this->dom_handle = dom_handle;
void associate(rust_handle<rust_scheduler> *sched_handle) {
this->sched_handle = sched_handle;
}
/**
* The Rust domain relinquishes control to the Rust kernel.
*/
void disassociate() {
this->dom_handle = NULL;
this->sched_handle = NULL;
}
/**
* Checks if a Rust domain is responsible for draining the message queue.
*/
bool is_associated() {
return this->dom_handle != NULL;
return this->sched_handle != NULL;
}
void enqueue(rust_message* message) {

View File

@ -3,7 +3,7 @@
#include "rust_internal.h"
#include "globals.h"
rust_dom::rust_dom(rust_kernel *kernel,
rust_scheduler::rust_scheduler(rust_kernel *kernel,
rust_message_queue *message_queue, rust_srv *srv,
const char *name) :
interrupt_flag(0),
@ -32,8 +32,8 @@ rust_dom::rust_dom(rust_kernel *kernel,
root_task = create_task(NULL, name);
}
rust_dom::~rust_dom() {
DLOG(this, dom, "~rust_dom %s @0x%" PRIxPTR, name, (uintptr_t)this);
rust_scheduler::~rust_scheduler() {
DLOG(this, dom, "~rust_scheduler %s @0x%" PRIxPTR, name, (uintptr_t)this);
newborn_tasks.delete_all();
running_tasks.delete_all();
@ -45,7 +45,7 @@ rust_dom::~rust_dom() {
}
void
rust_dom::activate(rust_task *task) {
rust_scheduler::activate(rust_task *task) {
context ctx;
task->ctx.next = &ctx;
@ -57,7 +57,7 @@ rust_dom::activate(rust_task *task) {
}
void
rust_dom::log(rust_task* task, uint32_t level, char const *fmt, ...) {
rust_scheduler::log(rust_task* task, uint32_t level, char const *fmt, ...) {
char buf[BUF_BYTES];
va_list args;
va_start(args, fmt);
@ -67,7 +67,7 @@ rust_dom::log(rust_task* task, uint32_t level, char const *fmt, ...) {
}
void
rust_dom::fail() {
rust_scheduler::fail() {
log(NULL, log_err, "domain %s @0x%" PRIxPTR " root task failed",
name, this);
I(this, rval == 0);
@ -75,7 +75,7 @@ rust_dom::fail() {
}
size_t
rust_dom::number_of_live_tasks() {
rust_scheduler::number_of_live_tasks() {
return running_tasks.length() + blocked_tasks.length();
}
@ -83,7 +83,7 @@ rust_dom::number_of_live_tasks() {
* Delete any dead tasks.
*/
void
rust_dom::reap_dead_tasks() {
rust_scheduler::reap_dead_tasks() {
I(this, kernel->scheduler_lock.lock_held_by_current_thread());
for (size_t i = 0; i < dead_tasks.length(); ) {
rust_task *task = dead_tasks[i];
@ -104,7 +104,7 @@ rust_dom::reap_dead_tasks() {
/**
* Drains and processes incoming pending messages.
*/
void rust_dom::drain_incoming_message_queue(bool process) {
void rust_scheduler::drain_incoming_message_queue(bool process) {
rust_message *message;
while (message_queue->dequeue(&message)) {
DLOG(this, comm, "<== receiving \"%s\" " PTR,
@ -124,7 +124,7 @@ void rust_dom::drain_incoming_message_queue(bool process) {
* Returns NULL if no tasks can be scheduled.
*/
rust_task *
rust_dom::schedule_task() {
rust_scheduler::schedule_task() {
I(this, this);
// FIXME: in the face of failing tasks, this is not always right.
// I(this, n_live_tasks() > 0);
@ -142,7 +142,7 @@ rust_dom::schedule_task() {
}
void
rust_dom::log_state() {
rust_scheduler::log_state() {
if (log_rt_task < log_note) return;
if (!running_tasks.is_empty()) {
@ -182,7 +182,7 @@ rust_dom::log_state() {
* drop to zero.
*/
int
rust_dom::start_main_loop(int id) {
rust_scheduler::start_main_loop(int id) {
kernel->scheduler_lock.lock();
// Make sure someone is watching, to pull us out of infinite loops.
@ -282,12 +282,12 @@ rust_dom::start_main_loop(int id) {
}
rust_crate_cache *
rust_dom::get_cache() {
rust_scheduler::get_cache() {
return &cache;
}
rust_task *
rust_dom::create_task(rust_task *spawner, const char *name) {
rust_scheduler::create_task(rust_task *spawner, const char *name) {
rust_task *task =
new (this->kernel) rust_task (this, &newborn_tasks, spawner, name);
DLOG(this, task, "created task: " PTR ", spawner: %s, name: %s",

View File

@ -1,7 +1,7 @@
#ifndef RUST_DOM_H
#define RUST_DOM_H
#ifndef RUST_SCHEDULER_H
#define RUST_SCHEDULER_H
struct rust_dom;
struct rust_scheduler;
class
rust_crate_cache
@ -18,15 +18,15 @@ private:
public:
rust_dom *dom;
rust_scheduler *sched;
size_t idx;
rust_crate_cache(rust_dom *dom);
rust_crate_cache(rust_scheduler *sched);
~rust_crate_cache();
void flush();
};
struct rust_dom : public kernel_owned<rust_dom>, rc_base<rust_dom>
struct rust_scheduler : public kernel_owned<rust_scheduler>, rc_base<rust_scheduler>
{
// Fields known to the compiler:
uintptr_t interrupt_flag;
@ -64,10 +64,10 @@ struct rust_dom : public kernel_owned<rust_dom>, rc_base<rust_dom>
// Only a pointer to 'name' is kept, so it must live as long as this
// domain.
rust_dom(rust_kernel *kernel,
rust_scheduler(rust_kernel *kernel,
rust_message_queue *message_queue, rust_srv *srv,
const char *name);
~rust_dom();
~rust_scheduler();
void activate(rust_task *task);
void log(rust_task *task, uint32_t level, char const *fmt, ...);
rust_log & get_log();
@ -89,7 +89,7 @@ struct rust_dom : public kernel_owned<rust_dom>, rc_base<rust_dom>
};
inline rust_log &
rust_dom::get_log() {
rust_scheduler::get_log() {
return _log;
}
@ -104,4 +104,4 @@ rust_dom::get_log() {
// End:
//
#endif /* RUST_DOM_H */
#endif /* RUST_SCHEDULER_H */

View File

@ -26,10 +26,10 @@ new_stk(rust_task *task, size_t minsz)
minsz = min_stk_bytes;
size_t sz = sizeof(stk_seg) + minsz;
stk_seg *stk = (stk_seg *)task->malloc(sz);
LOGPTR(task->dom, "new stk", (uintptr_t)stk);
LOGPTR(task->sched, "new stk", (uintptr_t)stk);
memset(stk, 0, sizeof(stk_seg));
stk->limit = (uintptr_t) &stk->data[minsz];
LOGPTR(task->dom, "stk limit", stk->limit);
LOGPTR(task->sched, "stk limit", stk->limit);
stk->valgrind_id =
VALGRIND_STACK_REGISTER(&stk->data[0],
&stk->data[minsz]);
@ -40,7 +40,7 @@ static void
del_stk(rust_task *task, stk_seg *stk)
{
VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
LOGPTR(task->dom, "freeing stk segment", (uintptr_t)stk);
LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
task->free(stk);
}
@ -52,16 +52,16 @@ del_stk(rust_task *task, stk_seg *stk)
size_t const n_callee_saves = 4;
size_t const callee_save_fp = 0;
rust_task::rust_task(rust_dom *dom, rust_task_list *state,
rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
rust_task *spawner, const char *name) :
maybe_proxy<rust_task>(this),
stk(NULL),
runtime_sp(0),
rust_sp(0),
gc_alloc_chain(0),
dom(dom),
sched(sched),
cache(NULL),
kernel(dom->kernel),
kernel(sched->kernel),
name(name),
state(state),
cond(NULL),
@ -71,11 +71,11 @@ rust_task::rust_task(rust_dom *dom, rust_task_list *state,
rendezvous_ptr(0),
handle(NULL),
active(false),
local_region(&dom->srv->local_region),
synchronized_region(&dom->srv->synchronized_region)
local_region(&sched->srv->local_region),
synchronized_region(&sched->srv->synchronized_region)
{
LOGPTR(dom, "new task", (uintptr_t)this);
DLOG(dom, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);
LOGPTR(sched, "new task", (uintptr_t)this);
DLOG(sched, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);
stk = new_stk(this, 0);
rust_sp = stk->limit;
@ -87,33 +87,13 @@ rust_task::rust_task(rust_dom *dom, rust_task_list *state,
rust_task::~rust_task()
{
DLOG(dom, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
DLOG(sched, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
name, (uintptr_t)this, ref_count);
/*
for (uintptr_t fp = get_fp(); fp; fp = get_previous_fp(fp)) {
frame_glue_fns *glue_fns = get_frame_glue_fns(fp);
DLOG(dom, task,
"~rust_task, frame fp=0x%" PRIxPTR ", glue_fns=0x%" PRIxPTR,
fp, glue_fns);
if (glue_fns) {
DLOG(dom, task,
"~rust_task, mark_glue=0x%" PRIxPTR,
glue_fns->mark_glue);
DLOG(dom, task,
"~rust_task, drop_glue=0x%" PRIxPTR,
glue_fns->drop_glue);
DLOG(dom, task,
"~rust_task, reloc_glue=0x%" PRIxPTR,
glue_fns->reloc_glue);
}
}
*/
/* FIXME: tighten this up, there are some more
assertions that hold at task-lifecycle events. */
I(dom, ref_count == 0 ||
(ref_count == 1 && this == dom->root_task));
I(sched, ref_count == 0 ||
(ref_count == 1 && this == sched->root_task));
del_stk(this, stk);
}
@ -147,7 +127,7 @@ void task_start_wrapper(spawn_args *a)
// This is duplicated from upcall_exit, which is probably dead code by
// now.
LOG(task, task, "task ref_count: %d", task->ref_count);
A(task->dom, task->ref_count >= 0,
A(task->sched, task->ref_count >= 0,
"Task ref_count should not be negative on exit!");
task->die();
task->notify_tasks_waiting_to_join();
@ -160,10 +140,10 @@ void
rust_task::start(uintptr_t spawnee_fn,
uintptr_t args)
{
LOGPTR(dom, "from spawnee", spawnee_fn);
LOGPTR(sched, "from spawnee", spawnee_fn);
I(dom, stk->data != NULL);
I(dom, !kernel->scheduler_lock.lock_held_by_current_thread());
I(sched, stk->data != NULL);
I(sched, !kernel->scheduler_lock.lock_held_by_current_thread());
scoped_lock with(kernel->scheduler_lock);
@ -182,7 +162,7 @@ rust_task::start(uintptr_t spawnee_fn,
ctx.call((void *)task_start_wrapper, a, sp);
yield_timer.reset(0);
transition(&dom->newborn_tasks, &dom->running_tasks);
transition(&sched->newborn_tasks, &sched->running_tasks);
}
void
@ -227,8 +207,8 @@ rust_task::kill() {
// Unblock the task so it can unwind.
unblock();
if (this == dom->root_task)
dom->fail();
if (this == sched->root_task)
sched->fail();
LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
// run_on_resume(rust_unwind_glue);
@ -237,15 +217,15 @@ rust_task::kill() {
void
rust_task::fail(size_t nargs) {
// See note in ::kill() regarding who should call this.
DLOG(dom, task, "task %s @0x%" PRIxPTR " failing", name, this);
DLOG(sched, task, "task %s @0x%" PRIxPTR " failing", name, this);
backtrace();
// Unblock the task so it can unwind.
unblock();
if (this == dom->root_task)
dom->fail();
if (this == sched->root_task)
sched->fail();
// run_after_return(nargs, rust_unwind_glue);
if (supervisor) {
DLOG(dom, task,
DLOG(sched, task,
"task %s @0x%" PRIxPTR
" propagating failure to supervisor %s @0x%" PRIxPTR,
name, this, supervisor->name, supervisor);
@ -259,14 +239,14 @@ void
rust_task::gc(size_t nargs)
{
// FIXME: not presently implemented; was broken by rustc.
DLOG(dom, task,
DLOG(sched, task,
"task %s @0x%" PRIxPTR " garbage collecting", name, this);
}
void
rust_task::unsupervise()
{
DLOG(dom, task,
DLOG(sched, task,
"task %s @0x%" PRIxPTR
" disconnecting from supervisor %s @0x%" PRIxPTR,
name, this, supervisor->name, supervisor);
@ -302,13 +282,13 @@ rust_task::get_frame_glue_fns(uintptr_t fp) {
bool
rust_task::running()
{
return state == &dom->running_tasks;
return state == &sched->running_tasks;
}
bool
rust_task::blocked()
{
return state == &dom->blocked_tasks;
return state == &sched->blocked_tasks;
}
bool
@ -320,13 +300,13 @@ rust_task::blocked_on(rust_cond *on)
bool
rust_task::dead()
{
return state == &dom->dead_tasks;
return state == &sched->dead_tasks;
}
void
rust_task::link_gc(gc_alloc *gcm) {
I(dom, gcm->prev == NULL);
I(dom, gcm->next == NULL);
I(sched, gcm->prev == NULL);
I(sched, gcm->next == NULL);
gcm->prev = NULL;
gcm->next = gc_alloc_chain;
gc_alloc_chain = gcm;
@ -361,7 +341,7 @@ rust_task::malloc(size_t sz, type_desc *td)
return mem;
if (td) {
gc_alloc *gcm = (gc_alloc*) mem;
DLOG(dom, task, "task %s @0x%" PRIxPTR
DLOG(sched, task, "task %s @0x%" PRIxPTR
" allocated %d GC bytes = 0x%" PRIxPTR,
name, (uintptr_t)this, sz, gcm);
memset((void*) gcm, 0, sizeof(gc_alloc));
@ -384,7 +364,7 @@ rust_task::realloc(void *data, size_t sz, bool is_gc)
unlink_gc(gcm);
sz += sizeof(gc_alloc);
gcm = (gc_alloc*) realloc((void*)gcm, sz, memory_region::LOCAL);
DLOG(dom, task, "task %s @0x%" PRIxPTR
DLOG(sched, task, "task %s @0x%" PRIxPTR
" reallocated %d GC bytes = 0x%" PRIxPTR,
name, (uintptr_t)this, sz, gcm);
if (!gcm)
@ -406,7 +386,7 @@ rust_task::free(void *p, bool is_gc)
if (is_gc) {
gc_alloc *gcm = (gc_alloc*)(((char *)p) - sizeof(gc_alloc));
unlink_gc(gcm);
DLOG(dom, mem,
DLOG(sched, mem,
"task %s @0x%" PRIxPTR " freeing GC memory = 0x%" PRIxPTR,
name, (uintptr_t)this, gcm);
free(gcm, memory_region::LOCAL);
@ -417,11 +397,11 @@ rust_task::free(void *p, bool is_gc)
void
rust_task::transition(rust_task_list *src, rust_task_list *dst) {
I(dom, kernel->scheduler_lock.lock_held_by_current_thread());
DLOG(dom, task,
I(sched, kernel->scheduler_lock.lock_held_by_current_thread());
DLOG(sched, task,
"task %s " PTR " state change '%s' -> '%s' while in '%s'",
name, (uintptr_t)this, src->name, dst->name, state->name);
I(dom, state == src);
I(sched, state == src);
src->remove(this);
dst->append(this);
state = dst;
@ -431,30 +411,30 @@ void
rust_task::block(rust_cond *on, const char* name) {
LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
(uintptr_t) on, (uintptr_t) cond);
A(dom, cond == NULL, "Cannot block an already blocked task.");
A(dom, on != NULL, "Cannot block on a NULL object.");
A(sched, cond == NULL, "Cannot block an already blocked task.");
A(sched, on != NULL, "Cannot block on a NULL object.");
transition(&dom->running_tasks, &dom->blocked_tasks);
transition(&sched->running_tasks, &sched->blocked_tasks);
cond = on;
cond_name = name;
}
void
rust_task::wakeup(rust_cond *from) {
A(dom, cond != NULL, "Cannot wake up unblocked task.");
A(sched, cond != NULL, "Cannot wake up unblocked task.");
LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
(uintptr_t) cond, (uintptr_t) from);
A(dom, cond == from, "Cannot wake up blocked task on wrong condition.");
A(sched, cond == from, "Cannot wake up blocked task on wrong condition.");
transition(&dom->blocked_tasks, &dom->running_tasks);
I(dom, cond == from);
transition(&sched->blocked_tasks, &sched->running_tasks);
I(sched, cond == from);
cond = NULL;
cond_name = "none";
}
void
rust_task::die() {
transition(&dom->running_tasks, &dom->dead_tasks);
transition(&sched->running_tasks, &sched->dead_tasks);
}
void
@ -467,8 +447,8 @@ rust_crate_cache *
rust_task::get_crate_cache()
{
if (!cache) {
DLOG(dom, task, "fetching cache for current crate");
cache = dom->get_cache();
DLOG(sched, task, "fetching cache for current crate");
cache = sched->get_cache();
}
return cache;
}
@ -486,7 +466,7 @@ rust_task::backtrace() {
rust_handle<rust_task> *
rust_task::get_handle() {
if (handle == NULL) {
handle = dom->kernel->get_task_handle(this);
handle = sched->kernel->get_task_handle(this);
}
return handle;
}
@ -503,7 +483,7 @@ rust_task::malloc(size_t size, memory_region::memory_region_type type) {
} else if (type == memory_region::SYNCHRONIZED) {
return synchronized_region.malloc(size);
}
I(dom, false);
I(sched, false);
return NULL;
}
@ -535,7 +515,7 @@ rust_task::realloc(void *mem, size_t size,
void
rust_task::free(void *mem, memory_region::memory_region_type type) {
DLOG(dom, mem, "rust_task::free(0x%" PRIxPTR ")", mem);
DLOG(sched, mem, "rust_task::free(0x%" PRIxPTR ")", mem);
if (type == memory_region::LOCAL) {
local_region.free(mem);
} else if (type == memory_region::SYNCHRONIZED) {

View File

@ -43,7 +43,7 @@ rust_task : public maybe_proxy<rust_task>,
uintptr_t runtime_sp; // Runtime sp while task running.
uintptr_t rust_sp; // Saved sp when not running.
gc_alloc *gc_alloc_chain; // Linked list of GC allocations.
rust_dom *dom;
rust_scheduler *sched;
rust_crate_cache *cache;
// Fields known only to the runtime.
@ -83,7 +83,7 @@ rust_task : public maybe_proxy<rust_task>,
memory_region synchronized_region;
// Only a pointer to 'name' is kept, so it must live as long as this task.
rust_task(rust_dom *dom,
rust_task(rust_scheduler *sched,
rust_task_list *state,
rust_task *spawner,
const char *name);
@ -111,8 +111,8 @@ rust_task : public maybe_proxy<rust_task>,
void die();
void unblock();
void check_active() { I(dom, dom->curr_task == this); }
void check_suspended() { I(dom, dom->curr_task != this); }
void check_active() { I(sched, sched->curr_task == this); }
void check_suspended() { I(sched, sched->curr_task != this); }
// Print a backtrace, if the "bt" logging option is on.
void backtrace();

View File

@ -1,16 +1,16 @@
#include "rust_internal.h"
rust_task_list::rust_task_list (rust_dom *dom, const char* name) :
dom(dom), name(name) {
rust_task_list::rust_task_list (rust_scheduler *sched, const char* name) :
sched(sched), name(name) {
// Nop;
}
void
rust_task_list::delete_all() {
DLOG(dom, task, "deleting all %s tasks", name);
DLOG(sched, task, "deleting all %s tasks", name);
while (is_empty() == false) {
rust_task *task = pop_value();
DLOG(dom, task, "deleting task " PTR, task);
DLOG(sched, task, "deleting task " PTR, task);
delete task;
}
}

View File

@ -1,4 +1,4 @@
// -*- c++-mode -*-
// -*- c++ -*-
#ifndef RUST_TASK_LIST_H
#define RUST_TASK_LIST_H
@ -8,9 +8,9 @@
class rust_task_list : public indexed_list<rust_task>,
public kernel_owned<rust_task_list> {
public:
rust_dom *dom;
rust_scheduler *sched;
const char* name;
rust_task_list (rust_dom *dom, const char* name);
rust_task_list (rust_scheduler *sched, const char* name);
void delete_all();
};

View File

@ -29,8 +29,8 @@ static void *
timer_loop(void *ptr) {
// We were handed the rust_timer that owns us.
rust_timer *timer = (rust_timer *)ptr;
rust_dom *dom = timer->dom;
DLOG(dom, timer, "in timer 0x%" PRIxPTR, (uintptr_t)timer);
rust_scheduler *sched = timer->sched;
DLOG(sched, timer, "in timer 0x%" PRIxPTR, (uintptr_t)timer);
size_t ms = TIME_SLICE_IN_MS;
while (!timer->exit_flag) {
@ -39,10 +39,10 @@ timer_loop(void *ptr) {
#else
usleep(ms * 1000);
#endif
DLOG(dom, timer, "timer 0x%" PRIxPTR
" interrupting domain 0x%" PRIxPTR, (uintptr_t) timer,
(uintptr_t) dom);
dom->interrupt_flag = 1;
DLOG(sched, timer, "timer 0x%" PRIxPTR
" interrupting schedain 0x%" PRIxPTR, (uintptr_t) timer,
(uintptr_t) sched);
sched->interrupt_flag = 1;
}
#if defined(__WIN32__)
ExitThread(0);
@ -52,12 +52,12 @@ timer_loop(void *ptr) {
return 0;
}
rust_timer::rust_timer(rust_dom *dom) :
dom(dom), exit_flag(0) {
DLOG(dom, timer, "creating timer for domain 0x%" PRIxPTR, dom);
rust_timer::rust_timer(rust_scheduler *sched) :
sched(sched), exit_flag(0) {
DLOG(sched, timer, "creating timer for domain 0x%" PRIxPTR, sched);
#if defined(__WIN32__)
thread = CreateThread(NULL, 0, timer_loop, this, 0, NULL);
dom->kernel->win32_require("CreateThread", thread != NULL);
sched->kernel->win32_require("CreateThread", thread != NULL);
if (RUNNING_ON_VALGRIND)
Sleep(10);
#else
@ -70,7 +70,7 @@ rust_timer::rust_timer(rust_dom *dom) :
rust_timer::~rust_timer() {
exit_flag = 1;
#if defined(__WIN32__)
dom->kernel->win32_require("WaitForSingleObject",
sched->kernel->win32_require("WaitForSingleObject",
WaitForSingleObject(thread, INFINITE) ==
WAIT_OBJECT_0);
#else

View File

@ -23,7 +23,7 @@ str_buf(rust_task *task, rust_str *s);
extern "C" void
upcall_grow_task(rust_task *task, size_t n_frame_bytes) {
I(task->dom, false);
I(task->sched, false);
LOG_UPCALL_ENTRY(task);
task->grow(n_frame_bytes);
}
@ -31,44 +31,44 @@ upcall_grow_task(rust_task *task, size_t n_frame_bytes) {
extern "C" CDECL
void upcall_log_int(rust_task *task, uint32_t level, int32_t i) {
LOG_UPCALL_ENTRY(task);
if (task->dom->log_lvl >= level)
task->dom->log(task, level, "rust: %" PRId32 " (0x%" PRIx32 ")",
if (task->sched->log_lvl >= level)
task->sched->log(task, level, "rust: %" PRId32 " (0x%" PRIx32 ")",
i, i);
}
extern "C" CDECL
void upcall_log_float(rust_task *task, uint32_t level, float f) {
LOG_UPCALL_ENTRY(task);
if (task->dom->log_lvl >= level)
task->dom->log(task, level, "rust: %12.12f", f);
if (task->sched->log_lvl >= level)
task->sched->log(task, level, "rust: %12.12f", f);
}
extern "C" CDECL
void upcall_log_double(rust_task *task, uint32_t level, double *f) {
LOG_UPCALL_ENTRY(task);
if (task->dom->log_lvl >= level)
task->dom->log(task, level, "rust: %12.12f", *f);
if (task->sched->log_lvl >= level)
task->sched->log(task, level, "rust: %12.12f", *f);
}
extern "C" CDECL void
upcall_log_str(rust_task *task, uint32_t level, rust_str *str) {
LOG_UPCALL_ENTRY(task);
if (task->dom->log_lvl >= level) {
if (task->sched->log_lvl >= level) {
const char *c = str_buf(task, str);
task->dom->log(task, level, "rust: %s", c);
task->sched->log(task, level, "rust: %s", c);
}
}
extern "C" CDECL void
upcall_trace_word(rust_task *task, uintptr_t i) {
LOG_UPCALL_ENTRY(task);
task->dom->log(task, 2, "trace: 0x%" PRIxPTR "", i, i, (char) i);
task->sched->log(task, 2, "trace: 0x%" PRIxPTR "", i, i, (char) i);
}
extern "C" CDECL void
upcall_trace_str(rust_task *task, char const *c) {
LOG_UPCALL_ENTRY(task);
task->dom->log(task, 2, "trace: %s", c);
task->sched->log(task, 2, "trace: %s", c);
}
extern "C" CDECL rust_port*
@ -85,7 +85,7 @@ upcall_del_port(rust_task *task, rust_port *port) {
LOG_UPCALL_ENTRY(task);
scoped_lock with(task->kernel->scheduler_lock);
LOG(task, comm, "upcall del_port(0x%" PRIxPTR ")", (uintptr_t) port);
I(task->dom, !port->ref_count);
I(task->sched, !port->ref_count);
delete port;
}
@ -95,11 +95,11 @@ upcall_del_port(rust_task *task, rust_port *port) {
extern "C" CDECL rust_chan*
upcall_new_chan(rust_task *task, rust_port *port) {
LOG_UPCALL_ENTRY(task);
rust_dom *dom = task->dom;
rust_scheduler *sched = task->sched;
LOG(task, comm, "upcall_new_chan("
"task=0x%" PRIxPTR " (%s), port=0x%" PRIxPTR ")",
(uintptr_t) task, task->name, port);
I(dom, port);
I(sched, port);
return new (task) rust_chan(task, port, port->unit_sz);
}
@ -127,7 +127,7 @@ void upcall_del_chan(rust_task *task, rust_chan *chan) {
LOG(task, comm, "upcall del_chan(0x%" PRIxPTR ")", (uintptr_t) chan);
A(task->dom, chan->ref_count == 0,
A(task->sched, chan->ref_count == 0,
"Channel's ref count should be zero.");
if (chan->is_associated()) {
@ -174,7 +174,7 @@ upcall_clone_chan(rust_task *task, maybe_proxy<rust_task> *target,
target_task = target->referent();
} else {
rust_handle<rust_port> *handle =
task->dom->kernel->get_port_handle(port->as_referent());
task->sched->kernel->get_port_handle(port->as_referent());
maybe_proxy<rust_port> *proxy = new rust_proxy<rust_port> (handle);
LOG(task, mem, "new proxy: " PTR, proxy);
port = proxy;
@ -275,7 +275,7 @@ upcall_exit(rust_task *task) {
LOG_UPCALL_ENTRY(task);
scoped_lock with(task->kernel->scheduler_lock);
LOG(task, task, "task ref_count: %d", task->ref_count);
A(task->dom, task->ref_count >= 0,
A(task->sched, task->ref_count >= 0,
"Task ref_count should not be negative on exit!");
task->die();
task->notify_tasks_waiting_to_join();
@ -308,8 +308,8 @@ extern "C" CDECL void
upcall_free(rust_task *task, void* ptr, uintptr_t is_gc) {
LOG_UPCALL_ENTRY(task);
scoped_lock with(task->kernel->scheduler_lock);
rust_dom *dom = task->dom;
DLOG(dom, mem,
rust_scheduler *sched = task->sched;
DLOG(sched, mem,
"upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")",
(uintptr_t)ptr, is_gc);
task->free(ptr, (bool) is_gc);
@ -320,11 +320,11 @@ upcall_mark(rust_task *task, void* ptr) {
LOG_UPCALL_ENTRY(task);
scoped_lock with(task->kernel->scheduler_lock);
rust_dom *dom = task->dom;
rust_scheduler *sched = task->sched;
if (ptr) {
gc_alloc *gcm = (gc_alloc*) (((char*)ptr) - sizeof(gc_alloc));
uintptr_t marked = (uintptr_t) gcm->mark();
DLOG(dom, gc, "upcall mark(0x%" PRIxPTR ") = %" PRIdPTR,
DLOG(sched, gc, "upcall mark(0x%" PRIxPTR ") = %" PRIdPTR,
(uintptr_t)gcm, marked);
return marked;
}
@ -332,14 +332,14 @@ upcall_mark(rust_task *task, void* ptr) {
}
rust_str *make_str(rust_task *task, char const *s, size_t fill) {
rust_dom *dom = task->dom;
rust_scheduler *sched = task->sched;
size_t alloc = next_power_of_two(sizeof(rust_str) + fill);
void *mem = task->malloc(alloc);
if (!mem) {
task->fail(3);
return NULL;
}
rust_str *st = new (mem) rust_str(dom, alloc, fill, (uint8_t const *) s);
rust_str *st = new (mem) rust_str(sched, alloc, fill, (uint8_t const *) s);
LOG(task, mem,
"upcall new_str('%s', %" PRIdPTR ") = 0x%" PRIxPTR,
s, fill, st);
@ -366,15 +366,15 @@ extern "C" CDECL rust_vec *
upcall_new_vec(rust_task *task, size_t fill, type_desc *td) {
LOG_UPCALL_ENTRY(task);
scoped_lock with(task->kernel->scheduler_lock);
rust_dom *dom = task->dom;
DLOG(dom, mem, "upcall new_vec(%" PRIdPTR ")", fill);
rust_scheduler *sched = task->sched;
DLOG(sched, mem, "upcall new_vec(%" PRIdPTR ")", fill);
size_t alloc = next_power_of_two(sizeof(rust_vec) + fill);
void *mem = task->malloc(alloc, td);
if (!mem) {
task->fail(3);
return NULL;
}
rust_vec *v = new (mem) rust_vec(dom, alloc, 0, NULL);
rust_vec *v = new (mem) rust_vec(sched, alloc, 0, NULL);
LOG(task, mem,
"upcall new_vec(%" PRIdPTR ") = 0x%" PRIxPTR, fill, v);
return v;
@ -387,7 +387,7 @@ vec_grow(rust_task *task,
uintptr_t *need_copy,
type_desc *td)
{
rust_dom *dom = task->dom;
rust_scheduler *sched = task->sched;
LOG(task, mem,
"vec_grow(0x%" PRIxPTR ", %" PRIdPTR
"), rc=%" PRIdPTR " alloc=%" PRIdPTR ", fill=%" PRIdPTR
@ -438,10 +438,10 @@ vec_grow(rust_task *task,
if (v->ref_count != CONST_REFCOUNT)
v->deref();
v = new (mem) rust_vec(dom, alloc, 0, NULL);
v = new (mem) rust_vec(sched, alloc, 0, NULL);
*need_copy = 1;
}
I(dom, sizeof(rust_vec) + v->fill <= v->alloc);
I(sched, sizeof(rust_vec) + v->fill <= v->alloc);
return v;
}
@ -521,8 +521,8 @@ upcall_new_task(rust_task *spawner, rust_vec *name) {
// name is a rust string structure.
LOG_UPCALL_ENTRY(spawner);
scoped_lock with(spawner->kernel->scheduler_lock);
rust_dom *dom = spawner->dom;
rust_task *task = dom->create_task(spawner, (const char *)name->data);
rust_scheduler *sched = spawner->sched;
rust_task *task = sched->create_task(spawner, (const char *)name->data);
return task;
}
@ -534,8 +534,8 @@ upcall_start_task(rust_task *spawner,
size_t args_sz) {
LOG_UPCALL_ENTRY(spawner);
rust_dom *dom = spawner->dom;
DLOG(dom, task,
rust_scheduler *sched = spawner->sched;
DLOG(sched, task,
"upcall start_task(task %s @0x%" PRIxPTR
", spawnee 0x%" PRIxPTR ")",
task->name, task,
@ -563,7 +563,7 @@ upcall_ivec_resize(rust_task *task,
rust_ivec *v,
size_t newsz) {
scoped_lock with(task->kernel->scheduler_lock);
I(task->dom, !v->fill);
I(task->sched, !v->fill);
size_t new_alloc = next_power_of_two(newsz);
rust_ivec_heap *new_heap_part = (rust_ivec_heap *)

View File

@ -25,24 +25,24 @@ ptr_vec<T>::ptr_vec(rust_task *task) :
fill(0),
data(new (task) T*[alloc])
{
I(task->dom, data);
DLOG(task->dom, mem, "new ptr_vec(data=0x%" PRIxPTR ") -> 0x%" PRIxPTR,
I(task->sched, data);
DLOG(task->sched, mem, "new ptr_vec(data=0x%" PRIxPTR ") -> 0x%" PRIxPTR,
(uintptr_t)data, (uintptr_t)this);
}
template <typename T>
ptr_vec<T>::~ptr_vec()
{
I(task->dom, data);
DLOG(task->dom, mem, "~ptr_vec 0x%" PRIxPTR ", data=0x%" PRIxPTR,
I(task->sched, data);
DLOG(task->sched, mem, "~ptr_vec 0x%" PRIxPTR ", data=0x%" PRIxPTR,
(uintptr_t)this, (uintptr_t)data);
I(task->dom, fill == 0);
I(task->sched, fill == 0);
task->free(data);
}
template <typename T> T *&
ptr_vec<T>::operator[](size_t offset) {
I(task->dom, data[offset]->idx == offset);
I(task->sched, data[offset]->idx == offset);
return data[offset];
}
@ -50,14 +50,14 @@ template <typename T>
void
ptr_vec<T>::push(T *p)
{
I(task->dom, data);
I(task->dom, fill <= alloc);
I(task->sched, data);
I(task->sched, fill <= alloc);
if (fill == alloc) {
alloc *= 2;
data = (T **)task->realloc(data, alloc * sizeof(T*));
I(task->dom, data);
I(task->sched, data);
}
I(task->dom, fill < alloc);
I(task->sched, fill < alloc);
p->idx = fill;
data[fill++] = p;
}
@ -80,13 +80,13 @@ template <typename T>
void
ptr_vec<T>::trim(size_t sz)
{
I(task->dom, data);
I(task->sched, data);
if (sz <= (alloc / 4) &&
(alloc / 2) >= INIT_SIZE) {
alloc /= 2;
I(task->dom, alloc >= fill);
I(task->sched, alloc >= fill);
data = (T **)task->realloc(data, alloc * sizeof(T*));
I(task->dom, data);
I(task->sched, data);
}
}
@ -95,9 +95,9 @@ void
ptr_vec<T>::swap_delete(T *item)
{
/* Swap the endpoint into i and decr fill. */
I(task->dom, data);
I(task->dom, fill > 0);
I(task->dom, item->idx < fill);
I(task->sched, data);
I(task->sched, fill > 0);
I(task->sched, item->idx < fill);
fill--;
if (fill > 0) {
T *subst = data[fill];
@ -127,22 +127,22 @@ next_power_of_two(size_t s)
// Initialization helper for ISAAC RNG
static inline void
isaac_init(rust_dom *dom, randctx *rctx)
isaac_init(rust_scheduler *sched, randctx *rctx)
{
memset(rctx, 0, sizeof(randctx));
#ifdef __WIN32__
{
HCRYPTPROV hProv;
dom->kernel->win32_require
sched->kernel->win32_require
(_T("CryptAcquireContext"),
CryptAcquireContext(&hProv, NULL, NULL, PROV_RSA_FULL,
CRYPT_VERIFYCONTEXT|CRYPT_SILENT));
dom->kernel->win32_require
sched->kernel->win32_require
(_T("CryptGenRandom"),
CryptGenRandom(hProv, sizeof(rctx->randrsl),
(BYTE*)(&rctx->randrsl)));
dom->kernel->win32_require
sched->kernel->win32_require
(_T("CryptReleaseContext"),
CryptReleaseContext(hProv, 0));
}
@ -156,11 +156,11 @@ isaac_init(rust_dom *dom, randctx *rctx)
}
} else {
int fd = open("/dev/urandom", O_RDONLY);
I(dom, fd > 0);
I(dom,
I(sched, fd > 0);
I(sched,
read(fd, (void*) &rctx->randrsl, sizeof(rctx->randrsl))
== sizeof(rctx->randrsl));
I(dom, close(fd) == 0);
I(sched, close(fd) == 0);
}
#endif
randinit(rctx, 1);
@ -175,9 +175,10 @@ rust_vec : public rc_base<rust_vec>
size_t fill;
size_t pad; // Pad to align data[0] to 16 bytes.
uint8_t data[];
rust_vec(rust_dom *dom, size_t alloc, size_t fill, uint8_t const *d) :
alloc(alloc),
fill(fill)
rust_vec(rust_scheduler *sched, size_t alloc, size_t fill,
uint8_t const *d)
: alloc(alloc),
fill(fill)
{
if (d)
memcpy(&data[0], d, fill);