From 5d2442d89cbfbba03545a877b7768fe2ac34ef82 Mon Sep 17 00:00:00 2001
From: Brian Anderson
Date: Thu, 9 Feb 2012 11:51:34 -0800
Subject: [PATCH] rt: Add upcall_call_shim_on_rust_stack

---
 src/rt/rust_task.cpp   | 40 ++++++++++++++++++++++++++++++++++++++--
 src/rt/rust_task.h     |  2 ++
 src/rt/rust_upcall.cpp | 27 ++++++++++++++++++++++++++-
 src/rt/rustrt.def.in   |  1 +
 4 files changed, 67 insertions(+), 3 deletions(-)

diff --git a/src/rt/rust_task.cpp b/src/rt/rust_task.cpp
index a025800e4ae..6d61065d551 100644
--- a/src/rt/rust_task.cpp
+++ b/src/rt/rust_task.cpp
@@ -90,7 +90,8 @@ rust_task::rust_task(rust_task_thread *thread, rust_task_list *state,
     cc_counter(0),
     total_stack_sz(0),
     c_stack(NULL),
-    next_c_sp(0)
+    next_c_sp(0),
+    next_rust_sp(0)
 {
     LOGPTR(thread, "new task", (uintptr_t)this);
     DLOG(thread, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);
@@ -658,6 +659,7 @@ rust_task::prev_stack() {
 
 void
 rust_task::record_stack_limit() {
+    I(thread, stk);
     // The function prolog compares the amount of stack needed to the end of
     // the stack. As an optimization, when the frame size is less than 256
     // bytes, it will simply compare %esp to the stack limit instead of
@@ -732,18 +734,40 @@ rust_task::config_notify(chan_handle chan) {
 extern "C" void
 __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
 
+static uintptr_t
+sanitize_next_sp(uintptr_t next_sp) {
+
+    // Since I'm not precisely sure where the next stack pointer sits in
+    // relation to where the context switch actually happened, nor in relation
+    // to the amount of stack needed for calling __morestack, I've added some
+    // extra bytes here.
+
+    // FIXME: On the rust stack this potentially puts us quite far into the
+    // red zone. Might want to just allocate a new rust stack every time we
+    // switch back to rust.
+    const uintptr_t padding = 16;
+
+    return align_down(next_sp - padding);
+}
+
 void
 rust_task::call_on_c_stack(void *args, void *fn_ptr) {
     I(thread, on_rust_stack());
 
+    next_rust_sp = get_sp();
+
     bool borrowed_a_c_stack = false;
+    uintptr_t sp;
     if (c_stack == NULL) {
         c_stack = thread->borrow_c_stack();
         next_c_sp = align_down(c_stack->end);
+        sp = next_c_sp;
         borrowed_a_c_stack = true;
+    } else {
+        sp = sanitize_next_sp(next_c_sp);
     }
 
-    __morestack(args, fn_ptr, next_c_sp);
+    __morestack(args, fn_ptr, sp);
 
     // Note that we may not actually get here if we threw an exception,
     // in which case we will return the c stack when the exception is caught.
@@ -752,6 +776,18 @@ rust_task::call_on_c_stack(void *args, void *fn_ptr) {
     }
 }
 
+void
+rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
+    I(thread, !on_rust_stack());
+    I(thread, next_rust_sp);
+
+    next_c_sp = get_sp();
+
+    uintptr_t sp = sanitize_next_sp(next_rust_sp);
+
+    __morestack(args, fn_ptr, sp);
+}
+
 void
 rust_task::return_c_stack() {
     I(thread, on_rust_stack());
diff --git a/src/rt/rust_task.h b/src/rt/rust_task.h
index 5ac2d7a212e..0a6704b64cc 100644
--- a/src/rt/rust_task.h
+++ b/src/rt/rust_task.h
@@ -110,6 +110,7 @@ private:
     // The stack used for running C code, borrowed from the scheduler thread
     stk_seg *c_stack;
     uintptr_t next_c_sp;
+    uintptr_t next_rust_sp;
 
     // Called when the atomic refcount reaches zero
     void delete_this();
@@ -194,6 +195,7 @@ public:
     void config_notify(chan_handle chan);
 
     void call_on_c_stack(void *args, void *fn_ptr);
+    void call_on_rust_stack(void *args, void *fn_ptr);
 };
 
 //
diff --git a/src/rt/rust_upcall.cpp b/src/rt/rust_upcall.cpp
index d12e7311303..8751a4b13e5 100644
--- a/src/rt/rust_upcall.cpp
+++ b/src/rt/rust_upcall.cpp
@@ -71,13 +71,38 @@ upcall_call_shim_on_c_stack(void *args, void *fn_ptr) {
     try {
         task->call_on_c_stack(args, fn_ptr);
     } catch (...) {
-        A(task->thread, false, "Native code threw an exception");
+        LOG_ERR(task, task, "Native code threw an exception");
+        abort();
     }
 
     task = rust_task_thread::get_task();
     task->record_stack_limit();
 }
 
+/*
+ * The opposite of the above: starts on the C stack and switches to the
+ * Rust stack. This is the only upcall that runs from the C stack.
+ */
+extern "C" CDECL void
+upcall_call_shim_on_rust_stack(void *args, void *fn_ptr) {
+    rust_task *task = rust_task_thread::get_task();
+
+    // FIXME: Because of the hack in the other function that disables the
+    // stack limit when entering the C stack, here we restore the stack limit
+    // again.
+    task->record_stack_limit();
+
+    try {
+        task->call_on_rust_stack(args, fn_ptr);
+    } catch (...) {
+        // We can't count on being able to unwind through arbitrary
+        // code. Our best option is to just fail hard.
+        LOG_ERR(task, task,
+                "Rust task failed after reentering the Rust stack");
+        abort();
+    }
+}
+
 /**********************************************************************/
 
 struct s_fail_args {
diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in
index 0a1a6fc913b..f1e543ed6d4 100644
--- a/src/rt/rustrt.def.in
+++ b/src/rt/rustrt.def.in
@@ -75,6 +75,7 @@ upcall_shared_free
 upcall_vec_grow
 upcall_vec_push
 upcall_call_shim_on_c_stack
+upcall_call_shim_on_rust_stack
 upcall_new_stack
 upcall_del_stack
 upcall_reset_stack_limit
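
Note for reviewers: the sketch below isolates the stack-pointer sanitization
that both call_on_c_stack and call_on_rust_stack now rely on. It is a minimal
standalone illustration, not part of the patch: the behavior of align_down
(rounding down to a 16-byte boundary) and the sample stack-pointer value are
assumptions made for the example.

#include <stdint.h>
#include <stdio.h>

// Assumed behavior of the runtime's align_down (its definition is not shown
// in this patch): round a stack pointer down to a 16-byte boundary.
static uintptr_t align_down(uintptr_t sp) {
    return sp & ~(uintptr_t)15;
}

// Mirrors sanitize_next_sp from the patch: back off a few bytes from the
// saved stack pointer, then realign, so the resumed stack has some slack
// below the point where the previous context switch happened.
static uintptr_t sanitize_next_sp(uintptr_t next_sp) {
    const uintptr_t padding = 16;
    return align_down(next_sp - padding);
}

int main() {
    uintptr_t saved_sp = 0x7ffe0000f0a8;  // hypothetical saved stack pointer
    printf("sanitized sp: 0x%lx\n", (unsigned long)sanitize_next_sp(saved_sp));
    return 0;
}

With this hypothetical value, the saved pointer first drops by 16 bytes and is
then rounded down to 0x7ffe0000f090, i.e. the call runs below the saved
pointer; that is the drift into the red zone that the FIXME in
sanitize_next_sp calls out when switching back to the Rust stack.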