Avoid lifecycle_lock traffic in call_on_rust_stack. (close #3270)

Ben Blum 2012-08-24 17:26:42 -04:00
parent e55c5ceac2
commit 5ba7434cb1
2 changed files with 13 additions and 10 deletions


@@ -248,6 +248,13 @@ MUST_CHECK bool rust_task::yield() {
     // This check is largely superfluous; it's the one after the context swap
     // that really matters. This one allows us to assert a useful invariant.
+    // NB: This takes lifecycle_lock three times, and I believe that none of
+    // them are actually necessary, as per #3213. Removing the locks here may
+    // cause *harmless* races with a killer... but I didn't observe any
+    // substantial performance improvement from removing them, even with
+    // msgsend-ring-pipes, and also it's my last day, so I'm not about to
+    // remove them. -- bblum
     if (must_fail_from_being_killed()) {
         {
             scoped_lock with(lifecycle_lock);
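
Each scoped_lock with(lifecycle_lock) above is an RAII guard, so every occurrence is one lock/unlock round trip on lifecycle_lock; that is the "three times" the new comment is weighing against a win it could not measure. Below is a minimal sketch of the idiom, with std::mutex standing in for the runtime's actual lock type; the names are purely illustrative, not the runtime's definitions.

#include <mutex>

// Illustrative only: the runtime's real scoped_lock lives in its sync code.
template <typename Lock>
class scoped_lock_sketch {
    Lock &lock_;
public:
    explicit scoped_lock_sketch(Lock &l) : lock_(l) { lock_.lock(); }  // acquire on entry
    ~scoped_lock_sketch() { lock_.unlock(); }                          // release on scope exit
    scoped_lock_sketch(const scoped_lock_sketch &) = delete;
    scoped_lock_sketch &operator=(const scoped_lock_sketch &) = delete;
};

void yield_like_check(std::mutex &lifecycle_lock) {
    scoped_lock_sketch<std::mutex> with(lifecycle_lock);  // one round trip per guard
    // ... inspect kill/lifecycle state while the lock is held ...
}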


@@ -431,12 +431,11 @@ rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
     assert(get_sp_limit() != 0 && "Stack must be configured");
     assert(next_rust_sp);
-    bool had_reentered_rust_stack;
-    {
-        scoped_lock with(lifecycle_lock);
-        had_reentered_rust_stack = reentered_rust_stack;
-        reentered_rust_stack = true;
-    }
+    // Unlocked access. Might "race" a killer, but harmlessly. This code is
+    // only run by the task itself, so cannot race itself. See the comment
+    // above inhibit_kill (or #3213) in rust_task.cpp for justification.
+    bool had_reentered_rust_stack = reentered_rust_stack;
+    reentered_rust_stack = true;
     uintptr_t prev_c_sp = next_c_sp;
     next_c_sp = get_sp();
@@ -448,10 +447,7 @@ rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
     __morestack(args, fn_ptr, sp);
     next_c_sp = prev_c_sp;
-    {
-        scoped_lock with(lifecycle_lock);
-        reentered_rust_stack = had_reentered_rust_stack;
-    }
+    reentered_rust_stack = had_reentered_rust_stack;
     record_sp_limit(0);
 }
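
The call_on_rust_stack change rests on the single-writer argument in the new comment: reentered_rust_stack is only ever written by the task's own thread, so the save/set/restore around the stack switch can skip lifecycle_lock, and a killer thread that peeks at the flag can only observe a harmlessly stale value. Below is a minimal sketch of that pattern, not the runtime's actual code; std::atomic with relaxed ordering is used only so the sketch is well-defined under the C++11 memory model, whereas the 2012 runtime kept a plain bool and accepted the benign race.

#include <atomic>
#include <mutex>

struct task_sketch {
    std::mutex lifecycle_lock;                      // guards lifecycle/kill state
    std::atomic<bool> reentered_rust_stack{false};  // written only by the owner thread

    // Runs only on the task's own thread, so it cannot race itself; the
    // lifecycle lock is skipped, mirroring the diff above.
    void call_on_rust_stack_sketch(void (*fn)()) {
        bool had_reentered_rust_stack =
            reentered_rust_stack.load(std::memory_order_relaxed);
        reentered_rust_stack.store(true, std::memory_order_relaxed);
        fn();  // stands in for __morestack(args, fn_ptr, sp)
        reentered_rust_stack.store(had_reentered_rust_stack, std::memory_order_relaxed);
    }

    // A killer thread may read a momentarily stale value here; that only
    // delays its kill decision, it never corrupts task state.
    bool killer_observes_reentered() {
        std::lock_guard<std::mutex> with(lifecycle_lock);
        return reentered_rust_stack.load(std::memory_order_relaxed);
    }
};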