Relax some atomic barriers. Loosen up all that tension. There, doesn't that feel good?

Ben Blum 2013-07-31 15:56:18 -04:00
parent 479809a267
commit 880246618b
3 changed files with 23 additions and 22 deletions

View File

@@ -18,7 +18,7 @@ use kinds::Send;
use rt::sched::Scheduler;
use rt::local::Local;
use rt::select::{Select, SelectPort};
-use unstable::atomics::{AtomicUint, AtomicOption, Acquire, Release, SeqCst};
+use unstable::atomics::{AtomicUint, AtomicOption, Acquire, Relaxed, SeqCst};
use unstable::sync::UnsafeAtomicRcBox;
use util::Void;
use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable};
@@ -216,15 +216,15 @@ impl<T> Select for PortOne<T> {
}
STATE_ONE => {
// Re-record that we are the only owner of the packet.
-// Release barrier needed in case the task gets reawoken
-// on a different core (this is analogous to writing a
-// payload; a barrier in enqueueing the task protects it).
+// No barrier needed, even if the task gets reawoken
+// on a different core -- this is analogous to writing a
+// payload; a barrier in enqueueing the task protects it.
// NB(#8132). This *must* occur before the enqueue below.
// FIXME(#6842, #8130) This is usually only needed for the
// assertion in recv_ready, except in the case of select().
// This won't actually ever have cacheline contention, but
// maybe should be optimized out with a cfg(test) anyway?
-(*self.packet()).state.store(STATE_ONE, Release);
+(*self.packet()).state.store(STATE_ONE, Relaxed);
rtdebug!("rendezvous recv");
sched.metrics.rendezvous_recvs += 1;
@@ -299,7 +299,7 @@ impl<T> SelectPort<T> for PortOne<T> {
unsafe {
// See corresponding store() above in block_on for rationale.
// FIXME(#8130) This can happen only in test builds.
-assert!((*packet).state.load(Acquire) == STATE_ONE);
+assert!((*packet).state.load(Relaxed) == STATE_ONE);
let payload = (*packet).payload.take();
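
The reasoning in the comments above can be sketched outside the runtime. Below is a minimal illustration using today's std::sync::atomic API (not the 2013 unstable::atomics API, with made-up names): a Relaxed store to a state word is still reliably observed as long as the handoff that enqueues the task carries the Release/Acquire edge, which is what the "barrier in enqueueing the task protects it" comment appeals to.

// Illustrative sketch only; `enqueued` stands in for the scheduler handoff.
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

const STATE_ONE: usize = 1;

fn main() {
    let state = Arc::new(AtomicUsize::new(0));
    let enqueued = Arc::new(AtomicBool::new(false));

    let (s, e) = (state.clone(), enqueued.clone());
    let producer = thread::spawn(move || {
        s.store(STATE_ONE, Ordering::Relaxed); // analogous to the store in block_on
        e.store(true, Ordering::Release);      // "enqueue": publishes everything above
    });

    // "dequeue": the Acquire load pairs with the Release store above.
    while !enqueued.load(Ordering::Acquire) {}
    // Having synchronized on the handoff, even a Relaxed load must see STATE_ONE,
    // which is the property the assertion in recv_ready relies on.
    assert_eq!(state.load(Ordering::Relaxed), STATE_ONE);

    producer.join().unwrap();
}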

View File

@@ -17,7 +17,7 @@ use option::{Option, Some, None};
use prelude::*;
use rt::task::Task;
use to_bytes::IterBytes;
-use unstable::atomics::{AtomicUint, Acquire, SeqCst};
+use unstable::atomics::{AtomicUint, Relaxed};
use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
use util;
@@ -95,7 +95,7 @@ impl Drop for KillFlag {
// Letting a KillFlag with a task inside get dropped would leak the task.
// We could free it here, but the task should get awoken by hand somehow.
fn drop(&self) {
-match self.load(Acquire) {
+match self.load(Relaxed) {
KILL_RUNNING | KILL_KILLED => { },
_ => rtabort!("can't drop kill flag with a blocked task inside!"),
}
@@ -124,7 +124,7 @@ impl BlockedTask {
Unkillable(task) => Some(task),
Killable(flag_arc) => {
let flag = unsafe { &mut **flag_arc.get() };
-match flag.swap(KILL_RUNNING, SeqCst) {
+match flag.swap(KILL_RUNNING, Relaxed) {
KILL_RUNNING => None, // woken from select(), perhaps
KILL_KILLED => None, // a killer stole it already
task_ptr =>
@@ -159,7 +159,7 @@ impl BlockedTask {
let flag = &mut **flag_arc.get();
let task_ptr = cast::transmute(task);
// Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
-match flag.compare_and_swap(KILL_RUNNING, task_ptr, SeqCst) {
+match flag.compare_and_swap(KILL_RUNNING, task_ptr, Relaxed) {
KILL_RUNNING => Right(Killable(flag_arc)),
KILL_KILLED => Left(revive_task_ptr(task_ptr, Some(flag_arc))),
x => rtabort!("can't block task! kill flag = %?", x),
@@ -257,7 +257,7 @@ impl KillHandle {
let inner = unsafe { &mut *self.get() };
// Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
// FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
-match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, SeqCst) {
+match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, Relaxed) {
KILL_RUNNING => { }, // normal case
KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
_ => rtabort!("inhibit_kill: task already unkillable"),
@@ -270,7 +270,7 @@ impl KillHandle {
let inner = unsafe { &mut *self.get() };
// Expect flag to contain UNKILLABLE. If KILLED, it should stay KILLED.
// FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
-match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, SeqCst) {
+match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, Relaxed) {
KILL_UNKILLABLE => { }, // normal case
KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
_ => rtabort!("allow_kill: task already killable"),
@@ -281,10 +281,10 @@ impl KillHandle {
// if it was blocked and needs punted awake. To be called by other tasks.
pub fn kill(&mut self) -> Option<~Task> {
let inner = unsafe { &mut *self.get() };
-if inner.unkillable.swap(KILL_KILLED, SeqCst) == KILL_RUNNING {
+if inner.unkillable.swap(KILL_KILLED, Relaxed) == KILL_RUNNING {
// Got in. Allowed to try to punt the task awake.
let flag = unsafe { &mut *inner.killed.get() };
-match flag.swap(KILL_KILLED, SeqCst) {
+match flag.swap(KILL_KILLED, Relaxed) {
// Task either not blocked or already taken care of.
KILL_RUNNING | KILL_KILLED => None,
// Got ownership of the blocked task.
@@ -306,8 +306,11 @@ impl KillHandle {
// is unkillable with a kill signal pending.
let inner = unsafe { &*self.get() };
let flag = unsafe { &*inner.killed.get() };
-// FIXME(#6598): can use relaxed ordering (i think)
-flag.load(Acquire) == KILL_KILLED
+// A barrier-related concern here is that a task that gets killed
+// awake needs to see the killer's write of KILLED to this flag. This
+// is analogous to receiving a pipe payload; the appropriate barrier
+// should happen when enqueueing the task.
+flag.load(Relaxed) == KILL_KILLED
}
pub fn notify_immediate_failure(&mut self) {
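
The kill-flag swaps and compare-and-swaps above can be relaxed because only the value returned by the read-modify-write matters; nothing else is published through the flag itself. Here is a minimal sketch of that property, with illustrative names and today's std::sync::atomic API rather than unstable::atomics: even with Relaxed ordering, exactly one racing killer observes KILL_RUNNING, because atomic RMWs on a single location are totally ordered.

// Illustrative sketch only; not the KillHandle code itself.
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

const KILL_RUNNING: usize = 0;
const KILL_KILLED: usize = 1;

fn main() {
    let flag = Arc::new(AtomicUsize::new(KILL_RUNNING));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let f = flag.clone();
            // Like kill(): whoever swaps KILL_RUNNING out "got in".
            thread::spawn(move || f.swap(KILL_KILLED, Ordering::Relaxed) == KILL_RUNNING)
        })
        .collect();
    let got_in = handles
        .into_iter()
        .map(|h| h.join().unwrap())
        .filter(|&won| won)
        .count();
    // Atomicity alone guarantees a single winner, regardless of ordering.
    assert_eq!(got_in, 1);
}

Visibility of whatever the winner then does to the blocked task still has to come from somewhere else, which is what the enqueue-barrier comment added to killed() above points at.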

View File

@@ -16,7 +16,7 @@ use ptr;
use option::*;
use either::{Either, Left, Right};
use task;
-use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,SeqCst};
+use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,Relaxed,SeqCst};
use unstable::finally::Finally;
use ops::Drop;
use clone::Clone;
@@ -95,8 +95,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
pub fn get(&self) -> *mut T {
unsafe {
let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
-// FIXME(#6598) Change Acquire to Relaxed.
-assert!(data.count.load(Acquire) > 0);
+assert!(data.count.load(Relaxed) > 0);
let r: *mut T = data.data.get_mut_ref();
cast::forget(data);
return r;
@@ -107,7 +106,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
pub fn get_immut(&self) -> *T {
unsafe {
let data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
-assert!(data.count.load(Acquire) > 0); // no barrier is really needed
+assert!(data.count.load(Relaxed) > 0);
let r: *T = data.data.get_ref();
cast::forget(data);
return r;
@@ -130,8 +129,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
// Try to put our server end in the unwrapper slot.
// This needs no barrier -- it's protected by the release barrier on
// the xadd, and the acquire+release barrier in the destructor's xadd.
-// FIXME(#6598) Change Acquire to Relaxed.
-if data.unwrapper.fill(~(c1,p2), Acquire).is_none() {
+if data.unwrapper.fill(~(c1,p2), Relaxed).is_none() {
// Got in. Tell this handle's destructor not to run (we are now it).
this.data = ptr::mut_null();
// Drop our own reference.
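
The relaxed asserts and the unwrapper fill above lean on the reference-count discipline named in the comment: the release barrier on the xadd and the acquire+release barrier in the destructor's xadd. A minimal sketch of that discipline in today's std::sync::atomic API (illustrative code, not the UnsafeAtomicRcBox implementation): increments can be Relaxed because a new reference is only ever created from a live one, while the decrement is Release with an Acquire fence before teardown so the last owner sees every other owner's writes.

// Illustrative sketch only.
use std::sync::atomic::{fence, AtomicUsize, Ordering};

struct RcBoxData {
    count: AtomicUsize,
    // shared payload would live here
}

fn clone_ref(data: &RcBoxData) {
    // Relaxed is enough: we already hold a reference, so the count is >= 1
    // and nothing else is published through this increment.
    let old = data.count.fetch_add(1, Ordering::Relaxed);
    debug_assert!(old > 0); // same spirit as the Relaxed asserts in get()/get_immut()
}

fn drop_ref(data: &RcBoxData) -> bool {
    // Release: our writes to the payload happen-before the count reaching zero.
    if data.count.fetch_sub(1, Ordering::Release) == 1 {
        // Acquire: pairs with every other owner's Release decrement, so their
        // writes are visible before the payload is torn down.
        fence(Ordering::Acquire);
        return true; // caller may now free the payload
    }
    false
}

fn main() {
    let data = RcBoxData { count: AtomicUsize::new(1) };
    clone_ref(&data);
    assert!(!drop_ref(&data));
    assert!(drop_ref(&data)); // last reference gone
}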