core: Make some parts of task private

Brian Anderson 2012-10-03 22:06:51 -07:00
parent c2fc7316a9
commit ae42318bef
3 changed files with 36 additions and 36 deletions
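The change is mechanical: helpers, types, and tests in core's task code that were exported with pub lose the qualifier and become private to their module. A minimal sketch of what that buys, in present-day Rust syntax (the spawn_named wrapper is hypothetical, not code from this commit):

    mod task {
        // Still exported: the public face of the module.
        pub fn spawn_named(name: &str) {
            let _set = new_taskset(); // private helpers remain callable from inside
            println!("spawning {}", name);
        }

        // No longer `pub`: an implementation detail that code outside
        // `mod task` can neither see nor depend on.
        fn new_taskset() -> Vec<usize> {
            Vec::new()
        }
    }

    fn main() {
        task::spawn_named("worker");
        // task::new_taskset(); // error[E0603]: function `new_taskset` is private
    }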

View File

@@ -78,7 +78,7 @@ pub unsafe fn local_data_modify<T: Owned>(
}
#[test]
-pub fn test_tls_multitask() unsafe {
+fn test_tls_multitask() unsafe {
fn my_key(_x: @~str) { }
local_data_set(my_key, @~"parent data");
do task::spawn unsafe {
@@ -94,7 +94,7 @@ pub fn test_tls_multitask() unsafe {
}
#[test]
-pub fn test_tls_overwrite() unsafe {
+fn test_tls_overwrite() unsafe {
fn my_key(_x: @~str) { }
local_data_set(my_key, @~"first data");
local_data_set(my_key, @~"next data"); // Shouldn't leak.
@@ -102,7 +102,7 @@ pub fn test_tls_overwrite() unsafe {
}
#[test]
-pub fn test_tls_pop() unsafe {
+fn test_tls_pop() unsafe {
fn my_key(_x: @~str) { }
local_data_set(my_key, @~"weasel");
assert *(local_data_pop(my_key).get()) == ~"weasel";
@@ -111,7 +111,7 @@ pub fn test_tls_pop() unsafe {
}
#[test]
-pub fn test_tls_modify() unsafe {
+fn test_tls_modify() unsafe {
fn my_key(_x: @~str) { }
local_data_modify(my_key, |data| {
match data {
@@ -130,7 +130,7 @@ pub fn test_tls_modify() unsafe {
}
#[test]
-pub fn test_tls_crust_automorestack_memorial_bug() unsafe {
+fn test_tls_crust_automorestack_memorial_bug() unsafe {
// This might result in a stack-canary clobber if the runtime fails to set
// sp_limit to 0 when calling the cleanup extern - it might automatically
// jump over to the rust stack, which causes next_c_sp to get recorded as
@@ -143,7 +143,7 @@ pub fn test_tls_crust_automorestack_memorial_bug() unsafe {
}
#[test]
-pub fn test_tls_multiple_types() unsafe {
+fn test_tls_multiple_types() unsafe {
fn str_key(_x: @~str) { }
fn box_key(_x: @@()) { }
fn int_key(_x: @int) { }
@@ -155,7 +155,7 @@ pub fn test_tls_multiple_types() unsafe {
}
#[test]
-pub fn test_tls_overwrite_multiple_types() {
+fn test_tls_overwrite_multiple_types() {
fn str_key(_x: @~str) { }
fn box_key(_x: @@()) { }
fn int_key(_x: @int) { }
@@ -171,7 +171,7 @@ pub fn test_tls_overwrite_multiple_types() {
#[test]
#[should_fail]
#[ignore(cfg(windows))]
-pub fn test_tls_cleanup_on_failure() unsafe {
+fn test_tls_cleanup_on_failure() unsafe {
fn str_key(_x: @~str) { }
fn box_key(_x: @@()) { }
fn int_key(_x: @int) { }

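The tests above exercise task-local storage keyed by a function item: what a value set in the parent looks like from a spawned child, popping a value back out, modifying it in place, and mixing keys of several types. A loose modern analogue of the parent/child isolation (thread_local! plus RefCell here, not the local_data_* API under test):

    use std::cell::RefCell;
    use std::thread;

    thread_local! {
        // Rough stand-in for the @~str slot the tests set, pop, and modify.
        static MY_KEY: RefCell<Option<String>> = RefCell::new(None);
    }

    fn main() {
        MY_KEY.with(|slot| *slot.borrow_mut() = Some("parent data".to_string()));
        thread::spawn(|| {
            // A freshly spawned thread starts with an empty slot of its own.
            MY_KEY.with(|slot| assert!(slot.borrow().is_none()));
            MY_KEY.with(|slot| *slot.borrow_mut() = Some("child data".to_string()));
        })
        .join()
        .unwrap();
        // The child's writes never touch the parent's slot.
        MY_KEY.with(|slot| assert_eq!(slot.borrow().as_deref(), Some("parent data")));
    }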
View File

@@ -17,11 +17,11 @@ impl LocalData: Eq {
// We use dvec because it's the best data structure in core. If TLS is used
// heavily in future, this could be made more efficient with a proper map.
-pub type TaskLocalElement = (*libc::c_void, *libc::c_void, LocalData);
+type TaskLocalElement = (*libc::c_void, *libc::c_void, LocalData);
// Has to be a pointer at outermost layer; the foreign call returns void *.
-pub type TaskLocalMap = @dvec::DVec<Option<TaskLocalElement>>;
+type TaskLocalMap = @dvec::DVec<Option<TaskLocalElement>>;
-pub extern fn cleanup_task_local_map(map_ptr: *libc::c_void) unsafe {
+extern fn cleanup_task_local_map(map_ptr: *libc::c_void) unsafe {
assert !map_ptr.is_null();
// Get and keep the single reference that was created at the beginning.
let _map: TaskLocalMap = cast::reinterpret_cast(&map_ptr);
@@ -29,7 +29,7 @@ pub extern fn cleanup_task_local_map(map_ptr: *libc::c_void) unsafe {
}
// Gets the map from the runtime. Lazily initialises if not done so already.
-pub unsafe fn get_task_local_map(task: *rust_task) -> TaskLocalMap {
+unsafe fn get_task_local_map(task: *rust_task) -> TaskLocalMap {
// Relies on the runtime initialising the pointer to null.
// NOTE: The map's box lives in TLS invisibly referenced once. Each time
@@ -52,7 +52,7 @@ pub unsafe fn get_task_local_map(task: *rust_task) -> TaskLocalMap {
}
}
-pub unsafe fn key_to_key_value<T: Owned>(
+unsafe fn key_to_key_value<T: Owned>(
key: LocalDataKey<T>) -> *libc::c_void {
// Keys are closures, which are (fnptr,envptr) pairs. Use fnptr.
@@ -62,7 +62,7 @@ pub unsafe fn key_to_key_value<T: Owned>(
}
// If returning Some(..), returns with @T with the map's reference. Careful!
-pub unsafe fn local_data_lookup<T: Owned>(
+unsafe fn local_data_lookup<T: Owned>(
map: TaskLocalMap, key: LocalDataKey<T>)
-> Option<(uint, *libc::c_void)> {
@@ -80,7 +80,7 @@ pub unsafe fn local_data_lookup<T: Owned>(
}
}
-pub unsafe fn local_get_helper<T: Owned>(
+unsafe fn local_get_helper<T: Owned>(
task: *rust_task, key: LocalDataKey<T>,
do_pop: bool) -> Option<@T> {

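key_to_key_value above relies on the shape of a key: each key is a closure, i.e. a (fnptr, envptr) pair, and only the code pointer is used to identify a slot in the task-local map. A tiny sketch of that keying idea in present-day Rust (the key_addr helper is hypothetical; the real map is the @dvec shown above):

    use std::collections::HashMap;

    // Keys are plain function items; a function's address identifies its slot,
    // much as key_to_key_value keeps only the fnptr half of the closure pair.
    fn str_key(_x: String) {}
    fn int_key(_x: i32) {}

    fn key_addr<T>(key: fn(T)) -> usize {
        key as usize // fn pointer -> integer address
    }

    fn main() {
        let mut map: HashMap<usize, &'static str> = HashMap::new();
        map.insert(key_addr(str_key), "weasel");
        map.insert(key_addr(int_key), "42");
        assert_eq!(map[&key_addr(str_key)], "weasel");
        assert_eq!(map[&key_addr(int_key)], "42");
    }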
View File

@@ -69,16 +69,16 @@ macro_rules! move_it (
{ $x:expr } => { unsafe { let y <- *ptr::addr_of(&($x)); move y } }
)
-pub type TaskSet = send_map::linear::LinearMap<*rust_task,()>;
+type TaskSet = send_map::linear::LinearMap<*rust_task,()>;
-pub fn new_taskset() -> TaskSet {
+fn new_taskset() -> TaskSet {
send_map::linear::LinearMap()
}
-pub fn taskset_insert(tasks: &mut TaskSet, task: *rust_task) {
+fn taskset_insert(tasks: &mut TaskSet, task: *rust_task) {
let didnt_overwrite = tasks.insert(task, ());
assert didnt_overwrite;
}
-pub fn taskset_remove(tasks: &mut TaskSet, task: *rust_task) {
+fn taskset_remove(tasks: &mut TaskSet, task: *rust_task) {
let was_present = tasks.remove(&task);
assert was_present;
}
@@ -87,7 +87,7 @@ pub fn taskset_each(tasks: &TaskSet, blk: fn(v: *rust_task) -> bool) {
}
// One of these per group of linked-failure tasks.
-pub type TaskGroupData = {
+type TaskGroupData = {
// All tasks which might kill this group. When this is empty, the group
// can be "GC"ed (i.e., its link in the ancestor list can be removed).
mut members: TaskSet,
@@ -95,12 +95,12 @@ pub type TaskGroupData = {
// tasks in this group.
mut descendants: TaskSet,
};
-pub type TaskGroupArc = private::Exclusive<Option<TaskGroupData>>;
+type TaskGroupArc = private::Exclusive<Option<TaskGroupData>>;
-pub type TaskGroupInner = &mut Option<TaskGroupData>;
+type TaskGroupInner = &mut Option<TaskGroupData>;
// A taskgroup is 'dead' when nothing can cause it to fail; only members can.
-pub pure fn taskgroup_is_dead(tg: &TaskGroupData) -> bool {
+pure fn taskgroup_is_dead(tg: &TaskGroupData) -> bool {
(&tg.members).is_empty()
}
@@ -111,7 +111,7 @@ pub pure fn taskgroup_is_dead(tg: &TaskGroupData) -> bool {
// taskgroup which was spawned-unlinked. Tasks from intermediate generations
// have references to the middle of the list; when intermediate generations
// die, their node in the list will be collected at a descendant's spawn-time.
-pub type AncestorNode = {
+type AncestorNode = {
// Since the ancestor list is recursive, we end up with references to
// exclusives within other exclusives. This is dangerous business (if
// circular references arise, deadlock and memory leaks are imminent).
@@ -124,16 +124,16 @@ pub type AncestorNode = {
// Recursive rest of the list.
mut ancestors: AncestorList,
};
-pub enum AncestorList = Option<private::Exclusive<AncestorNode>>;
+enum AncestorList = Option<private::Exclusive<AncestorNode>>;
// Accessors for taskgroup arcs and ancestor arcs that wrap the unsafety.
#[inline(always)]
-pub fn access_group<U>(x: &TaskGroupArc, blk: fn(TaskGroupInner) -> U) -> U {
+fn access_group<U>(x: &TaskGroupArc, blk: fn(TaskGroupInner) -> U) -> U {
unsafe { x.with(blk) }
}
#[inline(always)]
-pub fn access_ancestors<U>(x: &private::Exclusive<AncestorNode>,
+fn access_ancestors<U>(x: &private::Exclusive<AncestorNode>,
blk: fn(x: &mut AncestorNode) -> U) -> U {
unsafe { x.with(blk) }
}
@@ -146,7 +146,7 @@ pub fn access_ancestors<U>(x: &private::Exclusive<AncestorNode>,
// (3) As a bonus, coalesces away all 'dead' taskgroup nodes in the list.
// FIXME(#2190): Change Option<fn@(...)> to Option<fn&(...)>, to save on
// allocations. Once that bug is fixed, changing the sigil should suffice.
-pub fn each_ancestor(list: &mut AncestorList,
+fn each_ancestor(list: &mut AncestorList,
bail_opt: Option<fn@(TaskGroupInner)>,
forward_blk: fn(TaskGroupInner) -> bool)
-> bool {
@@ -271,7 +271,7 @@ pub fn each_ancestor(list: &mut AncestorList,
}
// One of these per task.
-pub struct TCB {
+struct TCB {
me: *rust_task,
// List of tasks with whose fates this one's is intertwined.
tasks: TaskGroupArc, // 'none' means the group has failed.
@@ -303,7 +303,7 @@ pub struct TCB {
}
}
-pub fn TCB(me: *rust_task, tasks: TaskGroupArc, ancestors: AncestorList,
+fn TCB(me: *rust_task, tasks: TaskGroupArc, ancestors: AncestorList,
is_main: bool, notifier: Option<AutoNotify>) -> TCB {
let notifier = move notifier;
@@ -318,7 +318,7 @@ pub fn TCB(me: *rust_task, tasks: TaskGroupArc, ancestors: AncestorList,
}
}
-pub struct AutoNotify {
+struct AutoNotify {
notify_chan: Chan<Notification>,
mut failed: bool,
drop {
@@ -327,14 +327,14 @@ pub struct AutoNotify {
}
}
-pub fn AutoNotify(chan: Chan<Notification>) -> AutoNotify {
+fn AutoNotify(chan: Chan<Notification>) -> AutoNotify {
AutoNotify {
notify_chan: chan,
failed: true // Un-set above when taskgroup successfully made.
}
}
-pub fn enlist_in_taskgroup(state: TaskGroupInner, me: *rust_task,
+fn enlist_in_taskgroup(state: TaskGroupInner, me: *rust_task,
is_member: bool) -> bool {
let newstate = util::replace(state, None);
// If 'None', the group was failing. Can't enlist.
@@ -350,7 +350,7 @@ pub fn enlist_in_taskgroup(state: TaskGroupInner, me: *rust_task,
}
// NB: Runs in destructor/post-exit context. Can't 'fail'.
-pub fn leave_taskgroup(state: TaskGroupInner, me: *rust_task,
+fn leave_taskgroup(state: TaskGroupInner, me: *rust_task,
is_member: bool) {
let newstate = util::replace(state, None);
// If 'None', already failing and we've already gotten a kill signal.
@@ -363,7 +363,7 @@ pub fn leave_taskgroup(state: TaskGroupInner, me: *rust_task,
}
// NB: Runs in destructor/post-exit context. Can't 'fail'.
-pub fn kill_taskgroup(state: TaskGroupInner, me: *rust_task, is_main: bool) {
+fn kill_taskgroup(state: TaskGroupInner, me: *rust_task, is_main: bool) {
// NB: We could do the killing iteration outside of the group arc, by
// having "let mut newstate" here, swapping inside, and iterating after.
// But that would let other exiting tasks fall-through and exit while we
@@ -405,7 +405,7 @@ macro_rules! taskgroup_key (
() => (cast::transmute((-2 as uint, 0u)))
)
-pub fn gen_child_taskgroup(linked: bool, supervised: bool)
+fn gen_child_taskgroup(linked: bool, supervised: bool)
-> (TaskGroupArc, AncestorList, bool) {
let spawner = rt::rust_get_task();
/*######################################################################*