diff --git a/src/libcore/comm.rs b/src/libcore/comm.rs index b1f60ec469084..322584f8df174 100644 --- a/src/libcore/comm.rs +++ b/src/libcore/comm.rs @@ -19,8 +19,8 @@ use option::{Option, Some, None}; use uint; use unstable; use vec; -use unstable::Exclusive; use util::replace; +use unstable::sync::{Exclusive, exclusive}; use pipes::{recv, try_recv, wait_many, peek, PacketHeader}; @@ -304,7 +304,7 @@ pub struct SharedChan { impl SharedChan { /// Converts a `chan` into a `shared_chan`. pub fn new(c: Chan) -> SharedChan { - SharedChan { ch: unstable::exclusive(c) } + SharedChan { ch: exclusive(c) } } } diff --git a/src/libcore/core.rc b/src/libcore/core.rc index d029fbc07f6fb..0b4c0cf806931 100644 --- a/src/libcore/core.rc +++ b/src/libcore/core.rc @@ -238,6 +238,7 @@ pub mod util; /* Unsupported interfaces */ // Private APIs +#[path = "unstable/mod.rs"] pub mod unstable; /* For internal use, not exported */ diff --git a/src/libcore/os.rs b/src/libcore/os.rs index 030b7ec3e42ff..1a8d996cc46fb 100644 --- a/src/libcore/os.rs +++ b/src/libcore/os.rs @@ -152,7 +152,7 @@ FIXME #4726: It would probably be appropriate to make this a real global */ fn with_env_lock(f: &fn() -> T) -> T { use unstable::global::global_data_clone_create; - use unstable::{Exclusive, exclusive}; + use unstable::sync::{Exclusive, exclusive}; struct SharedValue(()); type ValueMutex = Exclusive; @@ -860,7 +860,7 @@ pub fn change_dir(p: &Path) -> bool { /// is otherwise unsuccessful. pub fn change_dir_locked(p: &Path, action: &fn()) -> bool { use unstable::global::global_data_clone_create; - use unstable::{Exclusive, exclusive}; + use unstable::sync::{Exclusive, exclusive}; fn key(_: Exclusive<()>) { } diff --git a/src/libcore/task/spawn.rs b/src/libcore/task/spawn.rs index 327b7a988aaa0..545f1ac8adaa8 100644 --- a/src/libcore/task/spawn.rs +++ b/src/libcore/task/spawn.rs @@ -90,6 +90,7 @@ use task::{ExistingScheduler, SchedulerHandle}; use task::unkillable; use uint; use util; +use unstable::sync::{Exclusive, exclusive}; #[cfg(test)] use task::default_task_opts; @@ -128,7 +129,7 @@ struct TaskGroupData { // tasks in this group. descendants: TaskSet, } -type TaskGroupArc = unstable::Exclusive>; +type TaskGroupArc = Exclusive>; type TaskGroupInner<'self> = &'self mut Option; @@ -158,7 +159,7 @@ struct AncestorNode { ancestors: AncestorList, } -struct AncestorList(Option>); +struct AncestorList(Option>); // Accessors for taskgroup arcs and ancestor arcs that wrap the unsafety. #[inline(always)] @@ -167,7 +168,7 @@ fn access_group(x: &TaskGroupArc, blk: &fn(TaskGroupInner) -> U) -> U { } #[inline(always)] -fn access_ancestors(x: &unstable::Exclusive, +fn access_ancestors(x: &Exclusive, blk: &fn(x: &mut AncestorNode) -> U) -> U { x.with(blk) } @@ -479,7 +480,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool) // here. let mut members = new_taskset(); taskset_insert(&mut members, spawner); - let tasks = unstable::exclusive(Some(TaskGroupData { + let tasks = exclusive(Some(TaskGroupData { members: members, descendants: new_taskset(), })); @@ -508,7 +509,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool) (g, a, spawner_group.is_main) } else { // Child is in a separate group from spawner. - let g = unstable::exclusive(Some(TaskGroupData { + let g = exclusive(Some(TaskGroupData { members: new_taskset(), descendants: new_taskset(), })); @@ -528,7 +529,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool) }; assert!(new_generation < uint::max_value); // Build a new node in the ancestor list. 
- AncestorList(Some(unstable::exclusive(AncestorNode { + AncestorList(Some(exclusive(AncestorNode { generation: new_generation, parent_group: Some(spawner_group.tasks.clone()), ancestors: old_ancestors, diff --git a/src/libcore/unstable.rs b/src/libcore/unstable.rs deleted file mode 100644 index 25e4d07b01da1..0000000000000 --- a/src/libcore/unstable.rs +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#[doc(hidden)]; - -use cast; -use libc; -use comm::{GenericChan, GenericPort}; -use prelude::*; -use task; -use task::atomically; -use self::finally::Finally; - -#[path = "unstable/at_exit.rs"] -pub mod at_exit; -#[path = "unstable/global.rs"] -pub mod global; -#[path = "unstable/finally.rs"] -pub mod finally; -#[path = "unstable/weak_task.rs"] -pub mod weak_task; -#[path = "unstable/exchange_alloc.rs"] -pub mod exchange_alloc; -#[path = "unstable/intrinsics.rs"] -pub mod intrinsics; -#[path = "unstable/simd.rs"] -pub mod simd; -#[path = "unstable/extfmt.rs"] -pub mod extfmt; -#[path = "unstable/lang.rs"] -#[cfg(not(test))] -pub mod lang; - -mod rustrt { - use unstable::{raw_thread, rust_little_lock}; - - pub extern { - pub unsafe fn rust_create_little_lock() -> rust_little_lock; - pub unsafe fn rust_destroy_little_lock(lock: rust_little_lock); - pub unsafe fn rust_lock_little_lock(lock: rust_little_lock); - pub unsafe fn rust_unlock_little_lock(lock: rust_little_lock); - - pub unsafe fn rust_raw_thread_start(f: &(&fn())) -> *raw_thread; - pub unsafe fn rust_raw_thread_join_delete(thread: *raw_thread); - } -} - -#[allow(non_camel_case_types)] // runtime type -pub type raw_thread = libc::c_void; - -/** - -Start a new thread outside of the current runtime context and wait -for it to terminate. - -The executing thread has no access to a task pointer and will be using -a normal large stack. -*/ -pub fn run_in_bare_thread(f: ~fn()) { - let (port, chan) = comm::stream(); - // FIXME #4525: Unfortunate that this creates an extra scheduler but it's - // necessary since rust_raw_thread_join_delete is blocking - do task::spawn_sched(task::SingleThreaded) { - unsafe { - let closure: &fn() = || { - f() - }; - let thread = rustrt::rust_raw_thread_start(&closure); - rustrt::rust_raw_thread_join_delete(thread); - chan.send(()); - } - } - port.recv(); -} - -#[test] -fn test_run_in_bare_thread() { - let i = 100; - do run_in_bare_thread { - assert!(i == 100); - } -} - -#[test] -fn test_run_in_bare_thread_exchange() { - // Does the exchange heap work without the runtime? 
- let i = ~100; - do run_in_bare_thread { - assert!(i == ~100); - } -} - -fn compare_and_swap(address: &mut int, oldval: int, newval: int) -> bool { - unsafe { - let old = intrinsics::atomic_cxchg(address, oldval, newval); - old == oldval - } -} - -/**************************************************************************** - * Shared state & exclusive ARC - ****************************************************************************/ - -struct ArcData { - count: libc::intptr_t, - // FIXME(#3224) should be able to make this non-option to save memory - data: Option, -} - -struct ArcDestruct { - data: *libc::c_void, -} - -#[unsafe_destructor] -impl Drop for ArcDestruct{ - fn finalize(&self) { - unsafe { - do task::unkillable { - let mut data: ~ArcData = cast::transmute(self.data); - let new_count = - intrinsics::atomic_xsub(&mut data.count, 1) - 1; - assert!(new_count >= 0); - if new_count == 0 { - // drop glue takes over. - } else { - cast::forget(data); - } - } - } - } -} - -fn ArcDestruct(data: *libc::c_void) -> ArcDestruct { - ArcDestruct { - data: data - } -} - -/** - * COMPLETELY UNSAFE. Used as a primitive for the safe versions in std::arc. - * - * Data races between tasks can result in crashes and, with sufficient - * cleverness, arbitrary type coercion. - */ -pub type SharedMutableState = ArcDestruct; - -pub unsafe fn shared_mutable_state(data: T) -> - SharedMutableState { - let data = ~ArcData { count: 1, data: Some(data) }; - let ptr = cast::transmute(data); - ArcDestruct(ptr) -} - -#[inline(always)] -pub unsafe fn get_shared_mutable_state( - rc: *SharedMutableState) -> *mut T -{ - let ptr: ~ArcData = cast::transmute((*rc).data); - assert!(ptr.count > 0); - let r = cast::transmute(ptr.data.get_ref()); - cast::forget(ptr); - return r; -} -#[inline(always)] -pub unsafe fn get_shared_immutable_state<'a,T:Owned>( - rc: &'a SharedMutableState) -> &'a T { - let ptr: ~ArcData = cast::transmute((*rc).data); - assert!(ptr.count > 0); - // Cast us back into the correct region - let r = cast::transmute_region(ptr.data.get_ref()); - cast::forget(ptr); - return r; -} - -pub unsafe fn clone_shared_mutable_state(rc: &SharedMutableState) - -> SharedMutableState { - let mut ptr: ~ArcData = cast::transmute((*rc).data); - let new_count = intrinsics::atomic_xadd(&mut ptr.count, 1) + 1; - assert!(new_count >= 2); - cast::forget(ptr); - ArcDestruct((*rc).data) -} - -impl Clone for SharedMutableState { - fn clone(&self) -> SharedMutableState { - unsafe { - clone_shared_mutable_state(self) - } - } -} - -/****************************************************************************/ - -#[allow(non_camel_case_types)] // runtime type -pub type rust_little_lock = *libc::c_void; - -struct LittleLock { - l: rust_little_lock, -} - -impl Drop for LittleLock { - fn finalize(&self) { - unsafe { - rustrt::rust_destroy_little_lock(self.l); - } - } -} - -fn LittleLock() -> LittleLock { - unsafe { - LittleLock { - l: rustrt::rust_create_little_lock() - } - } -} - -pub impl LittleLock { - #[inline(always)] - unsafe fn lock(&self, f: &fn() -> T) -> T { - do atomically { - rustrt::rust_lock_little_lock(self.l); - do (|| { - f() - }).finally { - rustrt::rust_unlock_little_lock(self.l); - } - } - } -} - -struct ExData { - lock: LittleLock, - failed: bool, - data: T, -} - -/** - * An arc over mutable data that is protected by a lock. For library use only. 
- */ -pub struct Exclusive { - x: SharedMutableState> -} - -pub fn exclusive(user_data: T) -> Exclusive { - let data = ExData { - lock: LittleLock(), - failed: false, - data: user_data - }; - Exclusive { - x: unsafe { - shared_mutable_state(data) - } - } -} - -impl Clone for Exclusive { - // Duplicate an exclusive ARC, as std::arc::clone. - fn clone(&self) -> Exclusive { - Exclusive { x: unsafe { clone_shared_mutable_state(&self.x) } } - } -} - -pub impl Exclusive { - // Exactly like std::arc::mutex_arc,access(), but with the little_lock - // instead of a proper mutex. Same reason for being unsafe. - // - // Currently, scheduling operations (i.e., yielding, receiving on a pipe, - // accessing the provided condition variable) are prohibited while inside - // the exclusive. Supporting that is a work in progress. - #[inline(always)] - unsafe fn with(&self, f: &fn(x: &mut T) -> U) -> U { - let rec = get_shared_mutable_state(&self.x); - do (*rec).lock.lock { - if (*rec).failed { - fail!( - ~"Poisoned exclusive - another task failed inside!"); - } - (*rec).failed = true; - let result = f(&mut (*rec).data); - (*rec).failed = false; - result - } - } - - #[inline(always)] - unsafe fn with_imm(&self, f: &fn(x: &T) -> U) -> U { - do self.with |x| { - f(cast::transmute_immut(x)) - } - } -} - -#[cfg(test)] -mod tests { - use comm; - use super::exclusive; - use task; - use uint; - - #[test] - fn exclusive_arc() { - let mut futures = ~[]; - - let num_tasks = 10; - let count = 10; - - let total = exclusive(~0); - - for uint::range(0, num_tasks) |_i| { - let total = total.clone(); - let (port, chan) = comm::stream(); - futures.push(port); - - do task::spawn || { - for uint::range(0, count) |_i| { - do total.with |count| { - **count += 1; - } - } - chan.send(()); - } - }; - - for futures.each |f| { f.recv() } - - do total.with |total| { - assert!(**total == num_tasks * count) - }; - } - - #[test] #[should_fail] #[ignore(cfg(windows))] - fn exclusive_poison() { - // Tests that if one task fails inside of an exclusive, subsequent - // accesses will also fail. 
- let x = exclusive(1); - let x2 = x.clone(); - do task::try || { - do x2.with |one| { - assert!(*one == 2); - } - }; - do x.with |one| { - assert!(*one == 1); - } - } -} diff --git a/src/libcore/unstable/global.rs b/src/libcore/unstable/global.rs index eac686e28d1c6..2d6698fb96ade 100644 --- a/src/libcore/unstable/global.rs +++ b/src/libcore/unstable/global.rs @@ -31,14 +31,13 @@ use kinds::Owned; use libc::{c_void}; use option::{Option, Some, None}; use ops::Drop; -use unstable::{Exclusive, exclusive}; +use unstable::sync::{Exclusive, exclusive}; use unstable::at_exit::at_exit; use unstable::intrinsics::atomic_cxchg; use hashmap::HashMap; use sys::Closure; -#[cfg(test)] use unstable::{SharedMutableState, shared_mutable_state}; -#[cfg(test)] use unstable::get_shared_immutable_state; +#[cfg(test)] use unstable::sync::{UnsafeAtomicRcBox}; #[cfg(test)] use task::spawn; #[cfg(test)] use uint; @@ -234,18 +233,16 @@ extern { #[test] fn test_clone_rc() { - type MyType = SharedMutableState; - - fn key(_v: SharedMutableState) { } + fn key(_v: UnsafeAtomicRcBox) { } for uint::range(0, 100) |_| { do spawn { unsafe { let val = do global_data_clone_create(key) { - ~shared_mutable_state(10) + ~UnsafeAtomicRcBox::new(10) }; - assert!(get_shared_immutable_state(&val) == &10); + assert!(val.get() == &10); } } } @@ -253,16 +250,14 @@ fn test_clone_rc() { #[test] fn test_modify() { - type MyType = SharedMutableState; - - fn key(_v: SharedMutableState) { } + fn key(_v: UnsafeAtomicRcBox) { } unsafe { do global_data_modify(key) |v| { match v { None => { unsafe { - Some(~shared_mutable_state(10)) + Some(~UnsafeAtomicRcBox::new(10)) } } _ => fail!() @@ -272,7 +267,7 @@ fn test_modify() { do global_data_modify(key) |v| { match v { Some(sms) => { - let v = get_shared_immutable_state(sms); + let v = sms.get(); assert!(*v == 10); None }, @@ -284,7 +279,7 @@ fn test_modify() { match v { None => { unsafe { - Some(~shared_mutable_state(10)) + Some(~UnsafeAtomicRcBox::new(10)) } } _ => fail!() diff --git a/src/libcore/unstable/mod.rs b/src/libcore/unstable/mod.rs new file mode 100644 index 0000000000000..bef7a7f87d3bd --- /dev/null +++ b/src/libcore/unstable/mod.rs @@ -0,0 +1,78 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[doc(hidden)]; + +use libc; +use comm::{GenericChan, GenericPort}; +use prelude::*; +use task; + +pub mod at_exit; +pub mod global; +pub mod finally; +pub mod weak_task; +pub mod exchange_alloc; +pub mod intrinsics; +pub mod simd; +pub mod extfmt; +#[cfg(not(test))] +pub mod lang; +pub mod sync; + +/** + +Start a new thread outside of the current runtime context and wait +for it to terminate. + +The executing thread has no access to a task pointer and will be using +a normal large stack. 
+*/ +pub fn run_in_bare_thread(f: ~fn()) { + let (port, chan) = comm::stream(); + // FIXME #4525: Unfortunate that this creates an extra scheduler but it's + // necessary since rust_raw_thread_join_delete is blocking + do task::spawn_sched(task::SingleThreaded) { + unsafe { + let closure: &fn() = || { + f() + }; + let thread = rust_raw_thread_start(&closure); + rust_raw_thread_join_delete(thread); + chan.send(()); + } + } + port.recv(); +} + +#[test] +fn test_run_in_bare_thread() { + let i = 100; + do run_in_bare_thread { + assert!(i == 100); + } +} + +#[test] +fn test_run_in_bare_thread_exchange() { + // Does the exchange heap work without the runtime? + let i = ~100; + do run_in_bare_thread { + assert!(i == ~100); + } +} + +#[allow(non_camel_case_types)] // runtime type +pub type raw_thread = libc::c_void; + +extern { + fn rust_raw_thread_start(f: &(&fn())) -> *raw_thread; + fn rust_raw_thread_join_delete(thread: *raw_thread); +} diff --git a/src/libcore/unstable/sync.rs b/src/libcore/unstable/sync.rs new file mode 100644 index 0000000000000..e22046f04f95b --- /dev/null +++ b/src/libcore/unstable/sync.rs @@ -0,0 +1,286 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use cast; +use libc; +use option::*; +use task; +use task::atomically; +use unstable::finally::Finally; +use unstable::intrinsics; +use ops::Drop; +use clone::Clone; +use kinds::Owned; + +/// An atomically reference counted pointer. +/// +/// Enforces no shared-memory safety. 
+pub struct UnsafeAtomicRcBox { + data: *mut libc::c_void, +} + +struct AtomicRcBoxData { + count: int, + data: Option, +} + +impl UnsafeAtomicRcBox { + pub fn new(data: T) -> UnsafeAtomicRcBox { + unsafe { + let data = ~AtomicRcBoxData { count: 1, data: Some(data) }; + let ptr = cast::transmute(data); + return UnsafeAtomicRcBox { data: ptr }; + } + } + + #[inline(always)] + #[cfg(stage0)] + pub unsafe fn get(&self) -> *mut T + { + let mut data: ~AtomicRcBoxData = cast::transmute(self.data); + assert!(data.count > 0); + let r: *mut T = cast::transmute(data.data.get_mut_ref()); + cast::forget(data); + return r; + } + + #[inline(always)] + #[cfg(not(stage0))] + pub unsafe fn get(&self) -> *mut T + { + let mut data: ~AtomicRcBoxData = cast::transmute(self.data); + assert!(data.count > 0); + let r: *mut T = data.data.get_mut_ref(); + cast::forget(data); + return r; + } + + #[inline(always)] + #[cfg(stage0)] + pub unsafe fn get_immut(&self) -> *T + { + let mut data: ~AtomicRcBoxData = cast::transmute(self.data); + assert!(data.count > 0); + let r: *T = cast::transmute(data.data.get_mut_ref()); + cast::forget(data); + return r; + } + + #[inline(always)] + #[cfg(not(stage0))] + pub unsafe fn get_immut(&self) -> *T + { + let mut data: ~AtomicRcBoxData = cast::transmute(self.data); + assert!(data.count > 0); + let r: *T = cast::transmute_immut(data.data.get_mut_ref()); + cast::forget(data); + return r; + } +} + +impl Clone for UnsafeAtomicRcBox { + fn clone(&self) -> UnsafeAtomicRcBox { + unsafe { + let mut data: ~AtomicRcBoxData = cast::transmute(self.data); + let new_count = intrinsics::atomic_xadd(&mut data.count, 1) + 1; + assert!(new_count >= 2); + cast::forget(data); + return UnsafeAtomicRcBox { data: self.data }; + } + } +} + +#[unsafe_destructor] +impl Drop for UnsafeAtomicRcBox{ + fn finalize(&self) { + unsafe { + do task::unkillable { + let mut data: ~AtomicRcBoxData = cast::transmute(self.data); + let new_count = intrinsics::atomic_xsub(&mut data.count, 1) - 1; + assert!(new_count >= 0); + if new_count == 0 { + // drop glue takes over. + } else { + cast::forget(data); + } + } + } + } +} + + +/****************************************************************************/ + +#[allow(non_camel_case_types)] // runtime type +pub type rust_little_lock = *libc::c_void; + +struct LittleLock { + l: rust_little_lock, +} + +impl Drop for LittleLock { + fn finalize(&self) { + unsafe { + rust_destroy_little_lock(self.l); + } + } +} + +fn LittleLock() -> LittleLock { + unsafe { + LittleLock { + l: rust_create_little_lock() + } + } +} + +pub impl LittleLock { + #[inline(always)] + unsafe fn lock(&self, f: &fn() -> T) -> T { + do atomically { + rust_lock_little_lock(self.l); + do (|| { + f() + }).finally { + rust_unlock_little_lock(self.l); + } + } + } +} + +struct ExData { + lock: LittleLock, + failed: bool, + data: T, +} + +/** + * An arc over mutable data that is protected by a lock. For library use only. + */ +pub struct Exclusive { + x: UnsafeAtomicRcBox> +} + +pub fn exclusive(user_data: T) -> Exclusive { + let data = ExData { + lock: LittleLock(), + failed: false, + data: user_data + }; + Exclusive { + x: UnsafeAtomicRcBox::new(data) + } +} + +impl Clone for Exclusive { + // Duplicate an exclusive ARC, as std::arc::clone. + fn clone(&self) -> Exclusive { + Exclusive { x: self.x.clone() } + } +} + +pub impl Exclusive { + // Exactly like std::arc::mutex_arc,access(), but with the little_lock + // instead of a proper mutex. Same reason for being unsafe. 
+ // + // Currently, scheduling operations (i.e., yielding, receiving on a pipe, + // accessing the provided condition variable) are prohibited while inside + // the exclusive. Supporting that is a work in progress. + #[inline(always)] + unsafe fn with(&self, f: &fn(x: &mut T) -> U) -> U { + let rec = self.x.get(); + do (*rec).lock.lock { + if (*rec).failed { + fail!( + ~"Poisoned exclusive - another task failed inside!"); + } + (*rec).failed = true; + let result = f(&mut (*rec).data); + (*rec).failed = false; + result + } + } + + #[inline(always)] + unsafe fn with_imm(&self, f: &fn(x: &T) -> U) -> U { + do self.with |x| { + f(cast::transmute_immut(x)) + } + } +} + +fn compare_and_swap(address: &mut int, oldval: int, newval: int) -> bool { + unsafe { + let old = intrinsics::atomic_cxchg(address, oldval, newval); + old == oldval + } +} + +extern { + fn rust_create_little_lock() -> rust_little_lock; + fn rust_destroy_little_lock(lock: rust_little_lock); + fn rust_lock_little_lock(lock: rust_little_lock); + fn rust_unlock_little_lock(lock: rust_little_lock); +} + +#[cfg(test)] +mod tests { + use comm; + use super::exclusive; + use task; + use uint; + + #[test] + fn exclusive_arc() { + let mut futures = ~[]; + + let num_tasks = 10; + let count = 10; + + let total = exclusive(~0); + + for uint::range(0, num_tasks) |_i| { + let total = total.clone(); + let (port, chan) = comm::stream(); + futures.push(port); + + do task::spawn || { + for uint::range(0, count) |_i| { + do total.with |count| { + **count += 1; + } + } + chan.send(()); + } + }; + + for futures.each |f| { f.recv() } + + do total.with |total| { + assert!(**total == num_tasks * count) + }; + } + + #[test] #[should_fail] #[ignore(cfg(windows))] + fn exclusive_poison() { + // Tests that if one task fails inside of an exclusive, subsequent + // accesses will also fail. + let x = exclusive(1); + let x2 = x.clone(); + do task::try || { + do x2.with |one| { + assert!(*one == 2); + } + }; + do x.with |one| { + assert!(*one == 1); + } + } +} diff --git a/src/libcore/vec.rs b/src/libcore/vec.rs index 604f0297b647e..e56144ebc0a3b 100644 --- a/src/libcore/vec.rs +++ b/src/libcore/vec.rs @@ -3298,8 +3298,9 @@ mod tests { #[test] fn test_swap_remove_noncopyable() { // Tests that we don't accidentally run destructors twice. - let mut v = ~[::unstable::exclusive(()), ::unstable::exclusive(()), - ::unstable::exclusive(())]; + let mut v = ~[::unstable::sync::exclusive(()), + ::unstable::sync::exclusive(()), + ::unstable::sync::exclusive(())]; let mut _e = v.swap_remove(0); assert!(v.len() == 2); _e = v.swap_remove(1); diff --git a/src/libstd/arc.rs b/src/libstd/arc.rs index 7af68f3321d49..d3f774a1cd51f 100644 --- a/src/libstd/arc.rs +++ b/src/libstd/arc.rs @@ -17,9 +17,7 @@ use sync; use sync::{Mutex, mutex_with_condvars, RWlock, rwlock_with_condvars}; use core::cast; -use core::unstable::{SharedMutableState, shared_mutable_state}; -use core::unstable::{clone_shared_mutable_state}; -use core::unstable::{get_shared_mutable_state, get_shared_immutable_state}; +use core::unstable::sync::UnsafeAtomicRcBox; use core::ptr; use core::task; @@ -83,11 +81,11 @@ pub impl<'self> Condvar<'self> { ****************************************************************************/ /// An atomically reference counted wrapper for shared immutable state. -struct ARC { x: SharedMutableState } +struct ARC { x: UnsafeAtomicRcBox } /// Create an atomically reference counted wrapper. 
pub fn ARC(data: T) -> ARC { - ARC { x: unsafe { shared_mutable_state(data) } } + ARC { x: UnsafeAtomicRcBox::new(data) } } /** @@ -95,7 +93,7 @@ pub fn ARC(data: T) -> ARC { * wrapper. */ pub fn get<'a, T:Const + Owned>(rc: &'a ARC) -> &'a T { - unsafe { get_shared_immutable_state(&rc.x) } + unsafe { &*rc.x.get_immut() } } /** @@ -106,7 +104,7 @@ pub fn get<'a, T:Const + Owned>(rc: &'a ARC) -> &'a T { * allowing them to share the underlying data. */ pub fn clone(rc: &ARC) -> ARC { - ARC { x: unsafe { clone_shared_mutable_state(&rc.x) } } + ARC { x: rc.x.clone() } } impl Clone for ARC { @@ -122,7 +120,7 @@ impl Clone for ARC { #[doc(hidden)] struct MutexARCInner { lock: Mutex, failed: bool, data: T } /// An ARC with mutable data protected by a blocking mutex. -struct MutexARC { x: SharedMutableState> } +struct MutexARC { x: UnsafeAtomicRcBox> } /// Create a mutex-protected ARC with the supplied data. pub fn MutexARC(user_data: T) -> MutexARC { @@ -137,7 +135,7 @@ pub fn mutex_arc_with_condvars(user_data: T, let data = MutexARCInner { lock: mutex_with_condvars(num_condvars), failed: false, data: user_data }; - MutexARC { x: unsafe { shared_mutable_state(data) } } + MutexARC { x: UnsafeAtomicRcBox::new(data) } } impl Clone for MutexARC { @@ -145,7 +143,7 @@ impl Clone for MutexARC { fn clone(&self) -> MutexARC { // NB: Cloning the underlying mutex is not necessary. Its reference // count would be exactly the same as the shared state's. - MutexARC { x: unsafe { clone_shared_mutable_state(&self.x) } } + MutexARC { x: self.x.clone() } } } @@ -176,7 +174,7 @@ pub impl MutexARC { */ #[inline(always)] unsafe fn access(&self, blk: &fn(x: &mut T) -> U) -> U { - let state = get_shared_mutable_state(&self.x); + let state = self.x.get(); // Borrowck would complain about this if the function were // not already unsafe. See borrow_rwlock, far below. do (&(*state).lock).lock { @@ -192,7 +190,7 @@ pub impl MutexARC { &self, blk: &fn(x: &'x mut T, c: &'c Condvar) -> U) -> U { - let state = get_shared_mutable_state(&self.x); + let state = self.x.get(); do (&(*state).lock).lock_cond |cond| { check_poison(true, (*state).failed); let _z = PoisonOnFail(&mut (*state).failed); @@ -254,7 +252,7 @@ struct RWARCInner { lock: RWlock, failed: bool, data: T } */ #[mutable] struct RWARC { - x: SharedMutableState>, + x: UnsafeAtomicRcBox>, cant_nest: () } @@ -273,13 +271,13 @@ pub fn rw_arc_with_condvars( let data = RWARCInner { lock: rwlock_with_condvars(num_condvars), failed: false, data: user_data }; - RWARC { x: unsafe { shared_mutable_state(data) }, cant_nest: () } + RWARC { x: UnsafeAtomicRcBox::new(data), cant_nest: () } } pub impl RWARC { /// Duplicate a rwlock-protected ARC, as arc::clone. fn clone(&self) -> RWARC { - RWARC { x: unsafe { clone_shared_mutable_state(&self.x) }, + RWARC { x: self.x.clone(), cant_nest: () } } @@ -299,7 +297,7 @@ pub impl RWARC { #[inline(always)] fn write(&self, blk: &fn(x: &mut T) -> U) -> U { unsafe { - let state = get_shared_mutable_state(&self.x); + let state = self.x.get(); do (*borrow_rwlock(state)).write { check_poison(false, (*state).failed); let _z = PoisonOnFail(&mut (*state).failed); @@ -313,7 +311,7 @@ pub impl RWARC { blk: &fn(x: &'x mut T, c: &'c Condvar) -> U) -> U { unsafe { - let state = get_shared_mutable_state(&self.x); + let state = self.x.get(); do (*borrow_rwlock(state)).write_cond |cond| { check_poison(false, (*state).failed); let _z = PoisonOnFail(&mut (*state).failed); @@ -334,10 +332,12 @@ pub impl RWARC { * access modes, this will not poison the ARC. 
*/ fn read(&self, blk: &fn(x: &T) -> U) -> U { - let state = unsafe { get_shared_immutable_state(&self.x) }; - do (&state.lock).read { - check_poison(false, state.failed); - blk(&state.data) + let state = self.x.get(); + unsafe { + do (*state).lock.read { + check_poison(false, (*state).failed); + blk(&(*state).data) + } } } @@ -360,7 +360,7 @@ pub impl RWARC { */ fn write_downgrade(&self, blk: &fn(v: RWWriteMode) -> U) -> U { unsafe { - let state = get_shared_mutable_state(&self.x); + let state = self.x.get(); do (*borrow_rwlock(state)).write_downgrade |write_mode| { check_poison(false, (*state).failed); blk(RWWriteMode { @@ -374,25 +374,27 @@ pub impl RWARC { /// To be called inside of the write_downgrade block. fn downgrade<'a>(&self, token: RWWriteMode<'a, T>) -> RWReadMode<'a, T> { - // The rwlock should assert that the token belongs to us for us. - let state = unsafe { get_shared_immutable_state(&self.x) }; - let RWWriteMode { - data: data, - token: t, - poison: _poison - } = token; - // Let readers in - let new_token = (&state.lock).downgrade(t); - // Whatever region the input reference had, it will be safe to use - // the same region for the output reference. (The only 'unsafe' part - // of this cast is removing the mutability.) - let new_data = unsafe { cast::transmute_immut(data) }; - // Downgrade ensured the token belonged to us. Just a sanity check. - assert!(ptr::ref_eq(&state.data, new_data)); - // Produce new token - RWReadMode { - data: new_data, - token: new_token, + unsafe { + // The rwlock should assert that the token belongs to us for us. + let state = self.x.get(); + let RWWriteMode { + data: data, + token: t, + poison: _poison + } = token; + // Let readers in + let new_token = (*state).lock.downgrade(t); + // Whatever region the input reference had, it will be safe to use + // the same region for the output reference. (The only 'unsafe' part + // of this cast is removing the mutability.) + let new_data = cast::transmute_immut(data); + // Downgrade ensured the token belonged to us. Just a sanity check. + assert!(ptr::ref_eq(&(*state).data, new_data)); + // Produce new token + RWReadMode { + data: new_data, + token: new_token, + } } } } diff --git a/src/libstd/sync.rs b/src/libstd/sync.rs index 17d051518a9ec..108f24d60dc36 100644 --- a/src/libstd/sync.rs +++ b/src/libstd/sync.rs @@ -15,7 +15,7 @@ * in std. */ -use core::unstable::{Exclusive, exclusive}; +use core::unstable::sync::{Exclusive, exclusive}; use core::ptr; use core::task; use core::util; diff --git a/src/test/compile-fail/noncopyable-match-pattern.rs b/src/test/compile-fail/noncopyable-match-pattern.rs index e8b01765a447d..155b398148339 100644 --- a/src/test/compile-fail/noncopyable-match-pattern.rs +++ b/src/test/compile-fail/noncopyable-match-pattern.rs @@ -9,7 +9,7 @@ // except according to those terms. fn main() { - let x = Some(unstable::exclusive(false)); + let x = Some(unstable::sync::exclusive(false)); match x { Some(copy z) => { //~ ERROR copying a value of non-copyable type do z.with |b| { assert!(!*b); } diff --git a/src/test/run-pass/alt-ref-binding-in-guard-3256.rs b/src/test/run-pass/alt-ref-binding-in-guard-3256.rs index 1ece3b5fd93b6..ed7a631637433 100644 --- a/src/test/run-pass/alt-ref-binding-in-guard-3256.rs +++ b/src/test/run-pass/alt-ref-binding-in-guard-3256.rs @@ -9,7 +9,7 @@ // except according to those terms. 
 pub fn main() {
-    let x = Some(unstable::exclusive(true));
+    let x = Some(unstable::sync::exclusive(true));
     match x {
         Some(ref z) if z.with(|b| *b) => {
             do z.with |b| { assert!(*b); }
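
Note on the API this patch introduces: the free functions shared_mutable_state / clone_shared_mutable_state / get_shared_mutable_state / get_shared_immutable_state are folded into methods on the new core::unstable::sync::UnsafeAtomicRcBox type, as the libstd/arc.rs hunks above show. The sketch below is pieced together from those call sites only; arc_demo and its local names are illustrative, and the T: Owned bound follows the signatures in the new sync.rs.

    // Before this patch (old core::unstable free functions):
    //     let x = unsafe { shared_mutable_state(data) };
    //     let y = unsafe { clone_shared_mutable_state(&x) };
    //     let p = unsafe { get_shared_mutable_state(&x) };      // *mut T
    //     let r = unsafe { get_shared_immutable_state(&y) };    // &T
    //
    // After this patch (methods on the refcounted box; sketch only):
    use core::unstable::sync::UnsafeAtomicRcBox;

    fn arc_demo(data: int) {
        let x = UnsafeAtomicRcBox::new(data);   // replaces shared_mutable_state(data)
        let y = x.clone();                      // replaces clone_shared_mutable_state(&x)
        unsafe {
            let p: *mut int = x.get();          // replaces get_shared_mutable_state(&x)
            let q: *int = y.get_immut();        // get_shared_immutable_state returned &T;
                                                // callers now write &*y.get_immut()
            assert!(*p == *q);
        }
    }

The clone/drop bookkeeping (atomic_xadd / atomic_xsub on the embedded count, the unkillable drop) is carried over unchanged from the deleted unstable.rs; only the packaging moved.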
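
For Exclusive, only the import path changes (core::unstable::exclusive becomes core::unstable::sync::exclusive); the little-lock and poisoning behaviour is moved verbatim. A short usage sketch adapted from the exclusive_arc test in the new sync.rs; exclusive_demo is a made-up name, and the core:: prefix assumes a caller outside libcore:

    use core::unstable::sync::exclusive;   // was: use core::unstable::exclusive
    use core::comm;
    use core::task;

    fn exclusive_demo() {
        let total = exclusive(~0);          // Exclusive<~int>: refcounted box + little lock
        let (port, chan) = comm::stream();
        let child_total = total.clone();
        do task::spawn || {
            do child_total.with |count| {   // lock held while the closure runs
                **count += 1;
            }
            chan.send(());
        }
        port.recv();
        do total.with |count| { assert!(**count == 1) };
    }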