diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs
index b7d7995b540ba..31a00699ab0b2 100644
--- a/src/liballoc/sync.rs
+++ b/src/liballoc/sync.rs
@@ -552,7 +552,7 @@ impl<T: ?Sized> Arc<T> {
         // allocation itself (there may still be weak pointers lying around).
         ptr::drop_in_place(&mut self.ptr.as_mut().data);
 
-        if self.inner().weak.fetch_sub(1, Release) == 1 {
+        if atomic::AtomicUsize::fetch_sub_explicit(&self.inner().weak, 1, Release) == 1 {
             atomic::fence(Acquire);
             Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
         }
@@ -970,10 +970,11 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
     /// [`Weak`]: ../../std/sync/struct.Weak.html
     #[inline]
     fn drop(&mut self) {
-        // Because `fetch_sub` is already atomic, we do not need to synchronize
+        // Because `fetch_sub_explicit` is already atomic, we do not need to synchronize
         // with other threads unless we are going to delete the object. This
         // same logic applies to the below `fetch_sub` to the `weak` count.
-        if self.inner().strong.fetch_sub(1, Release) != 1 {
+        // The strong count is passed by raw pointer so no dangling reference to `self` is held across the unsafe call.
+        if atomic::AtomicUsize::fetch_sub_explicit(&self.inner().strong, 1, Release) != 1 {
             return;
         }
@@ -1350,7 +1351,7 @@ impl<T: ?Sized> Drop for Weak<T> {
             return
         };
 
-        if inner.weak.fetch_sub(1, Release) == 1 {
+        if atomic::AtomicUsize::fetch_sub_explicit(&inner.weak, 1, Release) == 1 {
             atomic::fence(Acquire);
             unsafe {
                 Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs
index 8c5dde7dc271b..5fc8f27a3a00c 100644
--- a/src/libcore/sync/atomic.rs
+++ b/src/libcore/sync/atomic.rs
@@ -1575,6 +1575,41 @@ assert_eq!(foo.load(Ordering::SeqCst), 10);
         }
     }
 
+    doc_comment! {
+        concat!("Subtracts from the given atomic value, returning the previous value.
+
+This operation wraps around on overflow.
+
+`fetch_sub_explicit` takes a raw pointer to an ", stringify!($atomic_type), " and subtracts `val`
+from its value, with the [`Ordering`] argument describing the memory ordering
+of this operation. All ordering modes are possible. Note that using
+[`Acquire`] makes the store part of this operation [`Relaxed`], and
+using [`Release`] makes the load part [`Relaxed`].
+
+[`Ordering`]: enum.Ordering.html
+[`Relaxed`]: enum.Ordering.html#variant.Relaxed
+[`Release`]: enum.Ordering.html#variant.Release
+[`Acquire`]: enum.Ordering.html#variant.Acquire
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let foo = ", stringify!($atomic_type), "::new(20);
+assert_eq!(", stringify!($atomic_type), "::fetch_sub_explicit(&foo, 10, Ordering::SeqCst), 20);
+assert_eq!(foo.load(Ordering::SeqCst), 10);
+```"),
+        #[inline]
+        #[$stable]
+        #[cfg(target_has_atomic = "cas")]
+        pub fn fetch_sub_explicit(f: *const $atomic_type,
+                                  val: $int_type,
+                                  order: Ordering) -> $int_type {
+            unsafe { atomic_sub((*f).v.get(), val, order) }
+        }
+    }
+
     doc_comment! {
         concat!("Bitwise \"and\" with the current value.
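
A note on the proposed shape: unlike the existing `fetch_sub(&self, val, order)` method, `fetch_sub_explicit` is an associated function taking a `*const` pointer to the atomic, which is why call sites like `&self.inner().strong` work unchanged (a `&AtomicUsize` coerces to `*const AtomicUsize`). A minimal sketch of the equivalent behavior against today's stable `AtomicUsize` API; the standalone `fetch_sub_explicit` below is illustrative only, not a standard-library item:

```
use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative stand-in mirroring the diff's signature: the atomic is
// passed by raw pointer rather than through `&self`.
fn fetch_sub_explicit(f: *const AtomicUsize, val: usize, order: Ordering) -> usize {
    // SAFETY: the caller must guarantee `f` points to a live AtomicUsize.
    unsafe { (*f).fetch_sub(val, order) }
}

fn main() {
    let foo = AtomicUsize::new(20);
    // `&foo` coerces from `&AtomicUsize` to `*const AtomicUsize`.
    assert_eq!(fetch_sub_explicit(&foo, 10, Ordering::SeqCst), 20);
    assert_eq!(foo.load(Ordering::SeqCst), 10);
}
```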
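
The comments in `Arc::drop` describe the usual release/acquire reference-counting protocol: every owner decrements with `Release` so its earlier uses of the data happen-before the decrement, and only the thread that observes the count reach zero pays for an `Acquire` fence before freeing. A runnable two-thread sketch of that protocol, assuming stable `fetch_sub` (names such as `drop_ref` are illustrative, not from the patch):

```
use std::sync::atomic::{fence, AtomicUsize, Ordering};
use std::thread;

// Two conceptual owners of one shared allocation.
static STRONG: AtomicUsize = AtomicUsize::new(2);

// Illustrative helper mirroring Arc's drop logic.
fn drop_ref() -> bool {
    // Release: this owner's earlier uses of the data happen-before the decrement.
    if STRONG.fetch_sub(1, Ordering::Release) != 1 {
        return false; // other owners remain; nothing to free
    }
    // Acquire: synchronize with the other owners' Release decrements before
    // the (conceptual) deallocation, so all their uses are visible here.
    fence(Ordering::Acquire);
    true // exactly one caller gets to free the allocation
}

fn main() {
    let t = thread::spawn(drop_ref);
    let freed_here = drop_ref();
    let freed_there = t.join().unwrap();
    assert!(freed_here ^ freed_there); // exactly one owner observed zero
}
```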