Upgrade rust/crates/once_cell to 1.5.0 am: df3e3bfe05
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/once_cell/+/1495184
Change-Id: I4474c7268cf12b1bf2747e7bc0c65c25004b04f9
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index afdaae2..6221430 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,5 @@
{
"git": {
- "sha1": "d6b9907273d20bd3d1aa26f74d358b483469d122"
+ "sha1": "b1f2b57603fbf8c3d27453b3b2e556d503b39ebd"
}
}
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
new file mode 100644
index 0000000..7967bb9
--- /dev/null
+++ b/.github/workflows/ci.yaml
@@ -0,0 +1,27 @@
+name: CI
+on:
+ pull_request:
+ push:
+ branches: ["master", "staging", "trying"]
+
+env:
+ CARGO_INCREMENTAL: 0
+ CARGO_NET_RETRY: 10
+ CI: 1
+ RUST_BACKTRACE: short
+ RUSTFLAGS: -D warnings
+ RUSTUP_MAX_RETRIES: 10
+
+jobs:
+ test:
+ name: Rust
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ fetch-depth: 0 # fetch tags for publish
+
+ - run: cargo run -p xtask -- ci
+ env:
+ CRATES_IO_TOKEN: ${{ secrets.CRATES_IO_TOKEN }}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index de5e795..e656983 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,10 @@
# Changelog
+## 1.5.0
+
+- add a new `once_cell::race` module for a "first one wins", no_std-compatible initialization flavor.
+ The API is provisional, subject to change and is gated by the `unstable` cargo feature.
+
## 1.4.1
- upgrade `parking_lot` to `0.11.0`
diff --git a/Cargo.toml b/Cargo.toml
index b505be3..edd42db 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "once_cell"
-version = "1.4.1"
+version = "1.5.0"
authors = ["Aleksey Kladov <aleksey.kladov@gmail.com>"]
exclude = ["*.png", "*.svg", "/Cargo.lock.min", "/.travis.yml", "/run-miri-tests.sh", "rustfmt.toml"]
description = "Single assignment cells and lazy values."
@@ -23,6 +23,8 @@
categories = ["rust-patterns", "memory-management"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/matklad/once_cell"
+[package.metadata.docs.rs]
+all-features = true
[[example]]
name = "bench"
@@ -67,3 +69,4 @@
[features]
default = ["std"]
std = []
+unstable = []
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index 7768b12..d80baa0 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,6 +1,6 @@
[package]
name = "once_cell"
-version = "1.4.1"
+version = "1.5.0"
authors = ["Aleksey Kladov <aleksey.kladov@gmail.com>"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -15,6 +15,9 @@
exclude = ["*.png", "*.svg", "/Cargo.lock.min", "/.travis.yml", "/run-miri-tests.sh", "rustfmt.toml"]
+[workspace]
+members = ["xtask"]
+
[dependencies]
# Uses parking_lot to implement once_cell::sync::OnceCell.
# This makes not speed difference, but makes each OnceCell<T>
@@ -30,6 +33,8 @@
default = ["std"]
# Enables `once_cell::sync` module.
std = []
+# Enables semver-exempt APIs of this crate
+unstable = []
[[example]]
name = "bench"
@@ -58,3 +63,6 @@
[[example]]
name = "test_synchronization"
required-features = ["std"]
+
+[package.metadata.docs.rs]
+all-features = true
diff --git a/METADATA b/METADATA
index 01cc916..cd0a956 100644
--- a/METADATA
+++ b/METADATA
@@ -7,13 +7,13 @@
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/once_cell/once_cell-1.4.1.crate"
+ value: "https://static.crates.io/crates/once_cell/once_cell-1.5.0.crate"
}
- version: "1.4.1"
+ version: "1.5.0"
license_type: NOTICE
last_upgrade_date {
year: 2020
- month: 8
- day: 17
+ month: 11
+ day: 10
}
}
diff --git a/bors.toml b/bors.toml
new file mode 100644
index 0000000..b92b99a
--- /dev/null
+++ b/bors.toml
@@ -0,0 +1,2 @@
+status = [ "Rust" ]
+delete_merged_branches = true
diff --git a/src/imp_std.rs b/src/imp_std.rs
index bb076a6..d7ccc85 100644
--- a/src/imp_std.rs
+++ b/src/imp_std.rs
@@ -18,10 +18,6 @@
// `Waiter`, so we add the `PhantomData` appropriately.
state_and_queue: AtomicUsize,
_marker: PhantomData<*mut Waiter>,
- // FIXME: switch to `std::mem::MaybeUninit` once we are ready to bump MSRV
- // that far. It was stabilized in 1.36.0, so, if you are reading this and
- // it's higher than 1.46.0 outside, please send a PR! ;) (and do the same
- // for `Lazy`, while we are at it).
value: UnsafeCell<Option<T>>,
}
@@ -257,7 +253,6 @@
}
#[test]
- #[cfg_attr(miri, ignore)] // miri doesn't support threads
fn stampede_once() {
static O: OnceCell<()> = OnceCell::new();
static mut RUN: bool = false;
@@ -315,7 +310,6 @@
}
#[test]
- #[cfg_attr(miri, ignore)] // miri doesn't support threads
fn wait_for_force_to_finish() {
static O: OnceCell<()> = OnceCell::new();
diff --git a/src/lib.rs b/src/lib.rs
index fd36471..119de86 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -157,6 +157,51 @@
This macro can be useful to avoid the "compile regex on every loop iteration" problem.
+Another pattern would be a `LateInit` type for delayed initialization:
+
+
+```
+use once_cell::sync::OnceCell;
+
+#[derive(Debug)]
+pub struct LateInit<T> { cell: OnceCell<T> }
+
+impl<T> LateInit<T> {
+ pub fn init(&self, value: T) {
+ assert!(self.cell.set(value).is_ok())
+ }
+}
+
+impl<T> Default for LateInit<T> {
+ fn default() -> Self { LateInit { cell: OnceCell::default() } }
+}
+
+impl<T> std::ops::Deref for LateInit<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ self.cell.get().unwrap()
+ }
+}
+
+#[derive(Default, Debug)]
+struct A<'a> {
+ b: LateInit<&'a B<'a>>,
+}
+
+#[derive(Default, Debug)]
+struct B<'a> {
+ a: LateInit<&'a A<'a>>
+}
+
+fn build_cycle() {
+ let a = A::default();
+ let b = B::default();
+ a.b.init(&b);
+ b.a.init(&a);
+ println!("{:?}", a.b.a.b.a);
+}
+```
+
# Comparison with std
|`!Sync` types | Access Mode | Drawbacks |
@@ -994,3 +1039,196 @@
/// ```
fn _dummy() {}
}
+
+/// "First one wins" flavor of `OnceCell`.
+///
+/// If two threads race to initialize a type from the `race` module, they
+/// don't block; both may run the initialization function, but only one of
+/// them stores the result.
+///
+/// This module does not require `std` feature.
+#[cfg(feature = "unstable")]
+pub mod race {
+ use core::{
+ num::NonZeroUsize,
+ sync::atomic::{AtomicUsize, Ordering},
+ };
+ #[cfg(feature = "std")]
+ use std::{marker::PhantomData, ptr, sync::atomic::AtomicPtr};
+
+ #[derive(Default, Debug)]
+ pub struct OnceNonZeroUsize {
+ inner: AtomicUsize,
+ }
+
+ impl OnceNonZeroUsize {
+ pub const fn new() -> OnceNonZeroUsize {
+ OnceNonZeroUsize { inner: AtomicUsize::new(0) }
+ }
+
+ pub fn get(&self) -> Option<NonZeroUsize> {
+ let val = self.inner.load(Ordering::Acquire);
+ NonZeroUsize::new(val)
+ }
+
+ pub fn set(&self, value: NonZeroUsize) -> Result<(), ()> {
+ let val = self.inner.compare_and_swap(0, value.get(), Ordering::AcqRel);
+ if val == 0 {
+ Ok(())
+ } else {
+ Err(())
+ }
+ }
+
+ pub fn get_or_init<F>(&self, f: F) -> NonZeroUsize
+ where
+ F: FnOnce() -> NonZeroUsize,
+ {
+ enum Void {}
+ match self.get_or_try_init(|| Ok::<NonZeroUsize, Void>(f())) {
+ Ok(val) => val,
+ Err(void) => match void {},
+ }
+ }
+
+ pub fn get_or_try_init<F, E>(&self, f: F) -> Result<NonZeroUsize, E>
+ where
+ F: FnOnce() -> Result<NonZeroUsize, E>,
+ {
+ let val = self.inner.load(Ordering::Acquire);
+ let res = match NonZeroUsize::new(val) {
+ Some(it) => it,
+ None => {
+ let mut val = f()?.get();
+ let old_val = self.inner.compare_and_swap(0, val, Ordering::AcqRel);
+ if old_val != 0 {
+ val = old_val;
+ }
+ unsafe { NonZeroUsize::new_unchecked(val) }
+ }
+ };
+ Ok(res)
+ }
+ }
+
+ #[derive(Default, Debug)]
+ pub struct OnceBool {
+ inner: OnceNonZeroUsize,
+ }
+
+ impl OnceBool {
+ fn from_usize(value: NonZeroUsize) -> bool {
+ value.get() == 1
+ }
+ fn to_usize(value: bool) -> NonZeroUsize {
+ unsafe { NonZeroUsize::new_unchecked(if value { 1 } else { 2 }) }
+ }
+
+ pub const fn new() -> OnceBool {
+ OnceBool { inner: OnceNonZeroUsize::new() }
+ }
+
+ pub fn get(&self) -> Option<bool> {
+ self.inner.get().map(OnceBool::from_usize)
+ }
+
+ pub fn set(&self, value: bool) -> Result<(), ()> {
+ self.inner.set(OnceBool::to_usize(value))
+ }
+
+ pub fn get_or_init<F>(&self, f: F) -> bool
+ where
+ F: FnOnce() -> bool,
+ {
+ OnceBool::from_usize(self.inner.get_or_init(|| OnceBool::to_usize(f())))
+ }
+
+ pub fn get_or_try_init<F, E>(&self, f: F) -> Result<bool, E>
+ where
+ F: FnOnce() -> Result<bool, E>,
+ {
+ self.inner.get_or_try_init(|| f().map(OnceBool::to_usize)).map(OnceBool::from_usize)
+ }
+ }
+
+ #[derive(Default, Debug)]
+ #[cfg(feature = "std")]
+ pub struct OnceBox<T> {
+ inner: AtomicPtr<T>,
+ ghost: PhantomData<Option<Box<T>>>,
+ }
+
+ #[cfg(feature = "std")]
+ impl<T> Drop for OnceBox<T> {
+ fn drop(&mut self) {
+ let ptr = *self.inner.get_mut();
+ if !ptr.is_null() {
+ drop(unsafe { Box::from_raw(ptr) })
+ }
+ }
+ }
+
+ #[cfg(feature = "std")]
+ impl<T> OnceBox<T> {
+ pub const fn new() -> OnceBox<T> {
+ OnceBox { inner: AtomicPtr::new(ptr::null_mut()), ghost: PhantomData }
+ }
+
+ pub fn get(&self) -> Option<&T> {
+ let ptr = self.inner.load(Ordering::Acquire);
+ if ptr.is_null() {
+ return None;
+ }
+ Some(unsafe { &*ptr })
+ }
+
+ // TODO(review): should this return Result<(), Box<T>> so the caller can recover the value on failure?
+ pub fn set(&self, value: T) -> Result<(), ()> {
+ let ptr = Box::into_raw(Box::new(value));
+ if ptr.is_null() {
+ drop(unsafe { Box::from_raw(ptr) });
+ return Err(());
+ }
+ Ok(())
+ }
+
+ pub fn get_or_init<F>(&self, f: F) -> &T
+ where
+ F: FnOnce() -> T,
+ {
+ enum Void {}
+ match self.get_or_try_init(|| Ok::<T, Void>(f())) {
+ Ok(val) => val,
+ Err(void) => match void {},
+ }
+ }
+
+ pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
+ where
+ F: FnOnce() -> Result<T, E>,
+ {
+ let mut ptr = self.inner.load(Ordering::Acquire);
+
+ if ptr.is_null() {
+ let val = f()?;
+ ptr = Box::into_raw(Box::new(val));
+ let old_ptr = self.inner.compare_and_swap(ptr::null_mut(), ptr, Ordering::AcqRel);
+ if !old_ptr.is_null() {
+ drop(unsafe { Box::from_raw(ptr) });
+ ptr = old_ptr;
+ }
+ };
+ Ok(unsafe { &*ptr })
+ }
+ }
+
+ /// ```compile_fail
+ /// struct S(*mut ());
+ /// unsafe impl Sync for S {}
+ ///
+ /// fn share<T: Sync>(_: &T) {}
+ /// share(&once_cell::race::OnceBox::<S>::new());
+ /// ```
+ #[cfg(feature = "std")]
+ unsafe impl<T: Sync + Send> Sync for OnceBox<T> {}
+}
diff --git a/tests/test.rs b/tests/it.rs
similarity index 71%
rename from tests/test.rs
rename to tests/it.rs
index 18c86fe..05025e5 100644
--- a/tests/test.rs
+++ b/tests/it.rs
@@ -202,33 +202,10 @@
mod sync {
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+ use crossbeam_utils::thread::scope;
+
use once_cell::sync::{Lazy, OnceCell};
- #[cfg(not(miri))] // miri doesn't support threads
- mod scope {
- pub(super) use crossbeam_utils::thread::scope;
- }
-
- #[cfg(miri)] // "stub threads" for Miri
- mod scope {
- pub(super) struct Scope;
-
- #[cfg(miri)]
- impl Scope {
- pub(super) fn spawn<R>(&self, f: impl FnOnce(()) -> R) -> R {
- f(())
- }
- }
-
- #[cfg(miri)]
- pub(super) fn scope(f: impl FnOnce(&Scope)) -> Result<(), ()> {
- f(&Scope);
- Ok(())
- }
- }
-
- use scope::scope;
-
#[test]
fn once_cell() {
let c = OnceCell::new();
@@ -534,7 +511,7 @@
}
#[test]
- #[cfg_attr(miri, ignore)] // deadlocks without real threads
+ #[cfg_attr(miri, ignore)] // FIXME: deadlocks, likely caused by https://github.com/rust-lang/miri/issues/1388
fn once_cell_does_not_leak_partially_constructed_boxes() {
let n_tries = 100;
let n_readers = 10;
@@ -593,3 +570,216 @@
}
}
}
+
+#[cfg(feature = "unstable")]
+mod race {
+ use std::{
+ num::NonZeroUsize,
+ sync::{
+ atomic::{AtomicUsize, Ordering::SeqCst},
+ Barrier,
+ },
+ };
+
+ use crossbeam_utils::thread::scope;
+
+ use once_cell::race::{OnceBool, OnceNonZeroUsize};
+
+ #[test]
+ fn once_non_zero_usize_smoke_test() {
+ let cnt = AtomicUsize::new(0);
+ let cell = OnceNonZeroUsize::new();
+ let val = NonZeroUsize::new(92).unwrap();
+ scope(|s| {
+ s.spawn(|_| {
+ assert_eq!(
+ cell.get_or_init(|| {
+ cnt.fetch_add(1, SeqCst);
+ val
+ }),
+ val
+ );
+ assert_eq!(cnt.load(SeqCst), 1);
+
+ assert_eq!(
+ cell.get_or_init(|| {
+ cnt.fetch_add(1, SeqCst);
+ val
+ }),
+ val
+ );
+ assert_eq!(cnt.load(SeqCst), 1);
+ });
+ })
+ .unwrap();
+ assert_eq!(cell.get(), Some(val));
+ assert_eq!(cnt.load(SeqCst), 1);
+ }
+
+ #[test]
+ fn once_non_zero_usize_first_wins() {
+ let cell = OnceNonZeroUsize::new();
+ let val1 = NonZeroUsize::new(92).unwrap();
+ let val2 = NonZeroUsize::new(62).unwrap();
+
+ let b1 = Barrier::new(2);
+ let b2 = Barrier::new(2);
+ let b3 = Barrier::new(2);
+ scope(|s| {
+ s.spawn(|_| {
+ let r1 = cell.get_or_init(|| {
+ b1.wait();
+ b2.wait();
+ val1
+ });
+ assert_eq!(r1, val1);
+ b3.wait();
+ });
+ b1.wait();
+ s.spawn(|_| {
+ let r2 = cell.get_or_init(|| {
+ b2.wait();
+ b3.wait();
+ val2
+ });
+ assert_eq!(r2, val1);
+ });
+ })
+ .unwrap();
+
+ assert_eq!(cell.get(), Some(val1));
+ }
+
+ #[test]
+ fn once_bool_smoke_test() {
+ let cnt = AtomicUsize::new(0);
+ let cell = OnceBool::new();
+ scope(|s| {
+ s.spawn(|_| {
+ assert_eq!(
+ cell.get_or_init(|| {
+ cnt.fetch_add(1, SeqCst);
+ false
+ }),
+ false
+ );
+ assert_eq!(cnt.load(SeqCst), 1);
+
+ assert_eq!(
+ cell.get_or_init(|| {
+ cnt.fetch_add(1, SeqCst);
+ false
+ }),
+ false
+ );
+ assert_eq!(cnt.load(SeqCst), 1);
+ });
+ })
+ .unwrap();
+ assert_eq!(cell.get(), Some(false));
+ assert_eq!(cnt.load(SeqCst), 1);
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn once_box_smoke_test() {
+ #[derive(Debug)]
+ struct Pebble {
+ id: usize,
+ }
+ static TOTAL: AtomicUsize = AtomicUsize::new(0);
+
+ impl Pebble {
+ fn total() -> usize {
+ TOTAL.load(SeqCst)
+ }
+ fn new() -> Pebble {
+ let id = TOTAL.fetch_add(1, SeqCst);
+ Pebble { id }
+ }
+ }
+ impl Drop for Pebble {
+ fn drop(&mut self) {
+ TOTAL.fetch_sub(1, SeqCst);
+ }
+ }
+
+ let global_cnt = AtomicUsize::new(0);
+ let cell = once_cell::race::OnceBox::new();
+ let b = Barrier::new(128);
+ scope(|s| {
+ for _ in 0..128 {
+ s.spawn(|_| {
+ let local_cnt = AtomicUsize::new(0);
+ cell.get_or_init(|| {
+ global_cnt.fetch_add(1, SeqCst);
+ local_cnt.fetch_add(1, SeqCst);
+ b.wait();
+ Pebble::new()
+ });
+ assert_eq!(local_cnt.load(SeqCst), 1);
+
+ cell.get_or_init(|| {
+ global_cnt.fetch_add(1, SeqCst);
+ local_cnt.fetch_add(1, SeqCst);
+ Pebble::new()
+ });
+ assert_eq!(local_cnt.load(SeqCst), 1);
+ });
+ }
+ })
+ .unwrap();
+ assert!(cell.get().is_some());
+ assert!(global_cnt.load(SeqCst) > 10);
+
+ assert_eq!(Pebble::total(), 1);
+ drop(cell);
+ assert_eq!(Pebble::total(), 0);
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn once_box_first_wins() {
+ let cell = once_cell::race::OnceBox::new();
+ let val1 = 92;
+ let val2 = 62;
+
+ let b1 = Barrier::new(2);
+ let b2 = Barrier::new(2);
+ let b3 = Barrier::new(2);
+ scope(|s| {
+ s.spawn(|_| {
+ let r1 = cell.get_or_init(|| {
+ b1.wait();
+ b2.wait();
+ val1
+ });
+ assert_eq!(*r1, val1);
+ b3.wait();
+ });
+ b1.wait();
+ s.spawn(|_| {
+ let r2 = cell.get_or_init(|| {
+ b2.wait();
+ b3.wait();
+ val2
+ });
+ assert_eq!(*r2, val1);
+ });
+ })
+ .unwrap();
+
+ assert_eq!(cell.get(), Some(&val1));
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn once_box_reentrant() {
+ let cell = once_cell::race::OnceBox::new();
+ let res = cell.get_or_init(|| {
+ cell.get_or_init(|| "hello".to_string());
+ "world".to_string()
+ });
+ assert_eq!(res, "hello");
+ }
+}