pub struct AtomicIsize(/* private fields */);
Expand description
Creusot wrapper around std::sync::atomic::AtomicIsize.
Implementations§
Source§impl AtomicIsize
impl AtomicIsize
Source§pub fn new(
val: isize,
sync_view: Ghost<&mut SyncView>,
) -> (Self, Ghost<Perm<AtomicIsize>>)
pub fn new( val: isize, sync_view: Ghost<&mut SyncView>, ) -> (Self, Ghost<Perm<AtomicIsize>>)
ensures
result.1.val() == FMap::singleton(result.0.get_timestamp(^sync_view), (val, **sync_view))
ensures
*result.1.ward() == result.0
terminates
Source§pub fn into_inner(
self,
own: Ghost<Perm<AtomicIsize>>,
sync_view: Ghost<&mut SyncView>,
) -> (isize, Ghost<Timestamp>)
pub fn into_inner( self, own: Ghost<Perm<AtomicIsize>>, sync_view: Ghost<&mut SyncView>, ) -> (isize, Ghost<Timestamp>)
Wrapper for std::sync::atomic::AtomicIsize::into_inner.
requires
self == *own.ward()
ensures
match own.val().get(*result.1) { Some((v, _)) => result.0 == v, None => false }
ensures
self.get_timestamp(^sync_view) == *result.1
ensures
forall<t> match own.val().get(t) { Some((_, view)) => t <= *result.1 && view <= ^sync_view, None => true }
Source§pub fn refresh(
&mut self,
own: Ghost<&mut Perm<AtomicIsize>>,
sync_view: Ghost<&mut SyncView>,
) -> Ghost<Timestamp>
pub fn refresh( &mut self, own: Ghost<&mut Perm<AtomicIsize>>, sync_view: Ghost<&mut SyncView>, ) -> Ghost<Timestamp>
Clear the old unusable history, thanks to the full ownership of the atomic.
requires
*self == *own.ward()
ensures
match (*own).val().get(*result) { Some((v, _)) => (^own).val() == FMap::singleton(*result, (v, **sync_view)), None => false }
ensures
self.get_timestamp(^sync_view) == *result
ensures
forall<t> match own.val().get(t) { Some((_, view)) => t <= *result && view <= ^sync_view , None => true }
ensures
*self == ^self
terminates
Source§pub fn compare_exchange<F, Success: Ordering, Failure: Ordering>(
&self,
current: isize,
new: isize,
f: Ghost<F>,
) -> Result<isize, isize>
pub fn compare_exchange<F, Success: Ordering, Failure: Ordering>( &self, current: isize, new: isize, f: Ghost<F>, ) -> Result<isize, isize>
Wrapper for std::sync::atomic::AtomicIsize::compare_exchange.
The load and the store are always sequentially consistent.
requires
Success::ORDERING != Ordering::SeqCst::ORDERING
requires
Failure::ORDERING == Ordering::Acquire::ORDERING || Failure::ORDERING == Ordering::Relaxed::ORDERING
requires
forall<c: &mut Committer<Self, $type, _, _>> !c.shot_store() ==> c.ward() == *self ==> c.val_load().deep_model() == current.deep_model() ==> c.val_store() == new ==> f.precondition((Ok(c),)) && (f.postcondition_once((Ok(c),), ()) ==> (^c).shot_store())
requires
forall<c: &Committer<Self, $type, _, _>> !c.shot_store() ==> c.ward() == *self ==> // NOTE: This following line is not present for `weak` c.val_load().deep_model() != current.deep_model() ==> f.precondition((Err(c),))
ensures
match result { Ok(result) => { exists<c: &mut Committer<Self, $type, _, _>> !c.shot_store() && c.ward() == *self && c.val_load().deep_model() == current.deep_model() && c.val_store() == new && result == c.val_load() && f.postcondition_once((Ok(c),), ()) }, Err(result) => { exists<c: &Committer<Self, $type, _, _>> !c.shot_store() && c.ward() == *self && // NOTE: This following line is not present for `weak` c.val_load().deep_model() != current.deep_model() && result == c.val_load() && f.postcondition_once((Err(c),), ()) } }
Source§pub fn compare_exchange_weak<F, Success: Ordering, Failure: Ordering>(
&self,
current: isize,
new: isize,
f: Ghost<F>,
) -> Result<isize, isize>
pub fn compare_exchange_weak<F, Success: Ordering, Failure: Ordering>( &self, current: isize, new: isize, f: Ghost<F>, ) -> Result<isize, isize>
Wrapper for std::sync::atomic::AtomicIsize::compare_exchange_weak.
The load and the store are always sequentially consistent.
requires
Success::ORDERING != Ordering::SeqCst::ORDERING
requires
Failure::ORDERING == Ordering::Acquire::ORDERING || Failure::ORDERING == Ordering::Relaxed::ORDERING
requires
forall<c: &mut Committer<Self, $type, _, _>> // TODO: [VL] Wrong permission here (Success == Ordering::RelAcq) !c.shot_store() ==> c.ward() == *self ==> c.val_load().deep_model() == current.deep_model() ==> c.val_store() == new ==> f.precondition((Ok(c),)) && (f.postcondition_once((Ok(c),), ()) ==> (^c).shot_store())
requires
forall<c: &Committer<Self, $type, _, _>> !c.shot_store() ==> c.ward() == *self ==> f.precondition((Err(c),))
ensures
match result { Ok(result) => { exists<c: &mut Committer<Self, $type, _, _>> !c.shot_store() && c.ward() == *self && c.val_load().deep_model() == current.deep_model() && c.val_store() == new && result == c.val_load() && f.postcondition_once((Ok(c),), ()) }, Err(result) => { exists<c: &Committer<Self, $type, _, _>> !c.shot_store() && c.ward() == *self && result == c.val_load() && f.postcondition_once((Err(c),), ()) } }
Source§pub fn load<F, Load: Ordering>(&self, f: Ghost<F>) -> isize
pub fn load<F, Load: Ordering>(&self, f: Ghost<F>) -> isize
Wrapper for std::sync::atomic::AtomicIsize::load.
requires
Load::ORDERING == Ordering::Acquire::ORDERING || Load::ORDERING == Ordering::Relaxed::ORDERING
requires
forall<c: &Committer<Self, $type, Load, Ordering::None>> !c.shot_store() ==> c.ward() == *self ==> f.precondition((c,))
ensures
exists<c: &Committer<Self, $type, Load, Ordering::None>> !c.shot_store() && c.ward() == *self && c.val_load() == result && f.postcondition_once((c,), ())
Source§pub fn store<F, Store: Ordering>(&self, val: isize, f: Ghost<F>)
pub fn store<F, Store: Ordering>(&self, val: isize, f: Ghost<F>)
Wrapper for std::sync::atomic::AtomicIsize::store.
requires
Store::ORDERING == Ordering::Release::ORDERING || Store::ORDERING == Ordering::Relaxed::ORDERING
requires
forall<c: &mut Committer<Self, $type, Ordering::None, Store>> !c.shot_store() ==> c.ward() == *self ==> c.val_store() == val ==> f.precondition((c,)) && (f.postcondition_once((c,), ()) ==> (^c).shot_store())
ensures
exists<c: &mut Committer<Self, $type, Ordering::None, Store>> !c.shot_store() && c.ward() == *self && c.val_store() == val && f.postcondition_once((c,), ())
Source§impl AtomicIsize
impl AtomicIsize
Source§pub fn fetch_add<F, Ord: Ordering>(&self, val: isize, f: Ghost<F>) -> isize
pub fn fetch_add<F, Ord: Ordering>(&self, val: isize, f: Ghost<F>) -> isize
Wrapper for std::sync::atomic::AtomicIsize::fetch_add.
requires
forall<c: &mut Committer<Self, $int_type, Ord::Load, Ord::Store>> !c.shot_store() ==> c.ward() == *self ==> c.val_store() == val + c.val_load() ==> f.precondition((c,)) && (f.postcondition_once((c,), ()) ==> (^c).shot_store())
ensures
exists<c: &mut Committer<Self, $int_type, Ord::Load, Ord::Store>> !c.shot_store() && c.ward() == *self && c.val_store() == val + c.val_load() && c.val_load() == result && f.postcondition_once((c,), ())
Trait Implementations§
Source§impl HasTimestamp for AtomicIsize
impl HasTimestamp for AtomicIsize
Source§fn get_timestamp(self, _: SyncView) -> Timestamp
fn get_timestamp(self, _: SyncView) -> Timestamp
logic(opaque) ⚠
Source§fn get_timestamp_monotonic(self, x: SyncView, y: SyncView)
fn get_timestamp_monotonic(self, x: SyncView, y: SyncView)
logic(law) ⚠
requires
x <= y
ensures
self.get_timestamp(x) <= self.get_timestamp(y)