1use crate::{
2 ghost::{
3 FnGhost,
4 perm::{Perm, PermTarget},
5 },
6 logic::FMap,
7 prelude::*,
8 std::sync::{
9 committer::Committer,
10 view::{HasTimestamp, SyncView, Timestamp},
11 },
12};
13
#[allow(non_snake_case)]
pub mod Ordering {
    //! Type-level encodings of the `std::sync::atomic` memory orderings.

    use std::sync::atomic;

    /// A type-level memory ordering.
    ///
    /// `ORDERING` is the runtime ordering this type denotes, while `Load` and
    /// `Store` give the ordering's load component and store component
    /// (e.g. `AcqRel` decomposes into an `Acquire` load and a `Release` store).
    pub trait Ordering {
        const ORDERING: atomic::Ordering;

        type Load: Ordering;
        type Store: Ordering;
    }

    /// Marker type used where no ordering applies (it deliberately does not
    /// implement [`Ordering`]).
    pub struct None;

    /// Type-level `atomic::Ordering::Relaxed`.
    pub struct Relaxed;

    impl Ordering for Relaxed {
        const ORDERING: atomic::Ordering = atomic::Ordering::Relaxed;

        type Load = Relaxed;
        type Store = Relaxed;
    }

    /// Type-level `atomic::Ordering::Acquire`: acquire on the load side,
    /// relaxed on the store side.
    pub struct Acquire;

    impl Ordering for Acquire {
        const ORDERING: atomic::Ordering = atomic::Ordering::Acquire;

        type Load = Acquire;
        type Store = Relaxed;
    }

    /// Type-level `atomic::Ordering::Release`: relaxed on the load side,
    /// release on the store side.
    pub struct Release;

    impl Ordering for Release {
        const ORDERING: atomic::Ordering = atomic::Ordering::Release;

        type Load = Relaxed;
        type Store = Release;
    }

    /// Type-level `atomic::Ordering::AcqRel`: acquire load, release store.
    pub struct AcqRel;

    impl Ordering for AcqRel {
        const ORDERING: atomic::Ordering = atomic::Ordering::AcqRel;

        type Load = Acquire;
        type Store = Release;
    }

    /// Type-level `atomic::Ordering::SeqCst`: sequentially consistent on both
    /// sides.
    pub struct SeqCst;

    impl Ordering for SeqCst {
        const ORDERING: atomic::Ordering = atomic::Ordering::SeqCst;

        type Load = SeqCst;
        type Store = SeqCst;
    }
}
47
48use Ordering::Ordering as _;
49
// Generates, for each `(value_type, AtomicType)` pair (optionally generic, as
// for `AtomicPtr<T>`), a Creusot wrapper struct around the corresponding
// `std::sync::atomic` type, its ghost-permission machinery, and specified
// versions of the atomic operations. All runtime bodies are `#[trusted]`
// delegations to the standard library; the value of this macro is in the
// `#[requires]`/`#[ensures]` specifications.
macro_rules! impl_atomic {
    ($( ($type:ty, $atomic_type:ident $(< $T:ident >)?) ),+) => { $(

        #[doc = concat!("Creusot wrapper around [`std::sync::atomic::", stringify!($atomic_type), "`].")]
        pub struct $atomic_type $(< $T >)?(::std::sync::atomic::$atomic_type $(< $T >)?);

        // The logical content of a permission for this atomic: the history of
        // writes, as a map from timestamps to (written value, writer's view).
        impl $(< $T >)? PermTarget for $atomic_type $(< $T >)? {
            type Value<'a> = FMap<Timestamp, ($type, SyncView)> where Self: 'a;
            type PermPayload = ();
        }

        impl $(< $T >)? HasTimestamp for $atomic_type $(< $T >)? {
            // Opaque logical function: the timestamp of the latest write to
            // this atomic that is visible in the given view.
            #[logic(opaque)]
            fn get_timestamp(self, _: SyncView) -> Timestamp {
                dead
            }

            // Trusted law: the visible timestamp can only grow as the view
            // grows.
            #[logic(law)]
            #[requires(x <= y)]
            #[ensures(self.get_timestamp(x) <= self.get_timestamp(y))]
            #[trusted]
            fn get_timestamp_monotonic(self, x: SyncView, y: SyncView) {}
        }

        impl $(< $T >)? $atomic_type $(< $T >)? {
            // Creates the atomic together with its ghost permission. The
            // initial history is a singleton entry: the initial value tagged
            // with the creating thread's current view.
            #[ensures(result.1.val() == FMap::singleton(result.0.get_timestamp(^sync_view), (val, **sync_view)))]
            #[ensures(*result.1.ward() == result.0)]
            #[inline(always)]
            #[trusted]
            #[check(terminates)]
            #[allow(unused_variables)]
            pub fn new(val: $type, sync_view: Ghost<&mut SyncView>) -> (Self, Ghost<Perm<$atomic_type $(< $T >)?>>) {
                (Self(std::sync::atomic::$atomic_type::new(val)), Ghost::conjure())
            }

            #[doc = concat!("Wrapper for [`std::sync::atomic::", stringify!($atomic_type), "::into_inner`].")]
            // Consuming the atomic requires its full permission. The returned
            // ghost timestamp identifies the final write, which dominates
            // every entry of the history (both in timestamp and in view).
            #[requires(self == *own.ward())]
            #[ensures(match own.val().get(*result.1) { Some((v, _)) => result.0 == v, None => false })]
            #[ensures(self.get_timestamp(^sync_view) == *result.1)]
            #[ensures(forall<t> match own.val().get(t) {
                Some((_, view)) => t <= *result.1 && view <= ^sync_view,
                None => true
            })]
            #[inline(always)]
            #[trusted]
            #[allow(unused_variables)]
            pub fn into_inner(self, own: Ghost<Perm<$atomic_type $(< $T >)?>>, sync_view: Ghost<&mut SyncView>) -> ($type, Ghost<Timestamp>) {
                (self.0.into_inner(), Ghost::conjure())
            }


            #[doc = "Clear the old unusable history, thanks to the full ownership of the atomic."]
            // Ghost-only operation: with exclusive access (`&mut self` plus
            // the full permission), collapse the history to a singleton
            // holding the latest value. The runtime state is untouched
            // (`*self == ^self`) and the body is pure ghost conjuring.
            #[requires(*self == *own.ward())]
            #[ensures(match (*own).val().get(*result) {
                Some((v, _)) => (^own).val() == FMap::singleton(*result, (v, **sync_view)),
                None => false
            })]
            #[ensures(self.get_timestamp(^sync_view) == *result)]
            #[ensures(forall<t> match own.val().get(t) {
                Some((_, view)) => t <= *result && view <= ^sync_view ,
                None => true
            })]
            #[ensures(*self == ^self)]
            #[inline(always)]
            #[trusted]
            #[check(terminates)]
            #[allow(unused_variables)]
            pub fn refresh(&mut self, own: Ghost<&mut Perm<$atomic_type $(< $T >)?>>, sync_view: Ghost<&mut SyncView>) -> Ghost<Timestamp> {
                Ghost::conjure()
            }

            #[doc = concat!("Wrapper for [`std::sync::atomic::", stringify!($atomic_type), "::compare_exchange`].")]
            #[doc = ""]
            #[doc = "The load and the store are always sequentially consistent."]
            // NOTE(review): the doc line above claims SeqCst accesses, but the
            // call below passes `Success::ORDERING`/`Failure::ORDERING` and the
            // precondition forbids a SeqCst success ordering — the doc looks
            // stale; confirm which is intended.
            //
            // The ghost closure `f` is the commit point: on success it gets a
            // mutable committer (matching load, storing `new`) that it must
            // leave "shot"; on failure it only observes a shared committer.
            #[requires(Success::ORDERING != Ordering::SeqCst::ORDERING)]
            #[requires(Failure::ORDERING == Ordering::Acquire::ORDERING || Failure::ORDERING == Ordering::Relaxed::ORDERING)]
            // `f` must accept every possible success committer…
            #[requires(forall<c: &mut Committer<Self, $type, _, _>>
                !c.shot_store() ==> c.ward() == *self ==>
                c.val_load().deep_model() == current.deep_model() ==>
                c.val_store() == new ==>
                f.precondition((Ok(c),)) && (f.postcondition_once((Ok(c),), ()) ==> (^c).shot_store())
            )]
            // …and every possible failure committer (loaded value differs
            // from `current`).
            #[requires(forall<c: &Committer<Self, $type, _, _>>
                !c.shot_store() ==> c.ward() == *self ==>
                c.val_load().deep_model() != current.deep_model() ==>
                f.precondition((Err(c),))
            )]
            #[ensures(
                match result {
                    Ok(result) => {
                        exists<c: &mut Committer<Self, $type, _, _>>
                            !c.shot_store() && c.ward() == *self &&
                            c.val_load().deep_model() == current.deep_model() &&
                            c.val_store() == new &&
                            result == c.val_load() &&
                            f.postcondition_once((Ok(c),), ())
                    },
                    Err(result) => {
                        exists<c: &Committer<Self, $type, _, _>>
                            !c.shot_store() && c.ward() == *self &&
                            c.val_load().deep_model() != current.deep_model() &&
                            result == c.val_load() &&
                            f.postcondition_once((Err(c),), ())
                    }
                }
            )]
            #[inline(always)]
            #[trusted]
            #[allow(unused_variables)]
            pub fn compare_exchange<F, Success: Ordering::Ordering, Failure: Ordering::Ordering>(&self, current: $type, new: $type, f: Ghost<F>) -> Result<$type, $type>
            where
                F: FnGhost + FnOnce(Result<
                    &mut Committer<Self, $type, Success::Load, Success::Store>,
                    &Committer<Self, $type, Failure, Ordering::None>
                >,
                )
            {
                self.0.compare_exchange(current, new, Success::ORDERING, Failure::ORDERING)
            }

            #[doc = concat!("Wrapper for [`std::sync::atomic::", stringify!($atomic_type), "::compare_exchange_weak`].")]
            #[doc = ""]
            #[doc = "The load and the store are always sequentially consistent."]
            // Weak variant: it may fail spuriously, so — unlike in
            // `compare_exchange` — the failure committer's loaded value is
            // unconstrained (no `deep_model()` disequality below).
            #[requires(Success::ORDERING != Ordering::SeqCst::ORDERING)] #[requires(Failure::ORDERING == Ordering::Acquire::ORDERING || Failure::ORDERING == Ordering::Relaxed::ORDERING)]
            #[requires(forall<c: &mut Committer<Self, $type, _, _>> !c.shot_store() ==> c.ward() == *self ==>
                c.val_load().deep_model() == current.deep_model() ==>
                c.val_store() == new ==>
                f.precondition((Ok(c),)) && (f.postcondition_once((Ok(c),), ()) ==> (^c).shot_store())
            )]
            #[requires(forall<c: &Committer<Self, $type, _, _>>
                !c.shot_store() ==> c.ward() == *self ==>
                f.precondition((Err(c),))
            )]
            #[ensures(
                match result {
                    Ok(result) => {
                        exists<c: &mut Committer<Self, $type, _, _>>
                            !c.shot_store() && c.ward() == *self &&
                            c.val_load().deep_model() == current.deep_model() &&
                            c.val_store() == new &&
                            result == c.val_load() &&
                            f.postcondition_once((Ok(c),), ())
                    },
                    Err(result) => {
                        exists<c: &Committer<Self, $type, _, _>>
                            !c.shot_store() && c.ward() == *self &&
                            result == c.val_load() &&
                            f.postcondition_once((Err(c),), ())
                    }
                }
            )]
            #[inline(always)]
            #[trusted]
            #[allow(unused_variables)]
            pub fn compare_exchange_weak<F, Success: Ordering::Ordering, Failure: Ordering::Ordering>(&self, current: $type, new: $type, f: Ghost<F>) -> Result<$type, $type>
            where
                F: FnGhost + FnOnce(Result<
                    &mut Committer<Self, $type, Success::Load, Success::Store>,
                    &Committer<Self, $type, Failure, Ordering::None>
                >,
                )
            {
                self.0.compare_exchange_weak(current, new, Success::ORDERING, Failure::ORDERING)
            }

            #[doc = concat!("Wrapper for [`std::sync::atomic::", stringify!($atomic_type), "::load`].")]
            // A load commits no write: the committer is shared and its store
            // slot is `Ordering::None`.
            #[requires(Load::ORDERING == Ordering::Acquire::ORDERING || Load::ORDERING == Ordering::Relaxed::ORDERING)]
            #[requires(forall<c: &Committer<Self, $type, Load, Ordering::None>>
                !c.shot_store() ==> c.ward() == *self ==> f.precondition((c,))
            )]
            #[ensures(exists<c: &Committer<Self, $type, Load, Ordering::None>>
                !c.shot_store() && c.ward() == *self && c.val_load() == result && f.postcondition_once((c,), ())
            )]
            #[inline(always)]
            #[trusted]
            #[allow(unused_variables)]
            pub fn load<F, Load: Ordering::Ordering>(&self, f: Ghost<F>) -> $type
            where
                F: FnGhost + FnOnce(&Committer<Self, $type, Load, Ordering::None>),
            {
                // Under the `sc-drf` feature the runtime access is upgraded
                // to SeqCst; the specification still only assumes `Load`.
                self.0.load(if cfg!(feature = "sc-drf") {
                    Ordering::SeqCst::ORDERING
                } else {
                    Load::ORDERING
                })
            }

            #[doc = concat!("Wrapper for [`std::sync::atomic::", stringify!($atomic_type), "::store`].")]
            // Dual of `load`: the committer is exclusive, its load slot is
            // `Ordering::None`, and `f` must leave the store "shot".
            #[requires(Store::ORDERING == Ordering::Release::ORDERING || Store::ORDERING == Ordering::Relaxed::ORDERING)]
            #[requires(forall<c: &mut Committer<Self, $type, Ordering::None, Store>>
                !c.shot_store() ==> c.ward() == *self ==> c.val_store() == val ==>
                f.precondition((c,)) && (f.postcondition_once((c,), ()) ==> (^c).shot_store())
            )]
            #[ensures(exists<c: &mut Committer<Self, $type, Ordering::None, Store>>
                !c.shot_store() && c.ward() == *self && c.val_store() == val &&
                f.postcondition_once((c,), ())
            )]
            #[inline(always)]
            #[trusted]
            #[allow(unused_variables)]
            pub fn store<F, Store: Ordering::Ordering>(&self, val: $type, f: Ghost<F>)
            where
                F: FnGhost + FnOnce(&mut Committer<Self, $type, Ordering::None, Store>),
            {
                // Same `sc-drf` runtime upgrade as in `load`.
                self.0.store(
                    val,
                    if cfg!(feature = "sc-drf") {
                        Ordering::SeqCst::ORDERING
                    } else {
                        Store::ORDERING
                    },
                )
            }
        }

    )* };
}
272
// Generates the integer atomic wrappers: the common API from `impl_atomic!`
// plus the arithmetic read-modify-write operation (`fetch_add`).
macro_rules! impl_atomic_int {
    ($( ($int_type:ty, $atomic_type:ident) ),+) => { $(

        impl_atomic!(($int_type, $atomic_type));

        impl $atomic_type {
            #[doc = concat!("Wrapper for [`std::sync::atomic::", stringify!($atomic_type), "::fetch_add`].")]
            // The committer both loads (the previous value, also returned)
            // and stores (the sum); `f` must leave the store "shot".
            #[requires(forall<c: &mut Committer<Self, $int_type, Ord::Load, Ord::Store>>
                !c.shot_store() ==> c.ward() == *self ==> c.val_store() == val + c.val_load() ==>
                f.precondition((c,)) && (f.postcondition_once((c,), ()) ==> (^c).shot_store())
            )]
            #[ensures(exists<c: &mut Committer<Self, $int_type, Ord::Load, Ord::Store>>
                !c.shot_store() && c.ward() == *self && c.val_store() == val + c.val_load() &&
                c.val_load() == result && f.postcondition_once((c,), ())
            )]
            #[trusted]
            #[allow(unused_variables)]
            pub fn fetch_add<F, Ord: Ordering::Ordering>(&self, val: $int_type, f: Ghost<F>) -> $int_type
            where
                F: FnGhost + FnOnce(&mut Committer<Self, $int_type, Ord::Load, Ord::Store>),
            {
                // NOTE(review): the runtime ordering is always SeqCst and
                // `Ord` is ignored here — unlike `load`/`store` there is no
                // `sc-drf` feature check. Confirm this is intentional.
                self.0.fetch_add(val, Ordering::SeqCst::ORDERING)
            }
        }

    )* };
}
300
// Instantiate the common atomic API for the non-integer atomics.
// `AtomicPtr` is generic over the pointee type `T`.
impl_atomic! {
    (bool, AtomicBool),
    (*mut T, AtomicPtr<T>)
}
305
// Instantiate the integer atomic API (common operations plus `fetch_add`)
// for every fixed-width and pointer-sized integer atomic.
impl_atomic_int! {
    (i8, AtomicI8),
    (u8, AtomicU8),
    (i16, AtomicI16),
    (u16, AtomicU16),
    (i32, AtomicI32),
    (u32, AtomicU32),
    (i64, AtomicI64),
    (u64, AtomicU64),
    (isize, AtomicIsize),
    (usize, AtomicUsize)
}