//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
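// Flag notes: `-Zmiri-preemption-rate=0` disables random preemption so the
// interleaving below is deterministic, `-Zmiri-disable-weak-memory-emulation`
// turns off weak-memory reordering, and `-Zmiri-disable-stacked-borrows`
// disables the aliasing checker (presumably so an aliasing error does not
// fire before the data race is reached).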
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, Ordering};
use std::thread::{sleep, spawn};
use std::time::Duration;
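
// Raw pointers are neither `Send` nor `Sync`, so this wrapper unsafely
// asserts both in order to smuggle `&pointer` into the spawned threads.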
#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
    // Shared atomic pointer
    let pointer = AtomicPtr::new(null_mut::<usize>());
    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

    // Note: this is scheduler-dependent. The operations need to occur in
    // this order, otherwise the allocation is not visible to the other
    // thread and the race cannot be detected:
    //  1. stack-allocate
    //  2. read
    //  3. stack-deallocate
    unsafe {
        let j1 = spawn(move || {
            let ptr = ptr; // avoid field capturing
            let pointer = &*ptr.0;
            {
                let mut stack_var = 0usize;
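                // Publish the stack variable's address; this `Release` store
                // pairs with the `Acquire` load in `j2`.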
                pointer.store(&mut stack_var as *mut _, Ordering::Release);
                sleep(Duration::from_millis(200));
                // Now `stack_var` gets deallocated.
            } //~ ERROR: Data race detected between (1) Read on thread `<unnamed>` and (2) Deallocate on thread `<unnamed>`
        });

        let j2 = spawn(move || {
            let ptr = ptr; // avoid field capturing
            let pointer = &*ptr.0;
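            // Dereference whatever `j1` published. This read is not ordered
            // with the end of the inner scope in `j1`, so it races with the
            // deallocation of `stack_var`.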
            *pointer.load(Ordering::Acquire)
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}