path: root/src/tools/miri/tests/fail/data_race/alloc_read_race.rs
blob: 42077dfae2d38dd0f57d7fcb9764de7df3106b5b
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
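// The flags above disable preemption (for a deterministic schedule), weak-memory
// emulation, and Stacked Borrows, so the data-race detector is the check being
// exercised by this test.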
#![feature(new_uninit)]

use std::mem::MaybeUninit;
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, Ordering};
use std::thread::spawn;

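// Wrapper that unsafely declares any `T` to be Send + Sync so the raw pointer
// below can be moved into the spawned threads.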
#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

pub fn main() {
    // Shared atomic pointer
    let pointer = AtomicPtr::new(null_mut::<MaybeUninit<usize>>());
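    // Wrap a raw pointer to the atomic so both spawned closures can capture it by value.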
    let ptr = EvilSend(&pointer as *const AtomicPtr<MaybeUninit<usize>>);

    // Note: this test is scheduler-dependent.
    // The operations need to occur in
    // order, otherwise the allocation is
    // not visible to the other thread,
    // and the race is not detected:
    //  1. alloc
    //  2. write
    unsafe {
        let j1 = spawn(move || {
            let ptr = ptr; // avoid field capturing
            // Concurrently allocate the memory.
            // Uses relaxed semantics so as not to generate
            // a release sequence.
            let pointer = &*ptr.0;
            pointer.store(Box::into_raw(Box::new_uninit()), Ordering::Relaxed);
        });

        let j2 = spawn(move || {
            let ptr = ptr; // avoid field capturing
            let pointer = &*ptr.0;

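            // This relaxed load does not synchronize with the relaxed store in the
            // other thread, so dereferencing the pointer races with the allocation.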
            // Note: could also error due to reading uninitialized memory, but the data-race detector triggers first.
            *pointer.load(Ordering::Relaxed) //~ ERROR: Data race detected between (1) Allocate on thread `<unnamed>` and (2) Read on thread `<unnamed>`
        });

        j1.join().unwrap();
        j2.join().unwrap();

        // Clean up the memory; this is never executed because Miri aborts on the data race above.
        drop(Box::from_raw(pointer.load(Ordering::Relaxed)));
    }
}