path: root/library/core/src/ptr/mod.rs
Diffstat (limited to 'library/core/src/ptr/mod.rs')
-rw-r--r--  library/core/src/ptr/mod.rs  37
1 file changed, 22 insertions, 15 deletions
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 13e546497f2..ecbf4e66fa4 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -1133,7 +1133,8 @@ pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
/// [valid]: self#safety
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+#[rustc_const_stable(feature = "const_ptr_read", since = "CURRENT_RUSTC_VERSION")]
+#[rustc_allow_const_fn_unstable(const_mut_refs, const_maybe_uninit_as_mut_ptr)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn read<T>(src: *const T) -> T {
// It would be semantically correct to implement this via `copy_nonoverlapping`
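This hunk marks `ptr::read` as callable in stable const contexts. A minimal sketch of what that enables, assuming a toolchain where `const_ptr_read` is stable (the constant names are illustrative, not part of this patch):

    use core::ptr;

    // Evaluated entirely at compile time.
    const VALUE: u32 = {
        let x = 7u32;
        // SAFETY: `&x` is a valid, aligned pointer to an initialized `u32`.
        unsafe { ptr::read(&x) }
    };
    const _: () = assert!(VALUE == 7);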
@@ -1249,7 +1250,8 @@ pub const unsafe fn read<T>(src: *const T) -> T {
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
-#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+#[rustc_const_stable(feature = "const_ptr_read", since = "CURRENT_RUSTC_VERSION")]
+#[rustc_allow_const_fn_unstable(const_mut_refs, const_maybe_uninit_as_mut_ptr)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
let mut tmp = MaybeUninit::<T>::uninit();
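`read_unaligned` is const-stabilized in the same way. A small illustrative sketch (the constants are hypothetical); note the result is endianness-dependent:

    use core::ptr;

    const BYTES: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
    // Read a `u32` from an odd (unaligned) offset at compile time.
    // SAFETY: the pointer is valid for a 4-byte read within `BYTES`.
    const WORD: u32 = unsafe { ptr::read_unaligned(BYTES.as_ptr().add(1) as *const u32) };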
@@ -1349,13 +1351,13 @@ pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn write<T>(dst: *mut T, src: T) {
- // We are calling the intrinsics directly to avoid function calls in the generated code
- // as `intrinsics::copy_nonoverlapping` is a wrapper function.
- extern "rust-intrinsic" {
- #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
- #[rustc_nounwind]
- fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
- }
+ // Semantically, it would be fine for this to be implemented as a
+ // `copy_nonoverlapping` and appropriate drop suppression of `src`.
+
+ // However, implementing via that currently produces more MIR than is ideal.
+ // Using an intrinsic keeps it down to just the simple `*dst = move src` in
+ // MIR (11 statements shorter, at the time of writing), and also allows
+ // `src` to stay an SSA value in codegen_ssa, rather than a memory one.
// SAFETY: the caller must guarantee that `dst` is valid for writes.
// `dst` cannot overlap `src` because the caller has mutable access
@@ -1365,8 +1367,7 @@ pub const unsafe fn write<T>(dst: *mut T, src: T) {
"ptr::write requires that the pointer argument is aligned and non-null",
[T](dst: *mut T) => is_aligned_and_not_null(dst)
);
- copy_nonoverlapping(&src as *const T, dst, 1);
- intrinsics::forget(src);
+ intrinsics::write_via_move(dst, src)
}
}
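For context, the `copy_nonoverlapping` plus drop-suppression formulation that the new comment calls semantically fine looks roughly like this sketch (`write_like` is a made-up name, not part of the patch):

    use core::{mem, ptr};

    unsafe fn write_like<T>(dst: *mut T, src: T) {
        // SAFETY: the caller guarantees `dst` is valid for writes and properly
        // aligned; `dst` cannot overlap `src`, which is owned by this function.
        unsafe { ptr::copy_nonoverlapping(&src as *const T, dst, 1) };
        // Ownership of the value now lives in `*dst`, so suppress the drop of `src`.
        mem::forget(src);
    }

The patch instead lowers to the `write_via_move` intrinsic, which, as the comment notes, keeps the MIR down to a single `*dst = move src` statement.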
@@ -1633,8 +1634,8 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
// 1, where the method versions of these operations are not inlined.
use intrinsics::{
- cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl, unchecked_shr,
- unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
+ assume, cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl,
+ unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
};
/// Calculate multiplicative modular inverse of `x` modulo `m`.
@@ -1725,12 +1726,18 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// in a branch-free way and then bitwise-OR it with whatever result the `-p mod a`
// computation produces.
+ let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
+ let byte_offset = wrapping_sub(aligned_address, addr);
+ // FIXME: Remove the assume after <https://github.com/llvm/llvm-project/issues/62502>
+ // SAFETY: Masking by `-a` can only affect the low bits, and thus cannot have reduced
+ // the value by more than `a-1`, so even though the intermediate values might have
+ // wrapped, the byte_offset is always in `[0, a)`.
+ unsafe { assume(byte_offset < a) };
+
// SAFETY: `stride == 0` case has been handled by the special case above.
let addr_mod_stride = unsafe { unchecked_rem(addr, stride) };
return if addr_mod_stride == 0 {
- let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
- let byte_offset = wrapping_sub(aligned_address, addr);
// SAFETY: `stride` is non-zero. This is guaranteed to divide exactly as well, because
// addr has been verified to be aligned to the original type’s alignment requirements.
unsafe { exact_div(byte_offset, stride) }
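The invariant that the new `assume` communicates to LLVM can be checked with ordinary wrapping arithmetic outside the standard library; a stand-alone sketch (the function name is illustrative):

    /// Round `addr` up to the power-of-two alignment `a` and return the
    /// byte offset needed, using the same wrapping ops as `align_offset`.
    fn byte_offset_to_align(addr: usize, a: usize) -> usize {
        debug_assert!(a.is_power_of_two());
        let aligned_address = addr.wrapping_add(a - 1) & 0usize.wrapping_sub(a);
        let byte_offset = aligned_address.wrapping_sub(addr);
        // Masking by `-a` only clears low bits, so even if the intermediate
        // addition wrapped, the offset always lands in `[0, a)`.
        debug_assert!(byte_offset < a);
        byte_offset
    }

    fn main() {
        assert_eq!(byte_offset_to_align(13, 8), 3); // 13 rounds up to 16
        assert_eq!(byte_offset_to_align(16, 8), 0); // already aligned
    }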