@@ -237,7 +237,7 @@
//! pointer. For code which *does* cast a usize to a pointer, the scope of the change depends
//! on exactly what you're doing.
//!
-//! In general you just need to make sure that if you want to convert a usize address to a
+//! In general, you just need to make sure that if you want to convert a usize address to a
//! pointer and then use that pointer to read/write memory, you need to keep around a pointer
//! that has sufficient provenance to perform that read/write itself. In this way all of your
//! casts from an address to a pointer are essentially just applying offsets/indexing.
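
The paragraph above describes the intended pattern: keep one pointer that carries the allocation's provenance, do your arithmetic on plain `usize` addresses, and rebuild usable pointers from that base pointer rather than from the bare integer. A minimal sketch of that pattern, assuming the strict-provenance helpers `addr` and `with_addr` on raw pointers (`align_up` is a made-up helper, not a library API):

```rust
/// Round `base` up to `align` (a power of two) without losing provenance.
/// A sketch of the pattern described above, not the library's own code.
fn align_up(base: *mut u8, align: usize) -> *mut u8 {
    assert!(align.is_power_of_two());
    // Address-only view: a plain usize with no provenance attached.
    let addr = base.addr();
    let aligned = addr.wrapping_add(align - 1) & !(align - 1);
    // `with_addr` re-attaches `base`'s provenance to the new address, so the
    // result may be read/written as long as it still lies inside the original
    // allocation. Casting `aligned as *mut u8` instead would lose that provenance.
    base.with_addr(aligned)
}
```
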
@@ -309,7 +309,7 @@
//!   i.e. the usual "ZSTs are fake, do what you want" rules apply *but* this only applies
//!   for actual forgery (integers cast to pointers). If you borrow some struct's field
//!   that *happens* to be zero-sized, the resulting pointer will have provenance tied to
-//!   that allocation and it will still get invalidated if the allocation gets deallocated.
+//!   that allocation, and it will still get invalidated if the allocation gets deallocated.
//!   In the future we may introduce an API to make such a forged allocation explicit.
//!
//! * [`wrapping_offset`][] a pointer outside its provenance. This includes pointers
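
To illustrate the ZST note above (the `wrapping_offset` bullet is cut off by the hunk boundary): a pointer obtained by borrowing a zero-sized field is not a forged pointer, so its provenance still dies with the allocation. A small sketch with a made-up type `S`:

```rust
use core::ptr;

struct S {
    _data: u64,
    marker: (), // zero-sized field
}

fn main() {
    let zst_ptr: *const ();
    {
        let s = S { _data: 1, marker: () };
        // Borrowing the ZST field yields a pointer whose provenance is tied
        // to `s`'s allocation, even though the field has size 0.
        zst_ptr = ptr::addr_of!(s.marker);
        // Using it while `s` is alive is fine.
        unsafe { zst_ptr.read() };
    }
    // `s` has been deallocated, so reading through `zst_ptr` here would be
    // undefined behavior -- unlike a pointer "forged" from a bare address
    // (e.g. `core::ptr::dangling::<()>()`), which never had real provenance.
}
```
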
@@ -698,7 +698,7 @@ pub const fn dangling_mut<T>() -> *mut T {
///
/// If there is no 'exposed' provenance that justifies the way this pointer will be used,
/// the program has undefined behavior. In particular, the aliasing rules still apply: pointers
-/// and references that have been invalidated due to aliasing accesses cannot be used any more,
+/// and references that have been invalidated due to aliasing accesses cannot be used anymore,
/// even if they have been exposed!
///
/// Note that there is no algorithm that decides which provenance will be used. You can think of this
@@ -728,8 +728,8 @@ pub const fn dangling_mut<T>() -> *mut T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[allow(fuzzy_provenance_casts)] // this *is* the explicit provenance API one should use instead
pub fn with_exposed_provenance<T>(addr: usize) -> *const T
-where
-    T: Sized,
+    where
+    T: Sized,
{
    // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
    addr as *const T
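
As a usage sketch of the API documented above: an address only becomes usable again through `with_exposed_provenance` if a pointer with suitable provenance was previously exposed, e.g. via `expose_provenance`:

```rust
use core::ptr;

fn main() {
    let x = 42u32;
    let p: *const u32 = &x;
    // Exposing the provenance also returns the pointer's address.
    let addr: usize = p.expose_provenance();
    // ... the address can now travel through integer-only code ...
    // Re-create a pointer from the bare address. This is sound only because
    // a pointer to `x` with suitable provenance was exposed above.
    let q: *const u32 = ptr::with_exposed_provenance(addr);
    assert_eq!(unsafe { *q }, 42);
}
```
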
@@ -768,8 +768,8 @@ where
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[allow(fuzzy_provenance_casts)] // this *is* the explicit provenance API one should use instead
pub fn with_exposed_provenance_mut<T>(addr: usize) -> *mut T
-where
-    T: Sized,
+    where
+    T: Sized,
{
    // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
    addr as *mut T
@@ -1067,7 +1067,7 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
    // because we don't want to pessimize things like slices of SIMD vectors.)
    if mem::align_of::<T>() <= mem::size_of::<usize>()
        && (!mem::size_of::<T>().is_power_of_two()
-            || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
+    || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
    {
        attempt_swap_as_chunks!(usize);
        attempt_swap_as_chunks!(u8);
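
The block above chooses when `swap_nonoverlapping` falls back to swapping the bytes as `usize`/`u8` chunks rather than whole typed elements. A small usage example of the public function itself, independent of which internal path is taken:

```rust
use core::ptr;

fn main() {
    let mut a = [1u8, 2, 3, 4, 5];
    let mut b = [6u8, 7, 8, 9, 10];
    // Swap five `u8`s between two buffers that are known not to overlap.
    unsafe { ptr::swap_nonoverlapping(a.as_mut_ptr(), b.as_mut_ptr(), 5) };
    assert_eq!(a, [6, 7, 8, 9, 10]);
    assert_eq!(b, [1, 2, 3, 4, 5]);
}
```
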
@@ -1097,7 +1097,7 @@ const unsafe fn swap_nonoverlapping_simple_untyped<T>(x: *mut T, y: *mut T, coun
    // If we end up here, it's because we're using a simple type -- like
    // a small power-of-two-sized thing -- or a special type with particularly
    // large alignment, particularly SIMD types.
-    // Thus we're fine just reading-and-writing it, as either it's small
+    // Thus, we're fine just reading-and-writing it, as either it's small
    // and that works well anyway or it's special and the type's author
    // presumably wanted things to be done in the larger chunk.

@@ -1290,7 +1290,7 @@ pub const unsafe fn read<T>(src: *const T) -> T {
    // provides enough information to know that this is a typed operation.

    // However, as of March 2023 the compiler was not capable of taking advantage
-    // of that information. Thus the implementation here switched to an intrinsic,
+    // of that information. Thus, the implementation here switched to an intrinsic,
    // which lowers to `_0 = *src` in MIR, to address a few issues:
    //
    // - Using `MaybeUninit::assume_init` after a `copy_nonoverlapping` was not
@@ -1405,9 +1405,9 @@ pub const unsafe fn read<T>(src: *const T) -> T {
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
#[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
#[rustc_allow_const_fn_unstable(
-    const_mut_refs,
-    const_maybe_uninit_as_mut_ptr,
-    const_intrinsic_copy
+const_mut_refs,
+const_maybe_uninit_as_mut_ptr,
+const_intrinsic_copy
)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[rustc_diagnostic_item = "ptr_read_unaligned"]
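
These attributes sit on `read_unaligned`. A usage sketch of why the function exists, using a packed struct whose field is not naturally aligned (the type and field names are made up for the example):

```rust
use core::ptr;

#[repr(packed)]
struct Packed {
    pad: u8,
    field: u32, // stored at offset 1, so never 4-byte aligned
}

fn main() {
    let p = Packed { pad: 0, field: 0x1234_5678 };
    // `addr_of!` creates the raw pointer without going through an
    // intermediate (misaligned, and therefore UB) reference.
    let field_ptr = ptr::addr_of!(p.field);
    // A plain `ptr::read` would assume `u32` alignment; `read_unaligned`
    // performs a byte-wise copy instead.
    let value = unsafe { ptr::read_unaligned(field_ptr) };
    assert_eq!(value, 0x1234_5678);
}
```
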
@@ -1570,7 +1570,7 @@ pub const unsafe fn write<T>(dst: *mut T, src: T) {
/// As a result, using `&packed.unaligned as *const FieldType` causes immediate
/// *undefined behavior* in your program.
///
-/// Instead you must use the [`ptr::addr_of_mut!`](addr_of_mut)
+/// Instead, you must use the [`ptr::addr_of_mut!`](addr_of_mut)
/// macro to create the pointer. You may use that returned pointer together with
/// this function.
///
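
This is the same packed-field caveat on the writing side, documented on `write_unaligned`. A sketch of the pattern the doc prescribes, with a hypothetical `Packed` struct:

```rust
use core::ptr;

#[repr(packed)]
struct Packed {
    pad: u8,
    unaligned: u32,
}

fn main() {
    let mut packed = Packed { pad: 0, unaligned: 0 };
    // `addr_of_mut!` yields the raw pointer directly; taking
    // `&mut packed.unaligned` first would create a misaligned reference.
    let field_ptr = ptr::addr_of_mut!(packed.unaligned);
    unsafe { ptr::write_unaligned(field_ptr, 42) };
    // Copy the field out (the braces force a copy) before comparing.
    assert_eq!({ packed.unaligned }, 42);
}
```
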
@@ -2205,7 +2205,7 @@ impl<F: FnPtr> fmt::Debug for F {
#[rustc_macro_transparency = "semitransparent"]
#[allow_internal_unstable(raw_ref_op)]
pub macro addr_of($place:expr) {
-    &raw const $place
+&raw const $place
}

/// Create a `mut` raw pointer to a place, without creating an intermediate reference.
@@ -2293,5 +2293,5 @@ pub macro addr_of($place:expr) {
#[rustc_macro_transparency = "semitransparent"]
#[allow_internal_unstable(raw_ref_op)]
pub macro addr_of_mut($place:expr) {
-    &raw mut $place
+&raw mut $place
}
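
Both macros are thin wrappers around the raw-reference operators shown above (`&raw const` / `&raw mut`). One situation where that matters beyond packed fields is pointing into not-yet-initialized memory, sketched here with a made-up `Demo` type:

```rust
use core::mem::MaybeUninit;
use core::ptr;

struct Demo {
    field: bool,
}

fn main() {
    let mut uninit = MaybeUninit::<Demo>::uninit();
    // Creating a `&mut Demo` here would be a reference to uninitialized
    // memory; `addr_of_mut!` goes straight to a raw pointer instead.
    let field_ptr = unsafe { ptr::addr_of_mut!((*uninit.as_mut_ptr()).field) };
    unsafe { field_ptr.write(true) };
    // Every field is initialized now, so this is sound.
    let init = unsafe { uninit.assume_init() };
    assert!(init.field);
}
```
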