zerocopy/lib.rs

1// Copyright 2018 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10// After updating the following doc comment, make sure to run the following
11// command to update `README.md` based on its contents:
12//
13//   cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md
14
15//! ***<span style="font-size: 140%">Fast, safe, <span
16//! style="color:red;">compile error</span>. Pick two.</span>***
17//!
18//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
19//! so you don't have to.
20//!
21//! *For an overview of what's changed from zerocopy 0.7, check out our [release
22//! notes][release-notes], which include a step-by-step upgrading guide.*
23//!
24//! *Have questions? Need more out of zerocopy? Submit a [customer request
25//! issue][customer-request-issue] or ask the maintainers on
26//! [GitHub][github-q-a] or [Discord][discord]!*
27//!
28//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose
29//! [release-notes]: https://github.com/google/zerocopy/discussions/1680
30//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a
31//! [discord]: https://discord.gg/MAvWH2R6zk
32//!
33//! # Overview
34//!
35//! ##### Conversion Traits
36//!
37//! Zerocopy provides four derivable traits for zero-cost conversions:
38//! - [`TryFromBytes`] indicates that a type may safely be converted from
39//!   certain byte sequences (conditional on runtime checks)
40//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid
41//!   instance of a type
42//! - [`FromBytes`] indicates that a type may safely be converted from an
43//!   arbitrary byte sequence
44//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte
45//!   sequence
46//!
47//! These traits support sized types, slices, and [slice DSTs][slice-dsts].
48//!
49//! [slice-dsts]: KnownLayout#dynamically-sized-types
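//!
//! For example, here is a minimal sketch of deriving these traits (along with
//! the marker traits described in the next section) and converting between raw
//! bytes and a typed view via `FromBytes::ref_from_bytes` and
//! `IntoBytes::as_bytes`. The `UdpHeader` type and its fields are purely
//! illustrative:
//!
//! ```
//! use zerocopy::*;
//! # use zerocopy_derive::*;
//!
//! #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
//! #[repr(C)]
//! struct UdpHeader {
//!     src_port: [u8; 2],
//!     dst_port: [u8; 2],
//!     length: [u8; 2],
//!     checksum: [u8; 2],
//! }
//!
//! let bytes = [0u8, 80, 0, 53, 0, 8, 0, 0];
//! // Zero-copy: `header` borrows `bytes` rather than copying out of it.
//! let header = UdpHeader::ref_from_bytes(&bytes[..]).unwrap();
//! assert_eq!(header.src_port, [0, 80]);
//! assert_eq!(header.as_bytes(), &bytes[..]);
//! ```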
50//!
51//! ##### Marker Traits
52//!
53//! Zerocopy provides three derivable marker traits that do not provide any
54//! functionality themselves, but are required to call certain methods provided
55//! by the conversion traits:
56//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout
57//!   qualities of a type
58//! - [`Immutable`] indicates that a type is free from interior mutability,
59//!   except by ownership or an exclusive (`&mut`) borrow
60//! - [`Unaligned`] indicates that a type's alignment requirement is 1
61//!
62//! You should generally derive these marker traits whenever possible.
63//!
64//! ##### Conversion Macros
65//!
66//! Zerocopy provides six macros for safe casting between types:
67//!
68//! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of
69//!   one type to a value of another type of the same size
70//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a
71//!   mutable reference of one type to a mutable reference of another type of
72//!   the same size
73//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a
74//!   mutable or immutable reference of one type to an immutable reference of
75//!   another type of the same size
76//!
77//! These macros perform *compile-time* size and alignment checks, meaning that
78//! unconditional casts have zero cost at runtime. Conditional casts do not need
79//! to validate size or alignment at runtime, but do need to validate contents.
80//!
81//! These macros cannot be used in generic contexts. For generic conversions,
82//! use the methods defined by the [conversion traits](#conversion-traits).
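//!
//! For example, a minimal sketch of the unconditional [`transmute`] macro; the
//! conditional `try_` variants return a `Result` instead:
//!
//! ```
//! use zerocopy::transmute;
//!
//! // `[u8; 4]` and `u32` have the same size, so this cast is checked entirely
//! // at compile time.
//! let bytes: [u8; 4] = [0, 1, 2, 3];
//! let word: u32 = transmute!(bytes);
//! assert_eq!(word, u32::from_ne_bytes([0, 1, 2, 3]));
//! ```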
83//!
84//! ##### Byteorder-Aware Numerics
85//!
86//! Zerocopy provides byte-order aware integer types that support these
87//! conversions; see the [`byteorder`] module. These types are especially useful
88//! for network parsing.
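//!
//! For example, a minimal sketch using the big-endian `U32` type from the
//! [`byteorder`] module:
//!
//! ```
//! use zerocopy::IntoBytes;
//! use zerocopy::byteorder::{BigEndian, U32};
//!
//! // A 32-bit integer stored in big-endian ("network") byte order.
//! let magic: U32<BigEndian> = U32::new(0x1234_5678);
//! assert_eq!(magic.get(), 0x1234_5678);
//! assert_eq!(magic.as_bytes(), &[0x12, 0x34, 0x56, 0x78][..]);
//! ```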
89//!
90//! # Cargo Features
91//!
92//! - **`alloc`**
93//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
94//!   the `alloc` crate is added as a dependency, and some allocation-related
95//!   functionality is added.
96//!
97//! - **`std`**
98//!   By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the
99//!   `std` crate is added as a dependency (ie, `no_std` is disabled), and
100//!   support for some `std` types is added. `std` implies `alloc`.
101//!
102//! - **`derive`**
103//!   Provides derives for the core marker traits via the `zerocopy-derive`
104//!   crate. These derives are re-exported from `zerocopy`, so it is not
105//!   necessary to depend on `zerocopy-derive` directly.
106//!
107//!   However, you may experience better compile times if you instead directly
108//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
109//!   since doing so will allow Rust to compile these crates in parallel. To do
110//!   so, do *not* enable the `derive` feature, and list both dependencies in
111//!   your `Cargo.toml` with the same leading non-zero version number; e.g.:
112//!
113//!   ```toml
114//!   [dependencies]
115//!   zerocopy = "0.X"
116//!   zerocopy-derive = "0.X"
117//!   ```
118//!
119//!   To avoid the risk of [duplicate import errors][duplicate-import-errors] if
120//!   one of your dependencies enables zerocopy's `derive` feature, import
121//!   derives as `use zerocopy_derive::*` rather than by name (e.g., `use
122//!   zerocopy_derive::FromBytes`).
123//!
124//! - **`simd`**
125//!   When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and
126//!   `IntoBytes` impls are emitted for all stable SIMD types which exist on the
127//!   target platform. Note that the layout of SIMD types is not yet stabilized,
128//!   so these impls may be removed in the future if layout changes make them
129//!   invalid. For more information, see the Unsafe Code Guidelines Reference
130//!   page on the [layout of packed SIMD vectors][simd-layout].
131//!
132//! - **`simd-nightly`**
133//!   Enables the `simd` feature and adds support for SIMD types which are only
134//!   available on nightly. Since these types are unstable, support for any type
135//!   may be removed at any point in the future.
136//!
137//! - **`float-nightly`**
138//!   Adds support for the unstable `f16` and `f128` types. These types are
139//!   not yet fully implemented and may not be supported on all platforms.
140//!
141//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587
142//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
143//!
144//! # Security Ethos
145//!
146//! Zerocopy is expressly designed for use in security-critical contexts. We
147//! strive to ensure that zerocopy code is sound under Rust's current
148//! memory model, and *any future memory model*. We ensure this by:
149//! - **...not 'guessing' about Rust's semantics.**
150//!   We annotate `unsafe` code with a precise rationale for its soundness that
151//!   cites a relevant section of Rust's official documentation. When Rust's
152//!   documented semantics are unclear, we work with the Rust Operational
153//!   Semantics Team to clarify Rust's documentation.
154//! - **...rigorously testing our implementation.**
155//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
156//!   array of supported target platforms of varying endianness and pointer
157//!   width, and across both current and experimental memory models of Rust.
158//! - **...formally proving the correctness of our implementation.**
159//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
160//!   correctness.
161//!
162//! For more information, see our full [soundness policy].
163//!
164//! [Miri]: https://github.com/rust-lang/miri
165//! [Kani]: https://github.com/model-checking/kani
166//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
167//!
168//! # Relationship to Project Safe Transmute
169//!
170//! [Project Safe Transmute] is an official initiative of the Rust Project to
171//! develop language-level support for safer transmutation. The Project consults
172//! with crates like zerocopy to identify aspects of safer transmutation that
173//! would benefit from compiler support, and has developed an [experimental,
174//! compiler-supported analysis][mcp-transmutability] which determines whether,
175//! for a given type, any value of that type may be soundly transmuted into
176//! another type. Once this functionality is sufficiently mature, zerocopy
177//! intends to replace its internal transmutability analysis (implemented by our
178//! custom derives) with the compiler-supported one. This change will likely be
179//! an implementation detail that is invisible to zerocopy's users.
180//!
181//! Project Safe Transmute will not replace the need for most of zerocopy's
182//! higher-level abstractions. The experimental compiler analysis is a tool for
183//! checking the soundness of `unsafe` code, not a tool to avoid writing
184//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
185//! will still be required in order to provide higher-level abstractions on top
186//! of the building block provided by Project Safe Transmute.
187//!
188//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
189//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
190//!
191//! # MSRV
192//!
193//! See our [MSRV policy].
194//!
195//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
196//!
197//! # Changelog
198//!
199//! Zerocopy uses [GitHub Releases].
200//!
201//! [GitHub Releases]: https://github.com/google/zerocopy/releases
202//!
203//! # Thanks
204//!
205//! Zerocopy is maintained by engineers at Google and Amazon with help from
206//! [many wonderful contributors][contributors]. Thank you to everyone who has
207//! lent a hand in making Rust a little more secure!
208//!
209//! [contributors]: https://github.com/google/zerocopy/graphs/contributors
210
211// Sometimes we want to use lints which were added after our MSRV.
212// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
213// this attribute, any unknown lint would cause a CI failure when testing with
214// our MSRV.
215#![allow(unknown_lints, non_local_definitions, unreachable_patterns)]
216#![deny(renamed_and_removed_lints)]
217#![deny(
218    anonymous_parameters,
219    deprecated_in_future,
220    late_bound_lifetime_arguments,
221    missing_copy_implementations,
222    missing_debug_implementations,
223    missing_docs,
224    path_statements,
225    patterns_in_fns_without_body,
226    rust_2018_idioms,
227    trivial_numeric_casts,
228    unreachable_pub,
229    unsafe_op_in_unsafe_fn,
230    unused_extern_crates,
231    // We intentionally choose not to deny `unused_qualifications`. When items
232    // are added to the prelude (e.g., `core::mem::size_of`), this has the
233    // consequence of making some uses trigger this lint on the latest toolchain
234    // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
235    // does not work on older toolchains.
236    //
237    // We tested a more complicated fix in #1413, but ultimately decided that,
238    // since this lint is just a minor style lint, the complexity isn't worth it
239    // - it's fine to occasionally have unused qualifications slip through,
240    // especially since these do not affect our user-facing API in any way.
241    variant_size_differences
242)]
243#![cfg_attr(
244    __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
245    deny(fuzzy_provenance_casts, lossy_provenance_casts)
246)]
247#![deny(
248    clippy::all,
249    clippy::alloc_instead_of_core,
250    clippy::arithmetic_side_effects,
251    clippy::as_underscore,
252    clippy::assertions_on_result_states,
253    clippy::as_conversions,
254    clippy::correctness,
255    clippy::dbg_macro,
256    clippy::decimal_literal_representation,
257    clippy::double_must_use,
258    clippy::get_unwrap,
259    clippy::indexing_slicing,
260    clippy::missing_inline_in_public_items,
261    clippy::missing_safety_doc,
262    clippy::must_use_candidate,
263    clippy::must_use_unit,
264    clippy::obfuscated_if_else,
265    clippy::perf,
266    clippy::print_stdout,
267    clippy::return_self_not_must_use,
268    clippy::std_instead_of_core,
269    clippy::style,
270    clippy::suspicious,
271    clippy::todo,
272    clippy::undocumented_unsafe_blocks,
273    clippy::unimplemented,
274    clippy::unnested_or_patterns,
275    clippy::unwrap_used,
276    clippy::use_debug
277)]
278// `clippy::incompatible_msrv` (implied by `clippy::suspicious`): This sometimes
279// has false positives, and we test on our MSRV in CI, so it doesn't help us
280// anyway.
281#![allow(clippy::needless_lifetimes, clippy::type_complexity, clippy::incompatible_msrv)]
282#![deny(
283    rustdoc::bare_urls,
284    rustdoc::broken_intra_doc_links,
285    rustdoc::invalid_codeblock_attributes,
286    rustdoc::invalid_html_tags,
287    rustdoc::invalid_rust_codeblocks,
288    rustdoc::missing_crate_level_docs,
289    rustdoc::private_intra_doc_links
290)]
291// In test code, it makes sense to weight more heavily towards concise, readable
292// code over correct or debuggable code.
293#![cfg_attr(any(test, kani), allow(
294    // In tests, you get line numbers and have access to source code, so panic
295    // messages are less important. You also often unwrap a lot, which would
296    // make expect'ing instead very verbose.
297    clippy::unwrap_used,
298    // In tests, there's no harm to "panic risks" - the worst that can happen is
299    // that your test will fail, and you'll fix it. By contrast, panic risks in
300    // production code introduce the possibility of code panicking unexpectedly "in
301    // the field".
302    clippy::arithmetic_side_effects,
303    clippy::indexing_slicing,
304))]
305#![cfg_attr(not(any(test, kani, feature = "std")), no_std)]
306// NOTE: This attribute should have the effect of causing CI to fail if
307// `stdarch_x86_avx512` - which is currently stable in 1.89.0-nightly as of this
308// writing on 2025-06-10 - has its stabilization rolled back.
309//
310// FIXME(#2583): Remove once `stdarch_x86_avx512` is stabilized in 1.89.0, and
311// 1.89.0 has been released as stable.
312#![cfg_attr(
313    all(feature = "simd-nightly", any(target_arch = "x86", target_arch = "x86_64")),
314    expect(stable_features)
315)]
316// FIXME(#2583): Remove once `stdarch_x86_avx512` is stabilized in 1.89.0, and
317// 1.89.0 has been released as stable. Replace with version detection for 1.89.0
318// (see #2574 for a draft implementation).
319#![cfg_attr(
320    all(feature = "simd-nightly", any(target_arch = "x86", target_arch = "x86_64")),
321    feature(stdarch_x86_avx512)
322)]
323#![cfg_attr(
324    all(feature = "simd-nightly", target_arch = "arm"),
325    feature(stdarch_arm_dsp, stdarch_arm_neon_intrinsics)
326)]
327#![cfg_attr(
328    all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
329    feature(stdarch_powerpc)
330)]
331#![cfg_attr(feature = "float-nightly", feature(f16, f128))]
332#![cfg_attr(doc_cfg, feature(doc_cfg))]
333#![cfg_attr(
334    __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
335    feature(layout_for_ptr, coverage_attribute)
336)]
337
338// This is a hack to allow zerocopy-derive derives to work in this crate. They
339// assume that zerocopy is linked as an extern crate, so they access items from
340// it as `zerocopy::Xxx`. This makes that still work.
341#[cfg(any(feature = "derive", test))]
342extern crate self as zerocopy;
343
344#[doc(hidden)]
345#[macro_use]
346pub mod util;
347
348pub mod byte_slice;
349pub mod byteorder;
350mod deprecated;
351
352#[doc(hidden)]
353pub mod doctests;
354
355// This module is `pub` so that zerocopy's error types and error handling
356// documentation is grouped together in a cohesive module. In practice, we
357// expect most users to use the re-export of `error`'s items to avoid identifier
358// stuttering.
359pub mod error;
360mod impls;
361#[doc(hidden)]
362pub mod layout;
363mod macros;
364#[doc(hidden)]
365pub mod pointer;
366mod r#ref;
367mod split_at;
368// FIXME(#252): If we make this pub, come up with a better name.
369mod wrappers;
370
371use core::{
372    cell::{Cell, UnsafeCell},
373    cmp::Ordering,
374    fmt::{self, Debug, Display, Formatter},
375    hash::Hasher,
376    marker::PhantomData,
377    mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit},
378    num::{
379        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
380        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
381    },
382    ops::{Deref, DerefMut},
383    ptr::{self, NonNull},
384    slice,
385};
386#[cfg(feature = "std")]
387use std::io;
388
389use crate::pointer::invariant::{self, BecauseExclusive};
390pub use crate::{
391    byte_slice::*,
392    byteorder::*,
393    error::*,
394    r#ref::*,
395    split_at::{Split, SplitAt},
396    wrappers::*,
397};
398
399#[cfg(any(feature = "alloc", test, kani))]
400extern crate alloc;
401#[cfg(any(feature = "alloc", test))]
402use alloc::{boxed::Box, vec::Vec};
403#[cfg(any(feature = "alloc", test))]
404use core::alloc::Layout;
405
406use util::MetadataOf;
407
408// Used by `KnownLayout`.
409#[doc(hidden)]
410pub use crate::layout::*;
411// Used by `TryFromBytes::is_bit_valid`.
412#[doc(hidden)]
413pub use crate::pointer::{invariant::BecauseImmutable, Maybe, Ptr};
414// For each trait polyfill, as soon as the corresponding feature is stable, the
415// polyfill import will be unused because method/function resolution will prefer
416// the inherent method/function over a trait method/function. Thus, we suppress
417// the `unused_imports` warning.
418//
419// See the documentation on `util::polyfills` for more information.
420#[allow(unused_imports)]
421use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
422
423#[rustversion::nightly]
424#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)))]
425const _: () = {
426    #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS\""]
427    const _WARNING: () = ();
428    #[warn(deprecated)]
429    _WARNING
430};
431
432// These exist so that code which was written against the old names will get
433// less confusing error messages when they upgrade to a more recent version of
434// zerocopy. On our MSRV toolchain, the error messages read, for example:
435//
436//   error[E0603]: trait `FromZeroes` is private
437//       --> examples/deprecated.rs:1:15
438//        |
439//   1    | use zerocopy::FromZeroes;
440//        |               ^^^^^^^^^^ private trait
441//        |
442//   note: the trait `FromZeroes` is defined here
443//       --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5
444//        |
445//   1845 | use FromZeros as FromZeroes;
446//        |     ^^^^^^^^^^^^^^^^^^^^^^^
447//
448// The "note" provides enough context to make it easy to figure out how to fix
449// the error.
450/// Implements [`KnownLayout`].
451///
452/// This derive analyzes various aspects of a type's layout that are needed for
453/// some of zerocopy's APIs. It can be applied to structs, enums, and unions;
454/// e.g.:
455///
456/// ```
457/// # use zerocopy_derive::KnownLayout;
458/// #[derive(KnownLayout)]
459/// struct MyStruct {
460/// # /*
461///     ...
462/// # */
463/// }
464///
465/// #[derive(KnownLayout)]
466/// enum MyEnum {
467/// #   V00,
468/// # /*
469///     ...
470/// # */
471/// }
472///
473/// #[derive(KnownLayout)]
474/// union MyUnion {
475/// #   variant: u8,
476/// # /*
477///     ...
478/// # */
479/// }
480/// ```
481///
482/// # Limitations
483///
484/// This derive cannot currently be applied to unsized structs without an
485/// explicit `repr` attribute.
486///
487/// Some invocations of this derive run afoul of a [known bug] in Rust's type
488/// privacy checker. For example, this code:
489///
490/// ```compile_fail,E0446
491/// use zerocopy::*;
492/// # use zerocopy_derive::*;
493///
494/// #[derive(KnownLayout)]
495/// #[repr(C)]
496/// pub struct PublicType {
497///     leading: Foo,
498///     trailing: Bar,
499/// }
500///
501/// #[derive(KnownLayout)]
502/// struct Foo;
503///
504/// #[derive(KnownLayout)]
505/// struct Bar;
506/// ```
507///
508/// ...results in a compilation error:
509///
510/// ```text
511/// error[E0446]: private type `Bar` in public interface
512///  --> examples/bug.rs:3:10
513///    |
514/// 3  | #[derive(KnownLayout)]
515///    |          ^^^^^^^^^^^ can't leak private type
516/// ...
517/// 14 | struct Bar;
518///    | ---------- `Bar` declared as private
519///    |
520///    = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info)
521/// ```
522///
523/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)`
524/// structs whose trailing field type is less public than the enclosing struct.
525///
526/// To work around this, mark the trailing field type `pub` and annotate it with
527/// `#[doc(hidden)]`; e.g.:
528///
529/// ```no_run
530/// use zerocopy::*;
531/// # use zerocopy_derive::*;
532///
533/// #[derive(KnownLayout)]
534/// #[repr(C)]
535/// pub struct PublicType {
536///     leading: Foo,
537///     trailing: Bar,
538/// }
539///
540/// #[derive(KnownLayout)]
541/// struct Foo;
542///
543/// #[doc(hidden)]
544/// #[derive(KnownLayout)]
545/// pub struct Bar; // <- `Bar` is now also `pub`
546/// ```
547///
548/// [known bug]: https://github.com/rust-lang/rust/issues/45713
549#[cfg(any(feature = "derive", test))]
550#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
551pub use zerocopy_derive::KnownLayout;
552#[allow(unused)]
553use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified};
554
555/// Indicates that zerocopy can reason about certain aspects of a type's layout.
556///
557/// This trait is required by many of zerocopy's APIs. It supports sized types,
558/// slices, and [slice DSTs](#dynamically-sized-types).
559///
560/// # Implementation
561///
562/// **Do not implement this trait yourself!** Instead, use
563/// [`#[derive(KnownLayout)]`][derive]; e.g.:
564///
565/// ```
566/// # use zerocopy_derive::KnownLayout;
567/// #[derive(KnownLayout)]
568/// struct MyStruct {
569/// # /*
570///     ...
571/// # */
572/// }
573///
574/// #[derive(KnownLayout)]
575/// enum MyEnum {
576/// # /*
577///     ...
578/// # */
579/// }
580///
581/// #[derive(KnownLayout)]
582/// union MyUnion {
583/// #   variant: u8,
584/// # /*
585///     ...
586/// # */
587/// }
588/// ```
589///
590/// This derive performs a sophisticated analysis to deduce the layout
591/// characteristics of types. You **must** implement this trait via the derive.
592///
593/// # Dynamically-sized types
594///
595/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs").
596///
597/// A slice DST is a type whose trailing field is either a slice or another
598/// slice DST, rather than a type with fixed size. For example:
599///
600/// ```
601/// #[repr(C)]
602/// struct PacketHeader {
603/// # /*
604///     ...
605/// # */
606/// }
607///
608/// #[repr(C)]
609/// struct Packet {
610///     header: PacketHeader,
611///     body: [u8],
612/// }
613/// ```
614///
615/// It can be useful to think of slice DSTs as a generalization of slices - in
616/// other words, a normal slice is just the special case of a slice DST with
617/// zero leading fields. In particular:
618/// - Like slices, slice DSTs can have different lengths at runtime
619/// - Like slices, slice DSTs cannot be passed by-value, but only by reference
620///   or via other indirection such as `Box`
621/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST
622///   encodes the number of elements in the trailing slice field
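///
/// For example, a length-delimited packet in the style of `Packet` above can
/// be converted directly from a byte slice using `FromBytes::ref_from_bytes`,
/// with the trailing slice field absorbing however many bytes remain. This is
/// a sketch; the `UdpPacket` type and its fields are illustrative:
///
/// ```
/// use zerocopy::*;
/// # use zerocopy_derive::*;
///
/// #[derive(KnownLayout, FromBytes, Immutable)]
/// #[repr(C)]
/// struct UdpPacket {
///     src_port: [u8; 2],
///     dst_port: [u8; 2],
///     length: [u8; 2],
///     checksum: [u8; 2],
///     body: [u8],
/// }
///
/// let bytes = &[0, 80, 0, 53, 0, 10, 0, 0, b'h', b'i'][..];
/// let packet = UdpPacket::ref_from_bytes(bytes).unwrap();
/// assert_eq!(packet.src_port, [0, 80]);
/// assert_eq!(packet.body, *b"hi");
/// ```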
623///
624/// ## Slice DST layout
625///
626/// Just like other composite Rust types, the layout of a slice DST is not
627/// well-defined unless it is specified using an explicit `#[repr(...)]`
628/// attribute such as `#[repr(C)]`. [Other representations are
629/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our
630/// example.
631///
632/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]`
633/// types][repr-c-structs], but the presence of a variable-length field
634/// introduces the possibility of *dynamic padding*. In particular, it may be
635/// necessary to add trailing padding *after* the trailing slice field in order
636/// to satisfy the outer type's alignment, and the amount of padding required
637/// may be a function of the length of the trailing slice field. This is just a
638/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs,
639/// but it can result in surprising behavior. For example, consider the
640/// following type:
641///
642/// ```
643/// #[repr(C)]
644/// struct Foo {
645///     a: u32,
646///     b: u8,
647///     z: [u16],
648/// }
649/// ```
650///
651/// Assuming that `u32` has alignment 4 (this is not true on all platforms),
652/// then `Foo` has alignment 4 as well. Here is the smallest possible value for
653/// `Foo`:
654///
655/// ```text
656/// byte offset | 01234567
657///       field | aaaab---
658///                    ><
659/// ```
660///
661/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset
662/// that we can place `z` at is 5, but since `z` has alignment 2, we need to
663/// round up to offset 6. This means that there is one byte of padding between
664/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and
665/// then two bytes of padding after `z` in order to satisfy the overall
666/// alignment of `Foo`. The size of this instance is 8 bytes.
667///
668/// What about if `z` has length 1?
669///
670/// ```text
671/// byte offset | 01234567
672///       field | aaaab-zz
673/// ```
674///
675/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means
676/// that we no longer need padding after `z` in order to satisfy `Foo`'s
677/// alignment. We've now seen two different values of `Foo` with two different
678/// lengths of `z`, but they both have the same size - 8 bytes.
679///
680/// What about if `z` has length 2?
681///
682/// ```text
683/// byte offset | 012345678901
684///       field | aaaab-zzzz--
685/// ```
686///
687/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded
688/// size to 10, and so we now need another 2 bytes of padding after `z` to
689/// satisfy `Foo`'s alignment.
690///
691/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules
692/// applied to slice DSTs, but it can be surprising that the amount of trailing
693/// padding becomes a function of the trailing slice field's length, and thus
694/// can only be computed at runtime.
695///
696/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations
697/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs
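///
/// The trailing padding computation described above can be written out
/// explicitly. The sketch below hard-codes the values from the diagrams
/// (offset 6 for `z`, element size 2, and alignment 4):
///
/// ```
/// // The size of a `Foo` whose `z` field has `len` elements.
/// fn foo_size(len: usize) -> usize {
///     let offset = 6;    // offset of `z` within `Foo`
///     let elem_size = 2; // size_of::<u16>()
///     let align = 4;     // align_of::<Foo>() (assuming `u32` has alignment 4)
///     let unpadded = offset + elem_size * len;
///     // Round up to the nearest multiple of `align`.
///     (unpadded + align - 1) / align * align
/// }
///
/// assert_eq!(foo_size(0), 8);
/// assert_eq!(foo_size(1), 8);
/// assert_eq!(foo_size(2), 12);
/// ```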
698///
699/// ## What is a valid size?
700///
701/// There are two places in zerocopy's API where we refer to "a valid size" of a
702/// type. In normal casts or conversions, where the source is a byte slice, we
703/// need to know whether the source byte slice is a valid size of the
704/// destination type. In prefix or suffix casts, we need to know whether *there
705/// exists* a valid size of the destination type which fits in the source byte
706/// slice and, if so, what the largest such size is.
707///
708/// As outlined above, a slice DST's size is defined by the number of elements
709/// in its trailing slice field. However, there is not necessarily a 1-to-1
710/// mapping between trailing slice field length and overall size. As we saw in
711/// the previous section with the type `Foo`, instances with both 0 and 1
712/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes.
713///
714/// When we say "x is a valid size of `T`", we mean one of two things:
715/// - If `T: Sized`, then we mean that `x == size_of::<T>()`
716/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of
717///   `T` with `len` trailing slice elements has size `x`
718///
719/// When we say "largest possible size of `T` that fits in a byte slice", we
720/// mean one of two things:
721/// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least
722///   `size_of::<T>()` bytes long
723/// - If `T` is a slice DST, then we mean to consider all values, `len`, such
724///   that the instance of `T` with `len` trailing slice elements fits in the
725///   byte slice, and to choose the largest such `len`, if any
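///
/// For example, for the `Foo` type above, `8` is a valid size of `Foo` (with
/// either 0 or 1 trailing elements) and `12` is a valid size (with 2 trailing
/// elements), but `10` is not a valid size of `Foo`. The largest possible size
/// of `Foo` that fits in a 10-byte slice is `8`.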
726///
727///
728/// # Safety
729///
730/// This trait does not convey any safety guarantees to code outside this crate.
731///
732/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future
733/// releases of zerocopy may make backwards-breaking changes to these items,
734/// including changes that only affect soundness, which may cause code which
735/// uses those items to silently become unsound.
736///
737#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")]
738#[cfg_attr(
739    not(feature = "derive"),
740    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"),
741)]
742#[cfg_attr(
743    zerocopy_diagnostic_on_unimplemented_1_78_0,
744    diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`")
745)]
746pub unsafe trait KnownLayout {
747    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
748    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
749    // it likely won't be in the future, but there's no reason not to be
750    // forwards-compatible with object safety.
751    #[doc(hidden)]
752    fn only_derive_is_allowed_to_implement_this_trait()
753    where
754        Self: Sized;
755
756    /// The type of metadata stored in a pointer to `Self`.
757    ///
758    /// This is `()` for sized types and `usize` for slice DSTs.
759    type PointerMetadata: PointerMetadata;
760
761    /// A maybe-uninitialized analog of `Self`
762    ///
763    /// # Safety
764    ///
765    /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical.
766    /// `Self::MaybeUninit` admits uninitialized bytes in all positions.
767    #[doc(hidden)]
768    type MaybeUninit: ?Sized + KnownLayout<PointerMetadata = Self::PointerMetadata>;
769
770    /// The layout of `Self`.
771    ///
772    /// # Safety
773    ///
774    /// Callers may assume that `LAYOUT` accurately reflects the layout of
775    /// `Self`. In particular:
776    /// - `LAYOUT.align` is equal to `Self`'s alignment
777    /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }`
778    ///   where `size == size_of::<Self>()`
779    /// - If `Self` is a slice DST, then `LAYOUT.size_info ==
780    ///   SizeInfo::SliceDst(slice_layout)` where:
781    ///   - The size, `size`, of an instance of `Self` with `elems` trailing
782    ///     slice elements is equal to `slice_layout.offset +
783    ///     slice_layout.elem_size * elems` rounded up to the nearest multiple
784    ///     of `LAYOUT.align`
785    ///   - For such an instance, any bytes in the range `[slice_layout.offset +
786    ///     slice_layout.elem_size * elems, size)` are padding and must not be
787    ///     assumed to be initialized
788    #[doc(hidden)]
789    const LAYOUT: DstLayout;
790
791    /// SAFETY: The returned pointer has the same address and provenance as
792    /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
793    /// elements in its trailing slice.
794    #[doc(hidden)]
795    fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>;
796
797    /// Extracts the metadata from a pointer to `Self`.
798    ///
799    /// # Safety
800    ///
801    /// `pointer_to_metadata` always returns the correct metadata stored in
802    /// `ptr`.
803    #[doc(hidden)]
804    fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata;
805
806    /// Computes the length of the byte range addressed by `ptr`.
807    ///
808    /// Returns `None` if the resulting length would not fit in a `usize`.
809    ///
810    /// # Safety
811    ///
812    /// Callers may assume that `size_of_val_raw` always returns the correct
813    /// size.
814    ///
815    /// Callers may assume that, if `ptr` addresses a byte range whose length
816    /// fits in a `usize`, this will return `Some`.
817    #[doc(hidden)]
818    #[must_use]
819    #[inline(always)]
820    fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> {
821        let meta = Self::pointer_to_metadata(ptr.as_ptr());
822        // SAFETY: `size_for_metadata` promises to only return `None` if the
823        // resulting size would not fit in a `usize`.
824        meta.size_for_metadata(Self::LAYOUT)
825    }
826
827    #[doc(hidden)]
828    #[must_use]
829    #[inline(always)]
830    fn raw_dangling() -> NonNull<Self> {
831        let meta = Self::PointerMetadata::from_elem_count(0);
832        Self::raw_from_ptr_len(NonNull::dangling(), meta)
833    }
834}
835
836/// Efficiently produces the [`TrailingSliceLayout`] of `T`.
837#[inline(always)]
838pub(crate) fn trailing_slice_layout<T>() -> TrailingSliceLayout
839where
840    T: ?Sized + KnownLayout<PointerMetadata = usize>,
841{
842    trait LayoutFacts {
843        const SIZE_INFO: TrailingSliceLayout;
844    }
845
846    impl<T: ?Sized> LayoutFacts for T
847    where
848        T: KnownLayout<PointerMetadata = usize>,
849    {
850        const SIZE_INFO: TrailingSliceLayout = match T::LAYOUT.size_info {
851            crate::SizeInfo::Sized { .. } => const_panic!("unreachable"),
852            crate::SizeInfo::SliceDst(info) => info,
853        };
854    }
855
856    T::SIZE_INFO
857}
858
859/// The metadata associated with a [`KnownLayout`] type.
860#[doc(hidden)]
861pub trait PointerMetadata: Copy + Eq + Debug {
862    /// Constructs a `Self` from an element count.
863    ///
864    /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns
865    /// `elems`. No other types are currently supported.
866    fn from_elem_count(elems: usize) -> Self;
867
868    /// Computes the size of the object with the given layout and pointer
869    /// metadata.
870    ///
871    /// # Panics
872    ///
873    /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`,
874    /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may
875    /// panic.
876    ///
877    /// # Safety
878    ///
879    /// `size_for_metadata` promises to only return `None` if the resulting size
880    /// would not fit in a `usize`.
881    fn size_for_metadata(self, layout: DstLayout) -> Option<usize>;
882}
883
884impl PointerMetadata for () {
885    #[inline]
886    #[allow(clippy::unused_unit)]
887    fn from_elem_count(_elems: usize) -> () {}
888
889    #[inline]
890    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
891        match layout.size_info {
892            SizeInfo::Sized { size } => Some(size),
893            // NOTE: This branch is unreachable, but we return `None` rather
894            // than `unreachable!()` to avoid generating panic paths.
895            SizeInfo::SliceDst(_) => None,
896        }
897    }
898}
899
900impl PointerMetadata for usize {
901    #[inline]
902    fn from_elem_count(elems: usize) -> usize {
903        elems
904    }
905
906    #[inline]
907    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
908        match layout.size_info {
909            SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => {
910                let slice_len = elem_size.checked_mul(self)?;
911                let without_padding = offset.checked_add(slice_len)?;
912                without_padding.checked_add(util::padding_needed_for(without_padding, layout.align))
913            }
914            // NOTE: This branch is unreachable, but we return `None` rather
915            // than `unreachable!()` to avoid generating panic paths.
916            SizeInfo::Sized { .. } => None,
917        }
918    }
919}
920
921// SAFETY: Delegates safety to `DstLayout::for_slice`.
922unsafe impl<T> KnownLayout for [T] {
923    #[allow(clippy::missing_inline_in_public_items, dead_code)]
924    #[cfg_attr(
925        all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
926        coverage(off)
927    )]
928    fn only_derive_is_allowed_to_implement_this_trait()
929    where
930        Self: Sized,
931    {
932    }
933
934    type PointerMetadata = usize;
935
936    // SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are identical
937    // because `CoreMaybeUninit<T>` has the same size and alignment as `T` [1].
938    // Consequently, `[CoreMaybeUninit<T>]::LAYOUT` and `[T]::LAYOUT` are
939    // identical, because they both lack a fixed-sized prefix and because they
940    // inherit the alignments of their inner element type (which are identical)
941    // [2][3].
942    //
943    // `[CoreMaybeUninit<T>]` admits uninitialized bytes at all positions
944    // because `CoreMaybeUninit<T>` admits uninitialized bytes at all positions
945    // and because the inner elements of `[CoreMaybeUninit<T>]` are laid out
946    // back-to-back [2][3].
947    //
948    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
949    //
950    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
951    //   `T`
952    //
953    // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout:
954    //
955    //   Slices have the same layout as the section of the array they slice.
956    //
957    // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout:
958    //
959    //   An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
960    //   alignment of `T`. Arrays are laid out so that the zero-based `nth`
961    //   element of the array is offset from the start of the array by `n *
962    //   size_of::<T>()` bytes.
963    type MaybeUninit = [CoreMaybeUninit<T>];
964
965    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();
966
967    // SAFETY: `.cast` preserves address and provenance. The returned pointer
968    // refers to an object with `elems` elements by construction.
969    #[inline(always)]
970    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
971        // FIXME(#67): Remove this allow. See NonNullExt for more details.
972        #[allow(unstable_name_collisions)]
973        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
974    }
975
976    #[inline(always)]
977    fn pointer_to_metadata(ptr: *mut [T]) -> usize {
978        #[allow(clippy::as_conversions)]
979        let slc = ptr as *const [()];
980
981        // SAFETY:
982        // - `()` has alignment 1, so `slc` is trivially aligned.
983        // - `slc` was derived from a non-null pointer.
984        // - The size is 0 regardless of the length, so it is sound to
985        //   materialize a reference regardless of location.
986        // - By invariant, `self.ptr` has valid provenance.
987        let slc = unsafe { &*slc };
988
989        // This is correct because the preceding `as` cast preserves the number
990        // of slice elements. [1]
991        //
992        // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast:
993        //
994        //   For slice types like `[T]` and `[U]`, the raw pointer types `*const
995        //   [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of
996        //   elements in this slice. Casts between these raw pointer types
997        //   preserve the number of elements. ... The same holds for `str` and
998        //   any compound type whose unsized tail is a slice type, such as
999        //   struct `Foo(i32, [u8])` or `(u64, Foo)`.
1000        slc.len()
1001    }
1002}
1003
1004#[rustfmt::skip]
1005impl_known_layout!(
1006    (),
1007    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
1008    bool, char,
1009    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
1010    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
1011);
1012#[rustfmt::skip]
1013#[cfg(feature = "float-nightly")]
1014impl_known_layout!(
1015    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
1016    f16,
1017    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
1018    f128
1019);
1020#[rustfmt::skip]
1021impl_known_layout!(
1022    T         => Option<T>,
1023    T: ?Sized => PhantomData<T>,
1024    T         => Wrapping<T>,
1025    T         => CoreMaybeUninit<T>,
1026    T: ?Sized => *const T,
1027    T: ?Sized => *mut T,
1028    T: ?Sized => &'_ T,
1029    T: ?Sized => &'_ mut T,
1030);
1031impl_known_layout!(const N: usize, T => [T; N]);
1032
1033// SAFETY: `str` has the same representation as `[u8]`. `ManuallyDrop<T>` [1],
1034// `UnsafeCell<T>` [2], and `Cell<T>` [3] have the same representation as `T`.
1035//
1036// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html:
1037//
1038//   `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
1039//   `T`
1040//
1041// [2] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.UnsafeCell.html#memory-layout:
1042//
1043//   `UnsafeCell<T>` has the same in-memory representation as its inner type
1044//   `T`.
1045//
1046// [3] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.Cell.html#memory-layout:
1047//
1048//   `Cell<T>` has the same in-memory representation as `T`.
1049const _: () = unsafe {
1050    unsafe_impl_known_layout!(
1051        #[repr([u8])]
1052        str
1053    );
1054    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
1055    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>);
1056    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] Cell<T>);
1057};
1058
1059// SAFETY:
1060// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT` and
1061//   `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit` have the same:
1062//   - Fixed prefix size
1063//   - Alignment
1064//   - (For DSTs) trailing slice element size
1065// - By consequence of the above, referents `T::MaybeUninit` and `T` require
1066//   the same kind of pointer metadata, and thus it is valid to perform an `as`
1067//   cast from `*mut T` to `*mut T::MaybeUninit`, and this operation
1068//   preserves referent size (ie, `size_of_val_raw`).
1069const _: () = unsafe {
1070    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit<T>)
1071};
1072
1073/// Analyzes whether a type is [`FromZeros`].
1074///
1075/// This derive analyzes, at compile time, whether the annotated type satisfies
1076/// the [safety conditions] of `FromZeros` and implements `FromZeros` and its
1077/// supertraits if it is sound to do so. This derive can be applied to structs,
1078/// enums, and unions; e.g.:
1079///
1080/// ```
1081/// # use zerocopy_derive::{FromZeros, Immutable};
1082/// #[derive(FromZeros)]
1083/// struct MyStruct {
1084/// # /*
1085///     ...
1086/// # */
1087/// }
1088///
1089/// #[derive(FromZeros)]
1090/// #[repr(u8)]
1091/// enum MyEnum {
1092/// #   Variant0,
1093/// # /*
1094///     ...
1095/// # */
1096/// }
1097///
1098/// #[derive(FromZeros, Immutable)]
1099/// union MyUnion {
1100/// #   variant: u8,
1101/// # /*
1102///     ...
1103/// # */
1104/// }
1105/// ```
1106///
1107/// [safety conditions]: trait@FromZeros#safety
1108///
1109/// # Analysis
1110///
1111/// *This section describes, roughly, the analysis performed by this derive to
1112/// determine whether it is sound to implement `FromZeros` for a given type.
1113/// Unless you are modifying the implementation of this derive, or attempting to
1114/// manually implement `FromZeros` for a type yourself, you don't need to read
1115/// this section.*
1116///
1117/// If a type has the following properties, then this derive can implement
1118/// `FromZeros` for that type:
1119///
1120/// - If the type is a struct, all of its fields must be `FromZeros`.
1121/// - If the type is an enum:
1122///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1123///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
1124///   - It must have a variant with a discriminant/tag of `0`. See
1125///     [the reference] for a description of how discriminant values are
1126///     specified.
1127///   - The fields of that variant must be `FromZeros`.
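///
/// For example, the following enum can soundly derive `FromZeros` because its
/// `Empty` variant has a discriminant of `0` (a minimal sketch; the names are
/// illustrative):
///
/// ```
/// # use zerocopy_derive::FromZeros;
/// #[derive(FromZeros)]
/// #[repr(u8)]
/// enum Status {
///     Empty = 0,
///     Ready = 1,
/// }
/// ```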
1128///
1129/// This analysis is subject to change. Unsafe code may *only* rely on the
1130/// documented [safety conditions] of `FromZeros`, and must *not* rely on the
1131/// implementation details of this derive.
1132///
1133/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
1134///
1135/// ## Why isn't an explicit representation required for structs?
1136///
1137/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires
1138/// that structs are marked with `#[repr(C)]`.
1139///
1140/// Per the [Rust reference][reference],
1141///
1142/// > The representation of a type can change the padding between fields, but
1143/// > does not change the layout of the fields themselves.
1144///
1145/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1146///
1147/// Since the layout of structs only consists of padding bytes and field bytes,
1148/// a struct is soundly `FromZeros` if:
1149/// 1. its padding is soundly `FromZeros`, and
1150/// 2. its fields are soundly `FromZeros`.
1151///
1152/// The first condition always holds: padding bytes do not have any validity
1153/// constraints. A [discussion] of this question in the Unsafe Code
1154/// Guidelines Working Group concluded that it would be virtually unimaginable
1155/// for future versions of rustc to add validity constraints to padding bytes.
1156///
1157/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1158///
1159/// Whether a struct is soundly `FromZeros` therefore solely depends on whether
1160/// its fields are `FromZeros`.
1161// FIXME(#146): Document why we don't require an enum to have an explicit `repr`
1162// attribute.
1163#[cfg(any(feature = "derive", test))]
1164#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1165pub use zerocopy_derive::FromZeros;
1166/// Analyzes whether a type is [`Immutable`].
1167///
1168/// This derive analyzes, at compile time, whether the annotated type satisfies
1169/// the [safety conditions] of `Immutable` and implements `Immutable` if it is
1170/// sound to do so. This derive can be applied to structs, enums, and unions;
1171/// e.g.:
1172///
1173/// ```
1174/// # use zerocopy_derive::Immutable;
1175/// #[derive(Immutable)]
1176/// struct MyStruct {
1177/// # /*
1178///     ...
1179/// # */
1180/// }
1181///
1182/// #[derive(Immutable)]
1183/// enum MyEnum {
1184/// #   Variant0,
1185/// # /*
1186///     ...
1187/// # */
1188/// }
1189///
1190/// #[derive(Immutable)]
1191/// union MyUnion {
1192/// #   variant: u8,
1193/// # /*
1194///     ...
1195/// # */
1196/// }
1197/// ```
1198///
1199/// # Analysis
1200///
1201/// *This section describes, roughly, the analysis performed by this derive to
1202/// determine whether it is sound to implement `Immutable` for a given type.
1203/// Unless you are modifying the implementation of this derive, you don't need
1204/// to read this section.*
1205///
1206/// If a type has the following properties, then this derive can implement
1207/// `Immutable` for that type:
1208///
1209/// - All fields must be `Immutable`.
1210///
1211/// This analysis is subject to change. Unsafe code may *only* rely on the
1212/// documented [safety conditions] of `Immutable`, and must *not* rely on the
1213/// implementation details of this derive.
1214///
1215/// [safety conditions]: trait@Immutable#safety
1216#[cfg(any(feature = "derive", test))]
1217#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1218pub use zerocopy_derive::Immutable;
1219
1220/// Types which are free from interior mutability.
1221///
1222/// `T: Immutable` indicates that `T` does not permit interior mutation, except
1223/// by ownership or an exclusive (`&mut`) borrow.
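///
/// For example, `u8` is `Immutable`, but `Cell<u8>` is not, because a
/// `Cell<u8>` may be mutated through a shared (`&`) reference. A minimal
/// sketch (the `assert_immutable` helper is hypothetical):
///
/// ```compile_fail
/// use core::cell::Cell;
/// use zerocopy::Immutable;
///
/// fn assert_immutable<T: Immutable>() {}
///
/// assert_immutable::<u8>();       // OK
/// assert_immutable::<Cell<u8>>(); // ERROR: `Cell<u8>` is not `Immutable`
/// ```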
1224///
1225/// # Implementation
1226///
1227/// **Do not implement this trait yourself!** Instead, use
1228/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature);
1229/// e.g.:
1230///
1231/// ```
1232/// # use zerocopy_derive::Immutable;
1233/// #[derive(Immutable)]
1234/// struct MyStruct {
1235/// # /*
1236///     ...
1237/// # */
1238/// }
1239///
1240/// #[derive(Immutable)]
1241/// enum MyEnum {
1242/// # /*
1243///     ...
1244/// # */
1245/// }
1246///
1247/// #[derive(Immutable)]
1248/// union MyUnion {
1249/// #   variant: u8,
1250/// # /*
1251///     ...
1252/// # */
1253/// }
1254/// ```
1255///
1256/// This derive performs a sophisticated, compile-time safety analysis to
1257/// determine whether a type is `Immutable`.
1258///
1259/// # Safety
1260///
1261/// Unsafe code outside of this crate must not make any assumptions about `T`
1262/// based on `T: Immutable`. We reserve the right to relax the requirements for
1263/// `Immutable` in the future, and if unsafe code outside of this crate makes
1264/// assumptions based on `T: Immutable`, future relaxations may cause that code
1265/// to become unsound.
1266///
1267// # Safety (Internal)
1268//
1269// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given
1270// `t: &T`, `t` does not contain any [`UnsafeCell`]s at any byte location
1271// within the byte range addressed by `t`. This includes ranges of length 0
1272// (e.g., `UnsafeCell<()>` and `[UnsafeCell<u8>; 0]`). If a type that violates
1273// these assumptions implements `Immutable`, it may cause this crate to
1274// exhibit [undefined behavior].
1275//
1276// [`UnsafeCell`]: core::cell::UnsafeCell
1277// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1278#[cfg_attr(
1279    feature = "derive",
1280    doc = "[derive]: zerocopy_derive::Immutable",
1281    doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis"
1282)]
1283#[cfg_attr(
1284    not(feature = "derive"),
1285    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"),
1286    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"),
1287)]
1288#[cfg_attr(
1289    zerocopy_diagnostic_on_unimplemented_1_78_0,
1290    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`")
1291)]
1292pub unsafe trait Immutable {
1293    // The `Self: Sized` bound makes it so that `Immutable` is still object
1294    // safe.
1295    #[doc(hidden)]
1296    fn only_derive_is_allowed_to_implement_this_trait()
1297    where
1298        Self: Sized;
1299}
1300
1301/// Implements [`TryFromBytes`].
1302///
1303/// This derive synthesizes the runtime checks required to determine whether a
1304/// sequence of initialized bytes corresponds to a valid instance of a type.
1305/// This derive can be applied to structs, enums, and unions; e.g.:
1306///
1307/// ```
1308/// # use zerocopy_derive::{TryFromBytes, Immutable};
1309/// #[derive(TryFromBytes)]
1310/// struct MyStruct {
1311/// # /*
1312///     ...
1313/// # */
1314/// }
1315///
1316/// #[derive(TryFromBytes)]
1317/// #[repr(u8)]
1318/// enum MyEnum {
1319/// #   V00,
1320/// # /*
1321///     ...
1322/// # */
1323/// }
1324///
1325/// #[derive(TryFromBytes, Immutable)]
1326/// union MyUnion {
1327/// #   variant: u8,
1328/// # /*
1329///     ...
1330/// # */
1331/// }
1332/// ```
1333///
1334/// # Portability
1335///
1336/// To ensure consistent endianness for enums with multi-byte representations,
1337/// explicitly specify and convert each discriminant using `.to_le()` or
1338/// `.to_be()`; e.g.:
1339///
1340/// ```
1341/// # use zerocopy_derive::TryFromBytes;
1342/// // `DataStoreVersion` is encoded in little-endian.
1343/// #[derive(TryFromBytes)]
1344/// #[repr(u32)]
1345/// pub enum DataStoreVersion {
1346///     /// Version 1 of the data store.
1347///     V1 = 9u32.to_le(),
1348///
1349///     /// Version 2 of the data store.
1350///     V2 = 10u32.to_le(),
1351/// }
1352/// ```
1353///
1354/// [safety conditions]: trait@TryFromBytes#safety
1355#[cfg(any(feature = "derive", test))]
1356#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1357pub use zerocopy_derive::TryFromBytes;
1358
1359/// Types for which some bit patterns are valid.
1360///
1361/// A memory region of the appropriate length which contains initialized bytes
1362/// can be viewed as a `TryFromBytes` type so long as the runtime value of those
1363/// bytes corresponds to a [*valid instance*] of that type. For example,
1364/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a
1365/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or
1366/// `1`.
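///
/// A minimal sketch of that check, using the fallible [`try_ref_from_bytes`]
/// method described below:
///
/// ```
/// use zerocopy::TryFromBytes;
///
/// // `0` and `1` are the only byte values that are valid `bool`s.
/// assert_eq!(bool::try_ref_from_bytes(&[1u8]).unwrap(), &true);
/// assert!(bool::try_ref_from_bytes(&[2u8]).is_err());
/// ```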
1367///
1368/// # Implementation
1369///
1370/// **Do not implement this trait yourself!** Instead, use
1371/// [`#[derive(TryFromBytes)]`][derive]; e.g.:
1372///
1373/// ```
1374/// # use zerocopy_derive::{TryFromBytes, Immutable};
1375/// #[derive(TryFromBytes)]
1376/// struct MyStruct {
1377/// # /*
1378///     ...
1379/// # */
1380/// }
1381///
1382/// #[derive(TryFromBytes)]
1383/// #[repr(u8)]
1384/// enum MyEnum {
1385/// #   V00,
1386/// # /*
1387///     ...
1388/// # */
1389/// }
1390///
1391/// #[derive(TryFromBytes, Immutable)]
1392/// union MyUnion {
1393/// #   variant: u8,
1394/// # /*
1395///     ...
1396/// # */
1397/// }
1398/// ```
1399///
1400/// This derive ensures that the runtime check of whether bytes correspond to a
1401/// valid instance is sound. You **must** implement this trait via the derive.
1402///
1403/// # What is a "valid instance"?
1404///
1405/// In Rust, each type has *bit validity*, which refers to the set of bit
1406/// patterns which may appear in an instance of that type. It is impossible for
1407/// safe Rust code to produce values which violate bit validity (ie, values
1408/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
1409/// invalid value, this is considered [undefined behavior].
1410///
1411/// Rust's bit validity rules are currently being decided, which means that some
1412/// types have three classes of bit patterns: those which are definitely valid,
1413/// and whose validity is documented in the language; those which may or may not
1414/// be considered valid at some point in the future; and those which are
1415/// definitely invalid.
1416///
1417/// Zerocopy takes a conservative approach, and only considers a bit pattern to
1418/// be valid if its validity is a documented guarantee provided by the
1419/// language.
1420///
1421/// For most use cases, Rust's current guarantees align with programmers'
1422/// intuitions about what ought to be valid. As a result, zerocopy's
1423/// conservatism should not affect most users.
1424///
1425/// If you are negatively affected by lack of support for a particular type,
1426/// we encourage you to let us know by [filing an issue][github-repo].
1427///
1428/// # `TryFromBytes` is not symmetrical with [`IntoBytes`]
1429///
1430/// There are some types which implement both `TryFromBytes` and [`IntoBytes`],
1431/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences
1432/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes +
1433/// IntoBytes`, there exist values of `t: T` such that
1434/// `TryFromBytes::try_ref_from_bytes(t.as_bytes())` returns `Err`. Code should not
1435/// generally assume that values produced by `IntoBytes` will necessarily be
1436/// accepted as valid by `TryFromBytes`.
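///
/// If generic code depends on this round trip succeeding, it should check the
/// result rather than assume it. A hedged sketch (the `roundtrips` helper is
/// hypothetical, written here only for illustration):
///
/// ```
/// use zerocopy::{Immutable, IntoBytes, KnownLayout, TryFromBytes};
///
/// // Reports whether `t`'s byte representation is accepted back by
/// // `TryFromBytes`.
/// fn roundtrips<T>(t: &T) -> bool
/// where
///     T: TryFromBytes + IntoBytes + KnownLayout + Immutable,
/// {
///     T::try_ref_from_bytes(t.as_bytes()).is_ok()
/// }
///
/// // This holds for many types, including `u8`, but generic code should not
/// // assume it holds for every `T`.
/// assert!(roundtrips(&42u8));
/// ```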
1437///
1438/// # Safety
1439///
1440/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
1441/// or representation of `T`. It merely provides the ability to perform a
1442/// validity check at runtime via methods like [`try_ref_from_bytes`].
1443///
1444/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`.
1445/// Future releases of zerocopy may make backwards-breaking changes to these
1446/// items, including changes that only affect soundness, which may cause code
1447/// which uses those items to silently become unsound.
1448///
1449/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1450/// [github-repo]: https://github.com/google/zerocopy
1451/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
1452/// [*valid instance*]: #what-is-a-valid-instance
1453#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")]
1454#[cfg_attr(
1455    not(feature = "derive"),
1456    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"),
1457)]
1458#[cfg_attr(
1459    zerocopy_diagnostic_on_unimplemented_1_78_0,
1460    diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`")
1461)]
1462pub unsafe trait TryFromBytes {
1463    // The `Self: Sized` bound makes it so that `TryFromBytes` is still object
1464    // safe.
1465    #[doc(hidden)]
1466    fn only_derive_is_allowed_to_implement_this_trait()
1467    where
1468        Self: Sized;
1469
1470    /// Does a given memory range contain a valid instance of `Self`?
1471    ///
1472    /// # Safety
1473    ///
1474    /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
1475    /// `*candidate` contains a valid `Self`.
1476    ///
1477    /// # Panics
1478    ///
1479    /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
1480    /// `unsafe` code remains sound even in the face of `is_bit_valid`
1481    /// panicking. (We support user-defined validation routines; so long as
1482    /// these routines are not required to be `unsafe`, there is no way to
1483    /// ensure that these do not generate panics.)
1484    ///
1485    /// Besides user-defined validation routines panicking, `is_bit_valid` will
1486    /// either panic or fail to compile if called on a pointer with [`Shared`]
1487    /// aliasing when `Self: !Immutable`.
1488    ///
1489    /// [`UnsafeCell`]: core::cell::UnsafeCell
1490    /// [`Shared`]: invariant::Shared
1491    #[doc(hidden)]
1492    fn is_bit_valid<A: invariant::Reference>(candidate: Maybe<'_, Self, A>) -> bool;
1493
1494    /// Attempts to interpret the given `source` as a `&Self`.
1495    ///
1496    /// If the bytes of `source` are a valid instance of `Self`, this method
1497    /// returns a reference to those bytes interpreted as a `Self`. If the
1498    /// length of `source` is not a [valid size of `Self`][valid-size], or if
1499    /// `source` is not appropriately aligned, or if `source` is not a valid
1500    /// instance of `Self`, this returns `Err`. If [`Self:
1501    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1502    /// error][ConvertError::from].
1503    ///
1504    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1505    ///
1506    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1507    /// [self-unaligned]: Unaligned
1508    /// [slice-dst]: KnownLayout#dynamically-sized-types
1509    ///
1510    /// # Compile-Time Assertions
1511    ///
1512    /// This method cannot yet be used on unsized types whose dynamically-sized
1513    /// component is zero-sized. Attempting to use this method on such types
1514    /// results in a compile-time assertion error; e.g.:
1515    ///
1516    /// ```compile_fail,E0080
1517    /// use zerocopy::*;
1518    /// # use zerocopy_derive::*;
1519    ///
1520    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1521    /// #[repr(C)]
1522    /// struct ZSTy {
1523    ///     leading_sized: u16,
1524    ///     trailing_dst: [()],
1525    /// }
1526    ///
1527    /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
1528    /// ```
1529    ///
1530    /// # Examples
1531    ///
1532    /// ```
1533    /// use zerocopy::TryFromBytes;
1534    /// # use zerocopy_derive::*;
1535    ///
1536    /// // The only valid value of this type is the byte `0xC0`
1537    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1538    /// #[repr(u8)]
1539    /// enum C0 { xC0 = 0xC0 }
1540    ///
1541    /// // The only valid value of this type is the byte sequence `0xC0C0`.
1542    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1543    /// #[repr(C)]
1544    /// struct C0C0(C0, C0);
1545    ///
1546    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1547    /// #[repr(C)]
1548    /// struct Packet {
1549    ///     magic_number: C0C0,
1550    ///     mug_size: u8,
1551    ///     temperature: u8,
1552    ///     marshmallows: [[u8; 2]],
1553    /// }
1554    ///
1555    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1556    ///
1557    /// let packet = Packet::try_ref_from_bytes(bytes).unwrap();
1558    ///
1559    /// assert_eq!(packet.mug_size, 240);
1560    /// assert_eq!(packet.temperature, 77);
1561    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1562    ///
1563    /// // These bytes are not a valid instance of `Packet`.
1564    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1565    /// assert!(Packet::try_ref_from_bytes(bytes).is_err());
1566    /// ```
1567    #[must_use = "has no side effects"]
1568    #[inline]
1569    fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>>
1570    where
1571        Self: KnownLayout + Immutable,
1572    {
1573        static_assert_dst_is_not_zst!(Self);
1574        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) {
1575            Ok(source) => {
1576                // This call may panic. If that happens, it doesn't cause any soundness
1577                // issues, as we have not generated any invalid state which we need to
1578                // fix before returning.
1579                //
1580                // Note that one panic or post-monomorphization error condition is
1581                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
1582                // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
1583                // condition will not happen.
1584                match source.try_into_valid() {
1585                    Ok(valid) => Ok(valid.as_ref()),
1586                    Err(e) => {
1587                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
1588                    }
1589                }
1590            }
1591            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
1592        }
1593    }
1594
1595    /// Attempts to interpret the prefix of the given `source` as a `&Self`.
1596    ///
1597    /// This method computes the [largest possible size of `Self`][valid-size]
1598    /// that can fit in the leading bytes of `source`. If that prefix is a valid
1599    /// instance of `Self`, this method returns a reference to those bytes
1600    /// interpreted as `Self`, and a reference to the remaining bytes. If there
1601    /// are insufficient bytes, or if `source` is not appropriately aligned, or
1602    /// if those bytes are not a valid instance of `Self`, this returns `Err`.
1603    /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
1604    /// alignment error][ConvertError::from].
1605    ///
1606    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1607    ///
1608    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1609    /// [self-unaligned]: Unaligned
1610    /// [slice-dst]: KnownLayout#dynamically-sized-types
1611    ///
1612    /// # Compile-Time Assertions
1613    ///
1614    /// This method cannot yet be used on unsized types whose dynamically-sized
1615    /// component is zero-sized. Attempting to use this method on such types
1616    /// results in a compile-time assertion error; e.g.:
1617    ///
1618    /// ```compile_fail,E0080
1619    /// use zerocopy::*;
1620    /// # use zerocopy_derive::*;
1621    ///
1622    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1623    /// #[repr(C)]
1624    /// struct ZSTy {
1625    ///     leading_sized: u16,
1626    ///     trailing_dst: [()],
1627    /// }
1628    ///
1629    /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
1630    /// ```
1631    ///
1632    /// # Examples
1633    ///
1634    /// ```
1635    /// use zerocopy::TryFromBytes;
1636    /// # use zerocopy_derive::*;
1637    ///
1638    /// // The only valid value of this type is the byte `0xC0`
1639    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1640    /// #[repr(u8)]
1641    /// enum C0 { xC0 = 0xC0 }
1642    ///
1643    /// // The only valid value of this type is the bytes `0xC0C0`.
1644    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1645    /// #[repr(C)]
1646    /// struct C0C0(C0, C0);
1647    ///
1648    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1649    /// #[repr(C)]
1650    /// struct Packet {
1651    ///     magic_number: C0C0,
1652    ///     mug_size: u8,
1653    ///     temperature: u8,
1654    ///     marshmallows: [[u8; 2]],
1655    /// }
1656    ///
1657    /// // These are more bytes than are needed to encode a `Packet`.
1658    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1659    ///
1660    /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap();
1661    ///
1662    /// assert_eq!(packet.mug_size, 240);
1663    /// assert_eq!(packet.temperature, 77);
1664    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1665    /// assert_eq!(suffix, &[6u8][..]);
1666    ///
1667    /// // These bytes are not a valid instance of `Packet`.
1668    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1669    /// assert!(Packet::try_ref_from_prefix(bytes).is_err());
1670    /// ```
1671    #[must_use = "has no side effects"]
1672    #[inline]
1673    fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
1674    where
1675        Self: KnownLayout + Immutable,
1676    {
1677        static_assert_dst_is_not_zst!(Self);
1678        try_ref_from_prefix_suffix(source, CastType::Prefix, None)
1679    }
1680
1681    /// Attempts to interpret the suffix of the given `source` as a `&Self`.
1682    ///
1683    /// This method computes the [largest possible size of `Self`][valid-size]
1684    /// that can fit in the trailing bytes of `source`. If that suffix is a
1685    /// valid instance of `Self`, this method returns a reference to those bytes
1686    /// interpreted as `Self`, and a reference to the preceding bytes. If there
1687    /// are insufficient bytes, or if the suffix of `source` would not be
1688    /// appropriately aligned, or if the suffix is not a valid instance of
1689    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1690    /// can [infallibly discard the alignment error][ConvertError::from].
1691    ///
1692    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1693    ///
1694    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1695    /// [self-unaligned]: Unaligned
1696    /// [slice-dst]: KnownLayout#dynamically-sized-types
1697    ///
1698    /// # Compile-Time Assertions
1699    ///
1700    /// This method cannot yet be used on unsized types whose dynamically-sized
1701    /// component is zero-sized. Attempting to use this method on such types
1702    /// results in a compile-time assertion error; e.g.:
1703    ///
1704    /// ```compile_fail,E0080
1705    /// use zerocopy::*;
1706    /// # use zerocopy_derive::*;
1707    ///
1708    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1709    /// #[repr(C)]
1710    /// struct ZSTy {
1711    ///     leading_sized: u16,
1712    ///     trailing_dst: [()],
1713    /// }
1714    ///
1715    /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
1716    /// ```
1717    ///
1718    /// # Examples
1719    ///
1720    /// ```
1721    /// use zerocopy::TryFromBytes;
1722    /// # use zerocopy_derive::*;
1723    ///
1724    /// // The only valid value of this type is the byte `0xC0`
1725    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1726    /// #[repr(u8)]
1727    /// enum C0 { xC0 = 0xC0 }
1728    ///
1729    /// // The only valid value of this type is the bytes `0xC0C0`.
1730    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1731    /// #[repr(C)]
1732    /// struct C0C0(C0, C0);
1733    ///
1734    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1735    /// #[repr(C)]
1736    /// struct Packet {
1737    ///     magic_number: C0C0,
1738    ///     mug_size: u8,
1739    ///     temperature: u8,
1740    ///     marshmallows: [[u8; 2]],
1741    /// }
1742    ///
1743    /// // These are more bytes than are needed to encode a `Packet`.
1744    /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
1745    ///
1746    /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap();
1747    ///
1748    /// assert_eq!(packet.mug_size, 240);
1749    /// assert_eq!(packet.temperature, 77);
1750    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
1751    /// assert_eq!(prefix, &[0u8][..]);
1752    ///
1753    /// // These bytes are not a valid instance of `Packet`.
1754    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
1755    /// assert!(Packet::try_ref_from_suffix(bytes).is_err());
1756    /// ```
1757    #[must_use = "has no side effects"]
1758    #[inline]
1759    fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
1760    where
1761        Self: KnownLayout + Immutable,
1762    {
1763        static_assert_dst_is_not_zst!(Self);
1764        try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
1765    }
1766
1767    /// Attempts to interpret the given `source` as a `&mut Self` without
1768    /// copying.
1769    ///
1770    /// If the bytes of `source` are a valid instance of `Self`, this method
1771    /// returns a reference to those bytes interpreted as a `Self`. If the
1772    /// length of `source` is not a [valid size of `Self`][valid-size], or if
1773    /// `source` is not appropriately aligned, or if `source` is not a valid
1774    /// instance of `Self`, this returns `Err`. If [`Self:
1775    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1776    /// error][ConvertError::from].
1777    ///
1778    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1779    ///
1780    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1781    /// [self-unaligned]: Unaligned
1782    /// [slice-dst]: KnownLayout#dynamically-sized-types
1783    ///
1784    /// # Compile-Time Assertions
1785    ///
1786    /// This method cannot yet be used on unsized types whose dynamically-sized
1787    /// component is zero-sized. Attempting to use this method on such types
1788    /// results in a compile-time assertion error; e.g.:
1789    ///
1790    /// ```compile_fail,E0080
1791    /// use zerocopy::*;
1792    /// # use zerocopy_derive::*;
1793    ///
1794    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1795    /// #[repr(C, packed)]
1796    /// struct ZSTy {
1797    ///     leading_sized: [u8; 2],
1798    ///     trailing_dst: [()],
1799    /// }
1800    ///
1801    /// let mut source = [85, 85];
1802    /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
1803    /// ```
1804    ///
1805    /// # Examples
1806    ///
1807    /// ```
1808    /// use zerocopy::TryFromBytes;
1809    /// # use zerocopy_derive::*;
1810    ///
1811    /// // The only valid value of this type is the byte `0xC0`
1812    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1813    /// #[repr(u8)]
1814    /// enum C0 { xC0 = 0xC0 }
1815    ///
1816    /// // The only valid value of this type is the bytes `0xC0C0`.
1817    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1818    /// #[repr(C)]
1819    /// struct C0C0(C0, C0);
1820    ///
1821    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1822    /// #[repr(C, packed)]
1823    /// struct Packet {
1824    ///     magic_number: C0C0,
1825    ///     mug_size: u8,
1826    ///     temperature: u8,
1827    ///     marshmallows: [[u8; 2]],
1828    /// }
1829    ///
1830    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1831    ///
1832    /// let packet = Packet::try_mut_from_bytes(bytes).unwrap();
1833    ///
1834    /// assert_eq!(packet.mug_size, 240);
1835    /// assert_eq!(packet.temperature, 77);
1836    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1837    ///
1838    /// packet.temperature = 111;
1839    ///
1840    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]);
1841    ///
1842    /// // These bytes are not a valid instance of `Packet`.
1843    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1844    /// assert!(Packet::try_mut_from_bytes(bytes).is_err());
1845    /// ```
1846    #[must_use = "has no side effects"]
1847    #[inline]
1848    fn try_mut_from_bytes(bytes: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
1849    where
1850        Self: KnownLayout + IntoBytes,
1851    {
1852        static_assert_dst_is_not_zst!(Self);
1853        match Ptr::from_mut(bytes).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) {
1854            Ok(source) => {
1855                // This call may panic. If that happens, it doesn't cause any soundness
1856                // issues, as we have not generated any invalid state which we need to
1857                // fix before returning.
1858                //
1859                // Note that one panic or post-monomorphization error condition is
1860                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
1861                // pointer when `Self: !Immutable`. Since the pointer we pass here has
1862                // exclusive (`&mut`) aliasing, this panic condition will not happen.
1863                match source.try_into_valid() {
1864                    Ok(source) => Ok(source.as_mut()),
1865                    Err(e) => {
1866                        Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into())
1867                    }
1868                }
1869            }
1870            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
1871        }
1872    }
1873
1874    /// Attempts to interpret the prefix of the given `source` as a `&mut
1875    /// Self`.
1876    ///
1877    /// This method computes the [largest possible size of `Self`][valid-size]
1878    /// that can fit in the leading bytes of `source`. If that prefix is a valid
1879    /// instance of `Self`, this method returns a reference to those bytes
1880    /// interpreted as `Self`, and a reference to the remaining bytes. If there
1881    /// are insufficient bytes, or if `source` is not appropriately aligned, or
1882    /// if the bytes are not a valid instance of `Self`, this returns `Err`. If
1883    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
1884    /// alignment error][ConvertError::from].
1885    ///
1886    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1887    ///
1888    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1889    /// [self-unaligned]: Unaligned
1890    /// [slice-dst]: KnownLayout#dynamically-sized-types
1891    ///
1892    /// # Compile-Time Assertions
1893    ///
1894    /// This method cannot yet be used on unsized types whose dynamically-sized
1895    /// component is zero-sized. Attempting to use this method on such types
1896    /// results in a compile-time assertion error; e.g.:
1897    ///
1898    /// ```compile_fail,E0080
1899    /// use zerocopy::*;
1900    /// # use zerocopy_derive::*;
1901    ///
1902    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1903    /// #[repr(C, packed)]
1904    /// struct ZSTy {
1905    ///     leading_sized: [u8; 2],
1906    ///     trailing_dst: [()],
1907    /// }
1908    ///
1909    /// let mut source = [85, 85];
1910    /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
1911    /// ```
1912    ///
1913    /// # Examples
1914    ///
1915    /// ```
1916    /// use zerocopy::TryFromBytes;
1917    /// # use zerocopy_derive::*;
1918    ///
1919    /// // The only valid value of this type is the byte `0xC0`
1920    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1921    /// #[repr(u8)]
1922    /// enum C0 { xC0 = 0xC0 }
1923    ///
1924    /// // The only valid value of this type is the bytes `0xC0C0`.
1925    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1926    /// #[repr(C)]
1927    /// struct C0C0(C0, C0);
1928    ///
1929    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1930    /// #[repr(C, packed)]
1931    /// struct Packet {
1932    ///     magic_number: C0C0,
1933    ///     mug_size: u8,
1934    ///     temperature: u8,
1935    ///     marshmallows: [[u8; 2]],
1936    /// }
1937    ///
1938    /// // These are more bytes than are needed to encode a `Packet`.
1939    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1940    ///
1941    /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap();
1942    ///
1943    /// assert_eq!(packet.mug_size, 240);
1944    /// assert_eq!(packet.temperature, 77);
1945    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1946    /// assert_eq!(suffix, &[6u8][..]);
1947    ///
1948    /// packet.temperature = 111;
1949    /// suffix[0] = 222;
1950    ///
1951    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]);
1952    ///
1953    /// // These bytes are not a valid instance of `Packet`.
1954    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1955    /// assert!(Packet::try_mut_from_prefix(bytes).is_err());
1956    /// ```
1957    #[must_use = "has no side effects"]
1958    #[inline]
1959    fn try_mut_from_prefix(
1960        source: &mut [u8],
1961    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
1962    where
1963        Self: KnownLayout + IntoBytes,
1964    {
1965        static_assert_dst_is_not_zst!(Self);
1966        try_mut_from_prefix_suffix(source, CastType::Prefix, None)
1967    }
1968
1969    /// Attempts to interpret the suffix of the given `source` as a `&mut
1970    /// Self`.
1971    ///
1972    /// This method computes the [largest possible size of `Self`][valid-size]
1973    /// that can fit in the trailing bytes of `source`. If that suffix is a
1974    /// valid instance of `Self`, this method returns a reference to those bytes
1975    /// interpreted as `Self`, and a reference to the preceding bytes. If there
1976    /// are insufficient bytes, or if the suffix of `source` would not be
1977    /// appropriately aligned, or if the suffix is not a valid instance of
1978    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1979    /// can [infallibly discard the alignment error][ConvertError::from].
1980    ///
1981    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1982    ///
1983    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1984    /// [self-unaligned]: Unaligned
1985    /// [slice-dst]: KnownLayout#dynamically-sized-types
1986    ///
1987    /// # Compile-Time Assertions
1988    ///
1989    /// This method cannot yet be used on unsized types whose dynamically-sized
1990    /// component is zero-sized. Attempting to use this method on such types
1991    /// results in a compile-time assertion error; e.g.:
1992    ///
1993    /// ```compile_fail,E0080
1994    /// use zerocopy::*;
1995    /// # use zerocopy_derive::*;
1996    ///
1997    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1998    /// #[repr(C, packed)]
1999    /// struct ZSTy {
2000    ///     leading_sized: u16,
2001    ///     trailing_dst: [()],
2002    /// }
2003    ///
2004    /// let mut source = [85, 85];
2005    /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
2006    /// ```
2007    ///
2008    /// # Examples
2009    ///
2010    /// ```
2011    /// use zerocopy::TryFromBytes;
2012    /// # use zerocopy_derive::*;
2013    ///
2014    /// // The only valid value of this type is the byte `0xC0`
2015    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2016    /// #[repr(u8)]
2017    /// enum C0 { xC0 = 0xC0 }
2018    ///
2019    /// // The only valid value of this type is the bytes `0xC0C0`.
2020    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2021    /// #[repr(C)]
2022    /// struct C0C0(C0, C0);
2023    ///
2024    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2025    /// #[repr(C, packed)]
2026    /// struct Packet {
2027    ///     magic_number: C0C0,
2028    ///     mug_size: u8,
2029    ///     temperature: u8,
2030    ///     marshmallows: [[u8; 2]],
2031    /// }
2032    ///
2033    /// // These are more bytes than are needed to encode a `Packet`.
2034    /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2035    ///
2036    /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap();
2037    ///
2038    /// assert_eq!(packet.mug_size, 240);
2039    /// assert_eq!(packet.temperature, 77);
2040    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2041    /// assert_eq!(prefix, &[0u8][..]);
2042    ///
2043    /// prefix[0] = 111;
2044    /// packet.temperature = 222;
2045    ///
2046    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2047    ///
2048    /// // These bytes are not valid instance of `Packet`.
2049    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
2050    /// assert!(Packet::try_mut_from_suffix(bytes).is_err());
2051    /// ```
2052    #[must_use = "has no side effects"]
2053    #[inline]
2054    fn try_mut_from_suffix(
2055        source: &mut [u8],
2056    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2057    where
2058        Self: KnownLayout + IntoBytes,
2059    {
2060        static_assert_dst_is_not_zst!(Self);
2061        try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
2062    }
2063
2064    /// Attempts to interpret the given `source` as a `&Self` with a DST length
2065    /// equal to `count`.
2066    ///
2067    /// This method attempts to return a reference to `source` interpreted as a
2068    /// `Self` with `count` trailing elements. If the length of `source` is not
2069    /// equal to the size of `Self` with `count` elements, if `source` is not
2070    /// appropriately aligned, or if `source` does not contain a valid instance
2071    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2072    /// you can [infallibly discard the alignment error][ConvertError::from].
2073    ///
2074    /// [self-unaligned]: Unaligned
2075    /// [slice-dst]: KnownLayout#dynamically-sized-types
2076    ///
2077    /// # Examples
2078    ///
2079    /// ```
2080    /// # #![allow(non_camel_case_types)] // For C0::xC0
2081    /// use zerocopy::TryFromBytes;
2082    /// # use zerocopy_derive::*;
2083    ///
2084    /// // The only valid value of this type is the byte `0xC0`
2085    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2086    /// #[repr(u8)]
2087    /// enum C0 { xC0 = 0xC0 }
2088    ///
2089    /// // The only valid value of this type is the bytes `0xC0C0`.
2090    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2091    /// #[repr(C)]
2092    /// struct C0C0(C0, C0);
2093    ///
2094    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2095    /// #[repr(C)]
2096    /// struct Packet {
2097    ///     magic_number: C0C0,
2098    ///     mug_size: u8,
2099    ///     temperature: u8,
2100    ///     marshmallows: [[u8; 2]],
2101    /// }
2102    ///
2103    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2104    ///
2105    /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap();
2106    ///
2107    /// assert_eq!(packet.mug_size, 240);
2108    /// assert_eq!(packet.temperature, 77);
2109    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2110    ///
2111    /// // These bytes are not a valid instance of `Packet`.
2112    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2113    /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err());
2114    /// ```
2115    ///
2116    /// Since an explicit `count` is provided, this method supports types with
2117    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`]
2118    /// which do not take an explicit count do not support such types.
2119    ///
2120    /// ```
2121    /// use core::num::NonZeroU16;
2122    /// use zerocopy::*;
2123    /// # use zerocopy_derive::*;
2124    ///
2125    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2126    /// #[repr(C)]
2127    /// struct ZSTy {
2128    ///     leading_sized: NonZeroU16,
2129    ///     trailing_dst: [()],
2130    /// }
2131    ///
2132    /// let src = 0xCAFEu16.as_bytes();
2133    /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap();
2134    /// assert_eq!(zsty.trailing_dst.len(), 42);
2135    /// ```
2136    ///
2137    /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
2138    #[must_use = "has no side effects"]
2139    #[inline]
2140    fn try_ref_from_bytes_with_elems(
2141        source: &[u8],
2142        count: usize,
2143    ) -> Result<&Self, TryCastError<&[u8], Self>>
2144    where
2145        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2146    {
2147        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count))
2148        {
2149            Ok(source) => {
2150                // This call may panic. If that happens, it doesn't cause any soundness
2151                // issues, as we have not generated any invalid state which we need to
2152                // fix before returning.
2153                //
2154                // Note that one panic or post-monomorphization error condition is
2155                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2156                // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
2157                // condition will not happen.
2158                match source.try_into_valid() {
2159                    Ok(source) => Ok(source.as_ref()),
2160                    Err(e) => {
2161                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
2162                    }
2163                }
2164            }
2165            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
2166        }
2167    }
2168
2169    /// Attempts to interpret the prefix of the given `source` as a `&Self` with
2170    /// a DST length equal to `count`.
2171    ///
2172    /// This method attempts to return a reference to the prefix of `source`
2173    /// interpreted as a `Self` with `count` trailing elements, and a reference
2174    /// to the remaining bytes. If the length of `source` is less than the size
2175    /// of `Self` with `count` elements, if `source` is not appropriately
2176    /// aligned, or if the prefix of `source` does not contain a valid instance
2177    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2178    /// you can [infallibly discard the alignment error][ConvertError::from].
2179    ///
2180    /// [self-unaligned]: Unaligned
2181    /// [slice-dst]: KnownLayout#dynamically-sized-types
2182    ///
2183    /// # Examples
2184    ///
2185    /// ```
2186    /// # #![allow(non_camel_case_types)] // For C0::xC0
2187    /// use zerocopy::TryFromBytes;
2188    /// # use zerocopy_derive::*;
2189    ///
2190    /// // The only valid value of this type is the byte `0xC0`
2191    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2192    /// #[repr(u8)]
2193    /// enum C0 { xC0 = 0xC0 }
2194    ///
2195    /// // The only valid value of this type is the bytes `0xC0C0`.
2196    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2197    /// #[repr(C)]
2198    /// struct C0C0(C0, C0);
2199    ///
2200    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2201    /// #[repr(C)]
2202    /// struct Packet {
2203    ///     magic_number: C0C0,
2204    ///     mug_size: u8,
2205    ///     temperature: u8,
2206    ///     marshmallows: [[u8; 2]],
2207    /// }
2208    ///
2209    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2210    ///
2211    /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap();
2212    ///
2213    /// assert_eq!(packet.mug_size, 240);
2214    /// assert_eq!(packet.temperature, 77);
2215    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2216    /// assert_eq!(suffix, &[8u8][..]);
2217    ///
2218    /// // These bytes are not a valid instance of `Packet`.
2219    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2220    /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err());
2221    /// ```
2222    ///
2223    /// Since an explicit `count` is provided, this method supports types with
2224    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2225    /// which do not take an explicit count do not support such types.
2226    ///
2227    /// ```
2228    /// use core::num::NonZeroU16;
2229    /// use zerocopy::*;
2230    /// # use zerocopy_derive::*;
2231    ///
2232    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2233    /// #[repr(C)]
2234    /// struct ZSTy {
2235    ///     leading_sized: NonZeroU16,
2236    ///     trailing_dst: [()],
2237    /// }
2238    ///
2239    /// let src = 0xCAFEu16.as_bytes();
2240    /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap();
2241    /// assert_eq!(zsty.trailing_dst.len(), 42);
2242    /// ```
2243    ///
2244    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2245    #[must_use = "has no side effects"]
2246    #[inline]
2247    fn try_ref_from_prefix_with_elems(
2248        source: &[u8],
2249        count: usize,
2250    ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
2251    where
2252        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2253    {
2254        try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count))
2255    }
2256
2257    /// Attempts to interpret the suffix of the given `source` as a `&Self` with
2258    /// a DST length equal to `count`.
2259    ///
2260    /// This method attempts to return a reference to the suffix of `source`
2261    /// interpreted as a `Self` with `count` trailing elements, and a reference
2262    /// to the preceding bytes. If the length of `source` is less than the size
2263    /// of `Self` with `count` elements, if the suffix of `source` is not
2264    /// appropriately aligned, or if the suffix of `source` does not contain a
2265    /// valid instance of `Self`, this returns `Err`. If [`Self:
2266    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2267    /// error][ConvertError::from].
2268    ///
2269    /// [self-unaligned]: Unaligned
2270    /// [slice-dst]: KnownLayout#dynamically-sized-types
2271    ///
2272    /// # Examples
2273    ///
2274    /// ```
2275    /// # #![allow(non_camel_case_types)] // For C0::xC0
2276    /// use zerocopy::TryFromBytes;
2277    /// # use zerocopy_derive::*;
2278    ///
2279    /// // The only valid value of this type is the byte `0xC0`
2280    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2281    /// #[repr(u8)]
2282    /// enum C0 { xC0 = 0xC0 }
2283    ///
2284    /// // The only valid value of this type is the bytes `0xC0C0`.
2285    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2286    /// #[repr(C)]
2287    /// struct C0C0(C0, C0);
2288    ///
2289    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2290    /// #[repr(C)]
2291    /// struct Packet {
2292    ///     magic_number: C0C0,
2293    ///     mug_size: u8,
2294    ///     temperature: u8,
2295    ///     marshmallows: [[u8; 2]],
2296    /// }
2297    ///
2298    /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2299    ///
2300    /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap();
2301    ///
2302    /// assert_eq!(packet.mug_size, 240);
2303    /// assert_eq!(packet.temperature, 77);
2304    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2305    /// assert_eq!(prefix, &[123u8][..]);
2306    ///
2307    /// // These bytes are not a valid instance of `Packet`.
2308    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2309    /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err());
2310    /// ```
2311    ///
2312    /// Since an explicit `count` is provided, this method supports types with
2313    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2314    /// which do not take an explicit count do not support such types.
2315    ///
2316    /// ```
2317    /// use core::num::NonZeroU16;
2318    /// use zerocopy::*;
2319    /// # use zerocopy_derive::*;
2320    ///
2321    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2322    /// #[repr(C)]
2323    /// struct ZSTy {
2324    ///     leading_sized: NonZeroU16,
2325    ///     trailing_dst: [()],
2326    /// }
2327    ///
2328    /// let src = 0xCAFEu16.as_bytes();
2329    /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap();
2330    /// assert_eq!(zsty.trailing_dst.len(), 42);
2331    /// ```
2332    ///
2333    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2334    #[must_use = "has no side effects"]
2335    #[inline]
2336    fn try_ref_from_suffix_with_elems(
2337        source: &[u8],
2338        count: usize,
2339    ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
2340    where
2341        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2342    {
2343        try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2344    }
2345
2346    /// Attempts to interpret the given `source` as a `&mut Self` with a DST
2347    /// length equal to `count`.
2348    ///
2349    /// This method attempts to return a reference to `source` interpreted as a
2350    /// `Self` with `count` trailing elements. If the length of `source` is not
2351    /// equal to the size of `Self` with `count` elements, if `source` is not
2352    /// appropriately aligned, or if `source` does not contain a valid instance
2353    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2354    /// you can [infallibly discard the alignment error][ConvertError::from].
2355    ///
2356    /// [self-unaligned]: Unaligned
2357    /// [slice-dst]: KnownLayout#dynamically-sized-types
2358    ///
2359    /// # Examples
2360    ///
2361    /// ```
2362    /// # #![allow(non_camel_case_types)] // For C0::xC0
2363    /// use zerocopy::TryFromBytes;
2364    /// # use zerocopy_derive::*;
2365    ///
2366    /// // The only valid value of this type is the byte `0xC0`
2367    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2368    /// #[repr(u8)]
2369    /// enum C0 { xC0 = 0xC0 }
2370    ///
2371    /// // The only valid value of this type is the bytes `0xC0C0`.
2372    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2373    /// #[repr(C)]
2374    /// struct C0C0(C0, C0);
2375    ///
2376    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2377    /// #[repr(C, packed)]
2378    /// struct Packet {
2379    ///     magic_number: C0C0,
2380    ///     mug_size: u8,
2381    ///     temperature: u8,
2382    ///     marshmallows: [[u8; 2]],
2383    /// }
2384    ///
2385    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2386    ///
2387    /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap();
2388    ///
2389    /// assert_eq!(packet.mug_size, 240);
2390    /// assert_eq!(packet.temperature, 77);
2391    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2392    ///
2393    /// packet.temperature = 111;
2394    ///
2395    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]);
2396    ///
2397    /// // These bytes are not a valid instance of `Packet`.
2398    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2399    /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err());
2400    /// ```
2401    ///
2402    /// Since an explicit `count` is provided, this method supports types with
2403    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`]
2404    /// which do not take an explicit count do not support such types.
2405    ///
2406    /// ```
2407    /// use core::num::NonZeroU16;
2408    /// use zerocopy::*;
2409    /// # use zerocopy_derive::*;
2410    ///
2411    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2412    /// #[repr(C, packed)]
2413    /// struct ZSTy {
2414    ///     leading_sized: NonZeroU16,
2415    ///     trailing_dst: [()],
2416    /// }
2417    ///
2418    /// let mut src = 0xCAFEu16;
2419    /// let src = src.as_mut_bytes();
2420    /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap();
2421    /// assert_eq!(zsty.trailing_dst.len(), 42);
2422    /// ```
2423    ///
2424    /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes
2425    #[must_use = "has no side effects"]
2426    #[inline]
2427    fn try_mut_from_bytes_with_elems(
2428        source: &mut [u8],
2429        count: usize,
2430    ) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2431    where
2432        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2433    {
2434        match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count))
2435        {
2436            Ok(source) => {
2437                // This call may panic. If that happens, it doesn't cause any soundness
2438                // issues, as we have not generated any invalid state which we need to
2439                // fix before returning.
2440                //
2441                // Note that one panic or post-monomorphization error condition is
2442                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2443                // pointer when `Self: !Immutable`. Since the pointer we pass here has
2444                // exclusive (`&mut`) aliasing, this panic condition will not happen.
2445                match source.try_into_valid() {
2446                    Ok(source) => Ok(source.as_mut()),
2447                    Err(e) => {
2448                        Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into())
2449                    }
2450                }
2451            }
2452            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2453        }
2454    }
2455
2456    /// Attempts to interpret the prefix of the given `source` as a `&mut Self`
2457    /// with a DST length equal to `count`.
2458    ///
2459    /// This method attempts to return a reference to the prefix of `source`
2460    /// interpreted as a `Self` with `count` trailing elements, and a reference
2461    /// to the remaining bytes. If the length of `source` is less than the size
2462    /// of `Self` with `count` elements, if `source` is not appropriately
2463    /// aligned, or if the prefix of `source` does not contain a valid instance
2464    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2465    /// you can [infallibly discard the alignment error][ConvertError::from].
2466    ///
2467    /// [self-unaligned]: Unaligned
2468    /// [slice-dst]: KnownLayout#dynamically-sized-types
2469    ///
2470    /// # Examples
2471    ///
2472    /// ```
2473    /// # #![allow(non_camel_case_types)] // For C0::xC0
2474    /// use zerocopy::TryFromBytes;
2475    /// # use zerocopy_derive::*;
2476    ///
2477    /// // The only valid value of this type is the byte `0xC0`
2478    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2479    /// #[repr(u8)]
2480    /// enum C0 { xC0 = 0xC0 }
2481    ///
2482    /// // The only valid value of this type is the bytes `0xC0C0`.
2483    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2484    /// #[repr(C)]
2485    /// struct C0C0(C0, C0);
2486    ///
2487    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2488    /// #[repr(C, packed)]
2489    /// struct Packet {
2490    ///     magic_number: C0C0,
2491    ///     mug_size: u8,
2492    ///     temperature: u8,
2493    ///     marshmallows: [[u8; 2]],
2494    /// }
2495    ///
2496    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2497    ///
2498    /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap();
2499    ///
2500    /// assert_eq!(packet.mug_size, 240);
2501    /// assert_eq!(packet.temperature, 77);
2502    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2503    /// assert_eq!(suffix, &[8u8][..]);
2504    ///
2505    /// packet.temperature = 111;
2506    /// suffix[0] = 222;
2507    ///
2508    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]);
2509    ///
2510    /// // These bytes are not a valid instance of `Packet`.
2511    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2512    /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err());
2513    /// ```
2514    ///
2515    /// Since an explicit `count` is provided, this method supports types with
2516    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2517    /// which do not take an explicit count do not support such types.
2518    ///
2519    /// ```
2520    /// use core::num::NonZeroU16;
2521    /// use zerocopy::*;
2522    /// # use zerocopy_derive::*;
2523    ///
2524    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2525    /// #[repr(C, packed)]
2526    /// struct ZSTy {
2527    ///     leading_sized: NonZeroU16,
2528    ///     trailing_dst: [()],
2529    /// }
2530    ///
2531    /// let mut src = 0xCAFEu16;
2532    /// let src = src.as_mut_bytes();
2533    /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap();
2534    /// assert_eq!(zsty.trailing_dst.len(), 42);
2535    /// ```
2536    ///
2537    /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2538    #[must_use = "has no side effects"]
2539    #[inline]
2540    fn try_mut_from_prefix_with_elems(
2541        source: &mut [u8],
2542        count: usize,
2543    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
2544    where
2545        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2546    {
2547        try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count))
2548    }
2549
2550    /// Attempts to interpret the suffix of the given `source` as a `&mut Self`
2551    /// with a DST length equal to `count`.
2552    ///
2553    /// This method attempts to return a reference to the suffix of `source`
2554    /// interpreted as a `Self` with `count` trailing elements, and a reference
2555    /// to the preceding bytes. If the length of `source` is less than the size
2556    /// of `Self` with `count` elements, if the suffix of `source` is not
2557    /// appropriately aligned, or if the suffix of `source` does not contain a
2558    /// valid instance of `Self`, this returns `Err`. If [`Self:
2559    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2560    /// error][ConvertError::from].
2561    ///
2562    /// [self-unaligned]: Unaligned
2563    /// [slice-dst]: KnownLayout#dynamically-sized-types
2564    ///
2565    /// # Examples
2566    ///
2567    /// ```
2568    /// # #![allow(non_camel_case_types)] // For C0::xC0
2569    /// use zerocopy::TryFromBytes;
2570    /// # use zerocopy_derive::*;
2571    ///
2572    /// // The only valid value of this type is the byte `0xC0`
2573    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2574    /// #[repr(u8)]
2575    /// enum C0 { xC0 = 0xC0 }
2576    ///
2577    /// // The only valid value of this type is the bytes `0xC0C0`.
2578    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2579    /// #[repr(C)]
2580    /// struct C0C0(C0, C0);
2581    ///
2582    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2583    /// #[repr(C, packed)]
2584    /// struct Packet {
2585    ///     magic_number: C0C0,
2586    ///     mug_size: u8,
2587    ///     temperature: u8,
2588    ///     marshmallows: [[u8; 2]],
2589    /// }
2590    ///
2591    /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2592    ///
2593    /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap();
2594    ///
2595    /// assert_eq!(packet.mug_size, 240);
2596    /// assert_eq!(packet.temperature, 77);
2597    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2598    /// assert_eq!(prefix, &[123u8][..]);
2599    ///
2600    /// prefix[0] = 111;
2601    /// packet.temperature = 222;
2602    ///
2603    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2604    ///
2605    /// // These bytes are not a valid instance of `Packet`.
2606    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2607    /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err());
2608    /// ```
2609    ///
2610    /// Since an explicit `count` is provided, this method supports types with
2611    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2612    /// which do not take an explicit count do not support such types.
2613    ///
2614    /// ```
2615    /// use core::num::NonZeroU16;
2616    /// use zerocopy::*;
2617    /// # use zerocopy_derive::*;
2618    ///
2619    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2620    /// #[repr(C, packed)]
2621    /// struct ZSTy {
2622    ///     leading_sized: NonZeroU16,
2623    ///     trailing_dst: [()],
2624    /// }
2625    ///
2626    /// let mut src = 0xCAFEu16;
2627    /// let src = src.as_mut_bytes();
2628    /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap();
2629    /// assert_eq!(zsty.trailing_dst.len(), 42);
2630    /// ```
2631    ///
2632    /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2633    #[must_use = "has no side effects"]
2634    #[inline]
2635    fn try_mut_from_suffix_with_elems(
2636        source: &mut [u8],
2637        count: usize,
2638    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2639    where
2640        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2641    {
2642        try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2643    }
2644
2645    /// Attempts to read the given `source` as a `Self`.
2646    ///
2647    /// If `source.len() != size_of::<Self>()` or the bytes are not a valid
2648    /// instance of `Self`, this returns `Err`.
2649    ///
2650    /// # Examples
2651    ///
2652    /// ```
2653    /// use zerocopy::TryFromBytes;
2654    /// # use zerocopy_derive::*;
2655    ///
2656    /// // The only valid value of this type is the byte `0xC0`
2657    /// #[derive(TryFromBytes)]
2658    /// #[repr(u8)]
2659    /// enum C0 { xC0 = 0xC0 }
2660    ///
2661    /// // The only valid value of this type is the bytes `0xC0C0`.
2662    /// #[derive(TryFromBytes)]
2663    /// #[repr(C)]
2664    /// struct C0C0(C0, C0);
2665    ///
2666    /// #[derive(TryFromBytes)]
2667    /// #[repr(C)]
2668    /// struct Packet {
2669    ///     magic_number: C0C0,
2670    ///     mug_size: u8,
2671    ///     temperature: u8,
2672    /// }
2673    ///
2674    /// let bytes = &[0xC0, 0xC0, 240, 77][..];
2675    ///
2676    /// let packet = Packet::try_read_from_bytes(bytes).unwrap();
2677    ///
2678    /// assert_eq!(packet.mug_size, 240);
2679    /// assert_eq!(packet.temperature, 77);
2680    ///
2681    /// // These bytes are not a valid instance of `Packet`.
2682    /// let bytes = &mut [0x10, 0xC0, 240, 77][..];
2683    /// assert!(Packet::try_read_from_bytes(bytes).is_err());
2684    /// ```
2685    #[must_use = "has no side effects"]
2686    #[inline]
2687    fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>>
2688    where
2689        Self: Sized,
2690    {
2691        let candidate = match CoreMaybeUninit::<Self>::read_from_bytes(source) {
2692            Ok(candidate) => candidate,
2693            Err(e) => {
2694                return Err(TryReadError::Size(e.with_dst()));
2695            }
2696        };
2697        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
2698        // its bytes are initialized.
2699        unsafe { try_read_from(source, candidate) }
2700    }
2701
2702    /// Attempts to read a `Self` from the prefix of the given `source`.
2703    ///
2704    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
2705    /// of `source`, returning that `Self` and any remaining bytes. If
2706    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2707    /// of `Self`, it returns `Err`.
2708    ///
2709    /// # Examples
2710    ///
2711    /// ```
2712    /// use zerocopy::TryFromBytes;
2713    /// # use zerocopy_derive::*;
2714    ///
2715    /// // The only valid value of this type is the byte `0xC0`
2716    /// #[derive(TryFromBytes)]
2717    /// #[repr(u8)]
2718    /// enum C0 { xC0 = 0xC0 }
2719    ///
2720    /// // The only valid value of this type is the bytes `0xC0C0`.
2721    /// #[derive(TryFromBytes)]
2722    /// #[repr(C)]
2723    /// struct C0C0(C0, C0);
2724    ///
2725    /// #[derive(TryFromBytes)]
2726    /// #[repr(C)]
2727    /// struct Packet {
2728    ///     magic_number: C0C0,
2729    ///     mug_size: u8,
2730    ///     temperature: u8,
2731    /// }
2732    ///
2733    /// // These are more bytes than are needed to encode a `Packet`.
2734    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2735    ///
2736    /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap();
2737    ///
2738    /// assert_eq!(packet.mug_size, 240);
2739    /// assert_eq!(packet.temperature, 77);
2740    /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]);
2741    ///
2742    /// // These bytes are not a valid instance of `Packet`.
2743    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2744    /// assert!(Packet::try_read_from_prefix(bytes).is_err());
2745    /// ```
2746    #[must_use = "has no side effects"]
2747    #[inline]
2748    fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>>
2749    where
2750        Self: Sized,
2751    {
2752        let (candidate, suffix) = match CoreMaybeUninit::<Self>::read_from_prefix(source) {
2753            Ok(candidate) => candidate,
2754            Err(e) => {
2755                return Err(TryReadError::Size(e.with_dst()));
2756            }
2757        };
2758        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
2759        // its bytes are initialized.
2760        unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) }
2761    }
2762
2763    /// Attempts to read a `Self` from the suffix of the given `source`.
2764    ///
2765    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
2766    /// of `source`, returning that `Self` and any preceding bytes. If
2767    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2768    /// of `Self`, it returns `Err`.
2769    ///
2770    /// # Examples
2771    ///
2772    /// ```
2773    /// # #![allow(non_camel_case_types)] // For C0::xC0
2774    /// use zerocopy::TryFromBytes;
2775    /// # use zerocopy_derive::*;
2776    ///
2777    /// // The only valid value of this type is the byte `0xC0`
2778    /// #[derive(TryFromBytes)]
2779    /// #[repr(u8)]
2780    /// enum C0 { xC0 = 0xC0 }
2781    ///
2782    /// // The only valid value of this type is the bytes `0xC0C0`.
2783    /// #[derive(TryFromBytes)]
2784    /// #[repr(C)]
2785    /// struct C0C0(C0, C0);
2786    ///
2787    /// #[derive(TryFromBytes)]
2788    /// #[repr(C)]
2789    /// struct Packet {
2790    ///     magic_number: C0C0,
2791    ///     mug_size: u8,
2792    ///     temperature: u8,
2793    /// }
2794    ///
2795    /// // These are more bytes than are needed to encode a `Packet`.
2796    /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..];
2797    ///
2798    /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap();
2799    ///
2800    /// assert_eq!(packet.mug_size, 240);
2801    /// assert_eq!(packet.temperature, 77);
2802    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
2803    ///
2804    /// // These bytes are not a valid instance of `Packet`.
2805    /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..];
2806    /// assert!(Packet::try_read_from_suffix(bytes).is_err());
2807    /// ```
2808    #[must_use = "has no side effects"]
2809    #[inline]
2810    fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>>
2811    where
2812        Self: Sized,
2813    {
2814        let (prefix, candidate) = match CoreMaybeUninit::<Self>::read_from_suffix(source) {
2815            Ok(candidate) => candidate,
2816            Err(e) => {
2817                return Err(TryReadError::Size(e.with_dst()));
2818            }
2819        };
2820        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
2821        // its bytes are initialized.
2822        unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) }
2823    }
2824}
2825
2826#[inline(always)]
2827fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>(
2828    source: &[u8],
2829    cast_type: CastType,
2830    meta: Option<T::PointerMetadata>,
2831) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> {
2832    match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
2833        Ok((source, prefix_suffix)) => {
2834            // This call may panic. If that happens, it doesn't cause any soundness
2835            // issues, as we have not generated any invalid state which we need to
2836            // fix before returning.
2837            //
2838            // Note that one panic or post-monomorphization error condition is
2839            // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2840            // pointer when `T: !Immutable`. Since this function requires `T:
2841            // Immutable`, this panic condition will not happen.
2842            match source.try_into_valid() {
2843                Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
2844                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
2845            }
2846        }
2847        Err(e) => Err(e.map_src(Ptr::as_ref).into()),
2848    }
2849}
2850
2851#[inline(always)]
2852fn try_mut_from_prefix_suffix<T: IntoBytes + TryFromBytes + KnownLayout + ?Sized>(
2853    candidate: &mut [u8],
2854    cast_type: CastType,
2855    meta: Option<T::PointerMetadata>,
2856) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> {
2857    match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
2858        Ok((candidate, prefix_suffix)) => {
2859            // This call may panic. If that happens, it doesn't cause any soundness
2860            // issues, as we have not generated any invalid state which we need to
2861            // fix before returning.
2862            //
2863            // Note that one panic or post-monomorphization error condition is
2864            // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2865            // pointer when `T: !Immutable`. Since `candidate` is an exclusive
2866            // pointer, this panic condition will not happen.
2867            match candidate.try_into_valid() {
2868                Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
2869                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into()),
2870            }
2871        }
2872        Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2873    }
2874}
2875
2876#[inline(always)]
2877fn swap<T, U>((t, u): (T, U)) -> (U, T) {
2878    (u, t)
2879}
2880
2881/// # Safety
2882///
2883/// All bytes of `candidate` must be initialized.
2884#[inline(always)]
2885unsafe fn try_read_from<S, T: TryFromBytes>(
2886    source: S,
2887    mut candidate: CoreMaybeUninit<T>,
2888) -> Result<T, TryReadError<S, T>> {
2889    // We use `from_mut` despite not mutating via `c_ptr` so that we don't need
2890    // to add a `T: Immutable` bound.
2891    let c_ptr = Ptr::from_mut(&mut candidate);
2892    // SAFETY: `c_ptr` has no uninitialized sub-ranges because it derived from
2893    // `candidate`, which the caller promises is entirely initialized. Since
2894    // `candidate` is a `MaybeUninit`, it has no validity requirements, and so
2895    // no values written to an `Initialized` `c_ptr` can violate its validity.
2896    // Since `c_ptr` has `Exclusive` aliasing, no mutations may happen except
2897    // via `c_ptr` so long as it is live, so we don't need to worry about the
2898    // fact that `c_ptr` may have more restricted validity than `candidate`.
2899    let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };
2900    let c_ptr = c_ptr.transmute();
2901
2902    // Since we don't have `T: KnownLayout`, we hack around that by using
2903    // `Wrapping<T>`, which implements `KnownLayout` even if `T` doesn't.
2904    //
2905    // This call may panic. If that happens, it doesn't cause any soundness
2906    // issues, as we have not generated any invalid state which we need to fix
2907    // before returning.
2908    //
2909    // Note that one panic or post-monomorphization error condition is calling
2910    // `is_bit_valid` with a shared pointer when `T: !Immutable`. Since
2911    // `c_ptr` has exclusive aliasing, this panic condition will not
2912    // happen.
2913    if !Wrapping::<T>::is_bit_valid(c_ptr.forget_aligned()) {
2914        return Err(ValidityError::new(source).into());
2915    }
2916
2917    fn _assert_same_size_and_validity<T>()
2918    where
2919        Wrapping<T>: pointer::TransmuteFrom<T, invariant::Valid, invariant::Valid>,
2920        T: pointer::TransmuteFrom<Wrapping<T>, invariant::Valid, invariant::Valid>,
2921    {
2922    }
2923
2924    _assert_same_size_and_validity::<T>();
2925
2926    // SAFETY: We just validated that `candidate` contains a valid
2927    // `Wrapping<T>`, which has the same size and bit validity as `T`, as
2928    // guaranteed by the preceding type assertion.
2929    Ok(unsafe { candidate.assume_init() })
2930}
2931
2932/// Types for which a sequence of `0` bytes is a valid instance.
2933///
2934/// Any memory region of the appropriate length which is guaranteed to contain
2935/// only zero bytes can be viewed as any `FromZeros` type with no runtime
2936/// overhead. This is useful whenever memory is known to be in a zeroed state,
2937/// such memory returned from some allocation routines.
2938/// such as memory returned from some allocation routines.
2939/// # Warning: Padding bytes
2940///
2941/// Note that, when a value is moved or copied, only the non-padding bytes of
2942/// that value are guaranteed to be preserved. It is unsound to assume that
2943/// values written to padding bytes are preserved after a move or copy. For more
2944/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes].
2945///
2946/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes
2947///
2948/// # Implementation
2949///
2950/// **Do not implement this trait yourself!** Instead, use
2951/// [`#[derive(FromZeros)]`][derive]; e.g.:
2952///
2953/// ```
2954/// # use zerocopy_derive::{FromZeros, Immutable};
2955/// #[derive(FromZeros)]
2956/// struct MyStruct {
2957/// # /*
2958///     ...
2959/// # */
2960/// }
2961///
2962/// #[derive(FromZeros)]
2963/// #[repr(u8)]
2964/// enum MyEnum {
2965/// #   Variant0,
2966/// # /*
2967///     ...
2968/// # */
2969/// }
2970///
2971/// #[derive(FromZeros, Immutable)]
2972/// union MyUnion {
2973/// #   variant: u8,
2974/// # /*
2975///     ...
2976/// # */
2977/// }
2978/// ```
2979///
2980/// This derive performs a sophisticated, compile-time safety analysis to
2981/// determine whether a type is `FromZeros`.
2982///
2983/// # Safety
2984///
2985/// *This section describes what is required in order for `T: FromZeros`, and
2986/// what unsafe code may assume of such types. If you don't plan on implementing
2987/// `FromZeros` manually, and you don't plan on writing unsafe code that
2988/// operates on `FromZeros` types, then you don't need to read this section.*
2989///
2990/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a
2991/// `T` whose bytes are all initialized to zero. If a type is marked as
2992/// `FromZeros` which violates this contract, it may cause undefined behavior.
2993///
2994/// `#[derive(FromZeros)]` only permits [types which satisfy these
2995/// requirements][derive-analysis].
2996///
2997#[cfg_attr(
2998    feature = "derive",
2999    doc = "[derive]: zerocopy_derive::FromZeros",
3000    doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis"
3001)]
3002#[cfg_attr(
3003    not(feature = "derive"),
3004    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"),
3005    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"),
3006)]
3007#[cfg_attr(
3008    zerocopy_diagnostic_on_unimplemented_1_78_0,
3009    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`")
3010)]
3011pub unsafe trait FromZeros: TryFromBytes {
3012    // The `Self: Sized` bound makes it so that `FromZeros` is still object
3013    // safe.
3014    #[doc(hidden)]
3015    fn only_derive_is_allowed_to_implement_this_trait()
3016    where
3017        Self: Sized;
3018
3019    /// Overwrites `self` with zeros.
3020    ///
3021    /// Sets every byte in `self` to 0. While this is similar to doing `*self =
3022    /// Self::new_zeroed()`, it differs in that `zero` does not semantically
3023    /// drop the current value and replace it with a new one — it simply
3024    /// modifies the bytes of the existing value.
3025    ///
3026    /// # Examples
3027    ///
3028    /// ```
3029    /// # use zerocopy::FromZeros;
3030    /// # use zerocopy_derive::*;
3031    /// #
3032    /// #[derive(FromZeros)]
3033    /// #[repr(C)]
3034    /// struct PacketHeader {
3035    ///     src_port: [u8; 2],
3036    ///     dst_port: [u8; 2],
3037    ///     length: [u8; 2],
3038    ///     checksum: [u8; 2],
3039    /// }
3040    ///
3041    /// let mut header = PacketHeader {
3042    ///     src_port: 100u16.to_be_bytes(),
3043    ///     dst_port: 200u16.to_be_bytes(),
3044    ///     length: 300u16.to_be_bytes(),
3045    ///     checksum: 400u16.to_be_bytes(),
3046    /// };
3047    ///
3048    /// header.zero();
3049    ///
3050    /// assert_eq!(header.src_port, [0, 0]);
3051    /// assert_eq!(header.dst_port, [0, 0]);
3052    /// assert_eq!(header.length, [0, 0]);
3053    /// assert_eq!(header.checksum, [0, 0]);
3054    /// ```
3055    #[inline(always)]
3056    fn zero(&mut self) {
3057        let slf: *mut Self = self;
3058        let len = mem::size_of_val(self);
3059        // SAFETY:
3060        // - `self` is guaranteed by the type system to be valid for writes of
3061        //   size `size_of_val(self)`.
3062        // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
3063        //   as required by `u8`.
3064        // - Since `Self: FromZeros`, the all-zeros instance is a valid instance
3065        //   of `Self.`
3066        //   of `Self`.
3067        // FIXME(#429): Add references to docs and quotes.
3068        unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
3069    }
3070
3071    /// Creates an instance of `Self` from zeroed bytes.
3072    ///
3073    /// # Examples
3074    ///
3075    /// ```
3076    /// # use zerocopy::FromZeros;
3077    /// # use zerocopy_derive::*;
3078    /// #
3079    /// #[derive(FromZeros)]
3080    /// #[repr(C)]
3081    /// struct PacketHeader {
3082    ///     src_port: [u8; 2],
3083    ///     dst_port: [u8; 2],
3084    ///     length: [u8; 2],
3085    ///     checksum: [u8; 2],
3086    /// }
3087    ///
3088    /// let header: PacketHeader = FromZeros::new_zeroed();
3089    ///
3090    /// assert_eq!(header.src_port, [0, 0]);
3091    /// assert_eq!(header.dst_port, [0, 0]);
3092    /// assert_eq!(header.length, [0, 0]);
3093    /// assert_eq!(header.checksum, [0, 0]);
3094    /// ```
3095    #[must_use = "has no side effects"]
3096    #[inline(always)]
3097    fn new_zeroed() -> Self
3098    where
3099        Self: Sized,
3100    {
3101        // SAFETY: `FromZeros` says that the all-zeros bit pattern is legal.
3102        unsafe { mem::zeroed() }
3103    }
3104
3105    /// Creates a `Box<Self>` from zeroed bytes.
3106    ///
3107    /// This function is useful for allocating large values on the heap and
3108    /// zero-initializing them, without ever creating a temporary instance of
3109    /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
3110    /// will allocate `[u8; 1048576]` directly on the heap; it does not require
3111    /// storing `[u8; 1048576]` in a temporary variable on the stack.
3112    ///
3113    /// On systems that use a heap implementation that supports allocating from
3114    /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
3115    /// have performance benefits.
3116    ///
3117    /// # Errors
3118    ///
3119    /// Returns an error on allocation failure. Allocation failure is guaranteed
3120    /// never to cause a panic or an abort.
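    ///
    /// # Examples
    ///
    /// A minimal sketch of zero-initializing a large array directly on the
    /// heap; the array size is illustrative, and the example assumes the
    /// `alloc` feature is enabled:
    ///
    /// ```
    /// use zerocopy::FromZeros;
    ///
    /// // Allocate a 1 MiB buffer on the heap, filled with zeros, without
    /// // constructing it on the stack first.
    /// let buf: Box<[u8; 1048576]> = <[u8; 1048576]>::new_box_zeroed().unwrap();
    ///
    /// assert!(buf.iter().all(|&byte| byte == 0));
    /// ```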
3121    #[must_use = "has no side effects (other than allocation)"]
3122    #[cfg(any(feature = "alloc", test))]
3123    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3124    #[inline]
3125    fn new_box_zeroed() -> Result<Box<Self>, AllocError>
3126    where
3127        Self: Sized,
3128    {
3129        // If `Self` is a ZST, then return a proper boxed instance of it. There is
3130        // no allocation, but `Box` does require a correct dangling pointer.
3131        let layout = Layout::new::<Self>();
3132        if layout.size() == 0 {
3133            // Construct the `Box` from a dangling pointer to avoid calling
3134            // `Self::new_zeroed`. This ensures that stack space is never
3135            // allocated for `Self` even on lower opt-levels where this branch
3136            // might not get optimized out.
3137
3138            // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity
3139            // requirements are that the pointer is non-null and sufficiently
3140            // aligned. Per [2], `NonNull::dangling` produces a pointer which
3141            // is sufficiently aligned. Since the produced pointer is a
3142            // `NonNull`, it is non-null.
3143            //
3144            // [1] Per https://doc.rust-lang.org/nightly/std/boxed/index.html#memory-layout:
3145            //
3146            //   For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned.
3147            //
3148            // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling:
3149            //
3150            //   Creates a new `NonNull` that is dangling, but well-aligned.
3151            return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) });
3152        }
3153
3154        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
3155        #[allow(clippy::undocumented_unsafe_blocks)]
3156        let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
3157        if ptr.is_null() {
3158            return Err(AllocError);
3159        }
3160        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
3161        #[allow(clippy::undocumented_unsafe_blocks)]
3162        Ok(unsafe { Box::from_raw(ptr) })
3163    }
3164
3165    /// Creates a `Box<Self>` from zeroed bytes, where `Self` is a slice or
3166    /// slice DST with `count` trailing elements.
3167    ///
3168    /// This function is useful for allocating large values on the heap and
3169    /// zero-initializing them, without ever creating a temporary instance on
3170    /// the stack. For example, `<[u8]>::new_box_zeroed_with_elems(1048576)`
3171    /// will allocate the slice directly on the heap; it does not require
3172    /// storing the slice on the stack.
3173    ///
3174    /// On systems that use a heap implementation that supports allocating from
3175    /// pre-zeroed memory, using `new_box_zeroed_with_elems` may have
3176    /// performance benefits.
3177    ///
3178    /// If the trailing slice elements of `Self` are zero-sized, then the
3179    /// returned `Box<Self>` cannot contain any actual information, but its
3180    /// length metadata (e.g., `len()` for slices) will report the correct value.
3181    ///
3182    /// # Errors
3183    ///
3184    /// Returns an error on allocation failure. Allocation failure is
3185    /// guaranteed never to cause a panic or an abort.
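    ///
    /// # Examples
    ///
    /// A minimal sketch of allocating a zeroed boxed slice; the element type
    /// and length are illustrative, and the example assumes the `alloc`
    /// feature is enabled:
    ///
    /// ```
    /// use zerocopy::FromZeros;
    ///
    /// // Allocate a boxed slice of 16 zeroed `u64`s directly on the heap.
    /// let nums: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(16).unwrap();
    ///
    /// assert_eq!(nums.len(), 16);
    /// assert!(nums.iter().all(|&n| n == 0));
    /// ```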
3186    #[must_use = "has no side effects (other than allocation)"]
3187    #[cfg(feature = "alloc")]
3188    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3189    #[inline]
3190    fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError>
3191    where
3192        Self: KnownLayout<PointerMetadata = usize>,
3193    {
3194        // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of
3195        // `new_box`. The referent of the pointer returned by `alloc_zeroed`
3196        // (and, consequently, the `Box` derived from it) is a valid instance of
3197        // `Self`, because `Self` is `FromZeros`.
3198        unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) }
3199    }
3200
3201    #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")]
3202    #[doc(hidden)]
3203    #[cfg(feature = "alloc")]
3204    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3205    #[must_use = "has no side effects (other than allocation)"]
3206    #[inline(always)]
3207    fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError>
3208    where
3209        Self: Sized,
3210    {
3211        <[Self]>::new_box_zeroed_with_elems(len)
3212    }
3213
3214    /// Creates a `Vec<Self>` from zeroed bytes.
3215    ///
3216    /// This function is useful for allocating large `Vec`s and
3217    /// zero-initializing them, without ever creating a temporary instance of
3218    /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
3219    /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
3220    /// heap; it does not require storing intermediate values on the stack.
3221    ///
3222    /// On systems that use a heap implementation that supports allocating from
3223    /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
3224    ///
3225    /// If `Self` is a zero-sized type, then this function will return a
3226    /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
3227    /// actual information, but its `len()` property will report the correct
3228    /// value.
3229    ///
3230    /// # Errors
3231    ///
3232    /// Returns an error on allocation failure. Allocation failure is
3233    /// guaranteed never to cause a panic or an abort.
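    ///
    /// # Examples
    ///
    /// A minimal sketch of allocating a zeroed `Vec`; the element type and
    /// length are illustrative, and the example assumes the `alloc` feature is
    /// enabled:
    ///
    /// ```
    /// use zerocopy::FromZeros;
    ///
    /// let v: Vec<u32> = u32::new_vec_zeroed(8).unwrap();
    ///
    /// assert_eq!(v, vec![0u32; 8]);
    /// ```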
3234    #[must_use = "has no side effects (other than allocation)"]
3235    #[cfg(feature = "alloc")]
3236    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3237    #[inline(always)]
3238    fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError>
3239    where
3240        Self: Sized,
3241    {
3242        <[Self]>::new_box_zeroed_with_elems(len).map(Into::into)
3243    }
3244
3245    /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of
3246    /// the vector. The new items are initialized with zeros.
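    ///
    /// # Examples
    ///
    /// A minimal sketch of growing a `Vec` with zeroed elements; the values
    /// are illustrative, and the example assumes the `alloc` feature on a
    /// toolchain where `Vec::try_reserve` is available (Rust 1.57+):
    ///
    /// ```
    /// use zerocopy::FromZeros;
    ///
    /// let mut v = vec![100u16, 200];
    /// u16::extend_vec_zeroed(&mut v, 3).unwrap();
    ///
    /// assert_eq!(v, [100, 200, 0, 0, 0]);
    /// ```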
3247    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
3248    #[cfg(feature = "alloc")]
3249    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
3250    #[inline(always)]
3251    fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError>
3252    where
3253        Self: Sized,
3254    {
3255        // PANICS: We pass `v.len()` for `position`, so the `position > v.len()`
3256        // panic condition is not satisfied.
3257        <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional)
3258    }
3259
3260    /// Inserts `additional` new items into `Vec<Self>` at `position`. The new
3261    /// items are initialized with zeros.
3262    ///
3263    /// # Panics
3264    ///
3265    /// Panics if `position > v.len()`.
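    ///
    /// # Examples
    ///
    /// A minimal sketch of splicing zeroed elements into the middle of a
    /// `Vec`; the values are illustrative, and the example assumes the `alloc`
    /// feature on a toolchain where `Vec::try_reserve` is available (Rust
    /// 1.57+):
    ///
    /// ```
    /// use zerocopy::FromZeros;
    ///
    /// let mut v = vec![1u8, 2, 3, 4];
    /// // Insert two zeroed elements at index 2.
    /// u8::insert_vec_zeroed(&mut v, 2, 2).unwrap();
    ///
    /// assert_eq!(v, [1, 2, 0, 0, 3, 4]);
    /// ```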
3266    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
3267    #[cfg(feature = "alloc")]
3268    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
3269    #[inline]
3270    fn insert_vec_zeroed(
3271        v: &mut Vec<Self>,
3272        position: usize,
3273        additional: usize,
3274    ) -> Result<(), AllocError>
3275    where
3276        Self: Sized,
3277    {
3278        assert!(position <= v.len());
3279        // We only conditionally compile on versions on which `try_reserve` is
3280        // stable; the Clippy lint is a false positive.
3281        v.try_reserve(additional).map_err(|_| AllocError)?;
3282        // SAFETY: The `try_reserve` call guarantees that these cannot overflow:
3283        // * `ptr.add(position)`
3284        // * `position + additional`
3285        // * `v.len() + additional`
3286        //
3287        // `v.len() - position` cannot overflow because we asserted that
3288        // `position <= v.len()`.
3289        unsafe {
3290            // This is a potentially overlapping copy.
3291            let ptr = v.as_mut_ptr();
3292            #[allow(clippy::arithmetic_side_effects)]
3293            ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
3294            ptr.add(position).write_bytes(0, additional);
3295            #[allow(clippy::arithmetic_side_effects)]
3296            v.set_len(v.len() + additional);
3297        }
3298
3299        Ok(())
3300    }
3301}
3302
3303/// Analyzes whether a type is [`FromBytes`].
3304///
3305/// This derive analyzes, at compile time, whether the annotated type satisfies
3306/// the [safety conditions] of `FromBytes` and implements `FromBytes` and its
3307/// supertraits if it is sound to do so. This derive can be applied to structs,
3308/// enums, and unions;
3309/// e.g.:
3310///
3311/// ```
3312/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable};
3313/// #[derive(FromBytes)]
3314/// struct MyStruct {
3315/// # /*
3316///     ...
3317/// # */
3318/// }
3319///
3320/// #[derive(FromBytes)]
3321/// #[repr(u8)]
3322/// enum MyEnum {
3323/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3324/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3325/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3326/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3327/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3328/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3329/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3330/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3331/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3332/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3333/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3334/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3335/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3336/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3337/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3338/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3339/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3340/// #   VFF,
3341/// # /*
3342///     ...
3343/// # */
3344/// }
3345///
3346/// #[derive(FromBytes, Immutable)]
3347/// union MyUnion {
3348/// #   variant: u8,
3349/// # /*
3350///     ...
3351/// # */
3352/// }
3353/// ```
3354///
3355/// [safety conditions]: trait@FromBytes#safety
3356///
3357/// # Analysis
3358///
3359/// *This section describes, roughly, the analysis performed by this derive to
3360/// determine whether it is sound to implement `FromBytes` for a given type.
3361/// Unless you are modifying the implementation of this derive, or attempting to
3362/// manually implement `FromBytes` for a type yourself, you don't need to read
3363/// this section.*
3364///
3365/// If a type has the following properties, then this derive can implement
3366/// `FromBytes` for that type:
3367///
3368/// - If the type is a struct, all of its fields must be `FromBytes`.
3369/// - If the type is an enum:
3370///   - It must have a defined representation (`repr`s `u8`, `u16`, `u32`,
3371///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
3372///   - The maximum number of discriminants must be used (so that every possible
3373///     bit pattern is a valid one). Be very careful when using the `usize` or
3374///     `isize` representations, as their size is platform-dependent.
3375///   - Its fields must be `FromBytes`.
3376///
3377/// This analysis is subject to change. Unsafe code may *only* rely on the
3378/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
3379/// implementation details of this derive.
3380///
3381/// ## Why isn't an explicit representation required for structs?
3382///
3383/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
3384/// that structs are marked with `#[repr(C)]`.
3385///
3386/// Per the [Rust reference][reference],
3387///
3388/// > The representation of a type can change the padding between fields, but
3389/// > does not change the layout of the fields themselves.
3390///
3391/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
3392///
3393/// Since the layout of structs only consists of padding bytes and field bytes,
3394/// a struct is soundly `FromBytes` if:
3395/// 1. its padding is soundly `FromBytes`, and
3396/// 2. its fields are soundly `FromBytes`.
3397///
3398/// The first condition always holds: padding bytes do not have any validity
3399/// constraints. A [discussion] of this question in the Unsafe Code
3400/// Guidelines Working Group concluded that it would be virtually unimaginable
3401/// for future versions of rustc to add validity constraints to padding bytes.
3402///
3403/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
3404///
3405/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
3406/// its fields are `FromBytes`.
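///
/// For example, a struct with no `repr` attribute (and therefore with
/// compiler-chosen layout and padding) can still soundly derive `FromBytes`.
/// A minimal sketch, with illustrative field names:
///
/// ```
/// # use zerocopy_derive::FromBytes;
/// // No `#[repr(...)]` attribute is required for this derive.
/// #[derive(FromBytes)]
/// struct Counters {
///     small: u8,
///     // The compiler is free to reorder these fields and insert padding;
///     // that padding has no validity constraints, so it does not affect
///     // whether `Counters` is `FromBytes`.
///     big: u32,
/// }
/// ```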
3407#[cfg(any(feature = "derive", test))]
3408#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
3409pub use zerocopy_derive::FromBytes;
3410
3411/// Types for which any bit pattern is valid.
3412///
3413/// Any memory region of the appropriate length which contains initialized bytes
3414/// can be viewed as any `FromBytes` type with no runtime overhead. This is
3415/// useful for efficiently parsing bytes as structured data.
3416///
3417/// # Warning: Padding bytes
3418///
3419/// Note that, when a value is moved or copied, only the non-padding bytes of
3420/// that value are guaranteed to be preserved. It is unsound to assume that
3421/// values written to padding bytes are preserved after a move or copy. For
3422/// example, the following is unsound:
3423///
3424/// ```rust,no_run
3425/// use core::mem::{size_of, transmute};
3426/// use zerocopy::FromZeros;
3427/// # use zerocopy_derive::*;
3428///
3429/// // Assume `Foo` is a type with padding bytes.
3430/// #[derive(FromZeros, Default)]
3431/// struct Foo {
3432/// # /*
3433///     ...
3434/// # */
3435/// }
3436///
3437/// let mut foo: Foo = Foo::default();
3438/// FromZeros::zero(&mut foo);
3439/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`,
3440/// // those writes are not guaranteed to be preserved in padding bytes when
3441/// // `foo` is moved, so this may expose padding bytes as `u8`s.
3442/// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) };
3443/// ```
3444///
3445/// # Implementation
3446///
3447/// **Do not implement this trait yourself!** Instead, use
3448/// [`#[derive(FromBytes)]`][derive]; e.g.:
3449///
3450/// ```
3451/// # use zerocopy_derive::{FromBytes, Immutable};
3452/// #[derive(FromBytes)]
3453/// struct MyStruct {
3454/// # /*
3455///     ...
3456/// # */
3457/// }
3458///
3459/// #[derive(FromBytes)]
3460/// #[repr(u8)]
3461/// enum MyEnum {
3462/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3463/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3464/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3465/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3466/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3467/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3468/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3469/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3470/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3471/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3472/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3473/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3474/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3475/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3476/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3477/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3478/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3479/// #   VFF,
3480/// # /*
3481///     ...
3482/// # */
3483/// }
3484///
3485/// #[derive(FromBytes, Immutable)]
3486/// union MyUnion {
3487/// #   variant: u8,
3488/// # /*
3489///     ...
3490/// # */
3491/// }
3492/// ```
3493///
3494/// This derive performs a sophisticated, compile-time safety analysis to
3495/// determine whether a type is `FromBytes`.
3496///
3497/// # Safety
3498///
3499/// *This section describes what is required in order for `T: FromBytes`, and
3500/// what unsafe code may assume of such types. If you don't plan on implementing
3501/// `FromBytes` manually, and you don't plan on writing unsafe code that
3502/// operates on `FromBytes` types, then you don't need to read this section.*
3503///
3504/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a
3505/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other
3506/// words, any byte value which is not uninitialized). If a type is marked as
3507/// `FromBytes` which violates this contract, it may cause undefined behavior.
3508///
3509/// `#[derive(FromBytes)]` only permits [types which satisfy these
3510/// requirements][derive-analysis].
3511///
3512#[cfg_attr(
3513    feature = "derive",
3514    doc = "[derive]: zerocopy_derive::FromBytes",
3515    doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
3516)]
3517#[cfg_attr(
3518    not(feature = "derive"),
3519    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
3520    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
3521)]
3522#[cfg_attr(
3523    zerocopy_diagnostic_on_unimplemented_1_78_0,
3524    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`")
3525)]
3526pub unsafe trait FromBytes: FromZeros {
3527    // The `Self: Sized` bound makes it so that `FromBytes` is still object
3528    // safe.
3529    #[doc(hidden)]
3530    fn only_derive_is_allowed_to_implement_this_trait()
3531    where
3532        Self: Sized;
3533
3534    /// Interprets the given `source` as a `&Self`.
3535    ///
3536    /// This method attempts to return a reference to `source` interpreted as a
3537    /// `Self`. If the length of `source` is not a [valid size of
3538    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3539    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3540    /// [infallibly discard the alignment error][size-error-from].
3541    ///
3542    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3543    ///
3544    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3545    /// [self-unaligned]: Unaligned
3546    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3547    /// [slice-dst]: KnownLayout#dynamically-sized-types
3548    ///
3549    /// # Compile-Time Assertions
3550    ///
3551    /// This method cannot yet be used on unsized types whose dynamically-sized
3552    /// component is zero-sized. Attempting to use this method on such types
3553    /// results in a compile-time assertion error; e.g.:
3554    ///
3555    /// ```compile_fail,E0080
3556    /// use zerocopy::*;
3557    /// # use zerocopy_derive::*;
3558    ///
3559    /// #[derive(FromBytes, Immutable, KnownLayout)]
3560    /// #[repr(C)]
3561    /// struct ZSTy {
3562    ///     leading_sized: u16,
3563    ///     trailing_dst: [()],
3564    /// }
3565    ///
3566    /// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
3567    /// ```
3568    ///
3569    /// # Examples
3570    ///
3571    /// ```
3572    /// use zerocopy::FromBytes;
3573    /// # use zerocopy_derive::*;
3574    ///
3575    /// #[derive(FromBytes, KnownLayout, Immutable)]
3576    /// #[repr(C)]
3577    /// struct PacketHeader {
3578    ///     src_port: [u8; 2],
3579    ///     dst_port: [u8; 2],
3580    ///     length: [u8; 2],
3581    ///     checksum: [u8; 2],
3582    /// }
3583    ///
3584    /// #[derive(FromBytes, KnownLayout, Immutable)]
3585    /// #[repr(C)]
3586    /// struct Packet {
3587    ///     header: PacketHeader,
3588    ///     body: [u8],
3589    /// }
3590    ///
3591    /// // These bytes encode a `Packet`.
3592    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..];
3593    ///
3594    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
3595    ///
3596    /// assert_eq!(packet.header.src_port, [0, 1]);
3597    /// assert_eq!(packet.header.dst_port, [2, 3]);
3598    /// assert_eq!(packet.header.length, [4, 5]);
3599    /// assert_eq!(packet.header.checksum, [6, 7]);
3600    /// assert_eq!(packet.body, [8, 9, 10, 11]);
3601    /// ```
3602    #[must_use = "has no side effects"]
3603    #[inline]
3604    fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>>
3605    where
3606        Self: KnownLayout + Immutable,
3607    {
3608        static_assert_dst_is_not_zst!(Self);
3609        match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) {
3610            Ok(ptr) => Ok(ptr.recall_validity().as_ref()),
3611            Err(err) => Err(err.map_src(|src| src.as_ref())),
3612        }
3613    }
3614
3615    /// Interprets the prefix of the given `source` as a `&Self` without
3616    /// copying.
3617    ///
3618    /// This method computes the [largest possible size of `Self`][valid-size]
3619    /// that can fit in the leading bytes of `source`, then attempts to return
3620    /// both a reference to those bytes interpreted as a `Self`, and a reference
3621    /// to the remaining bytes. If there are insufficient bytes, or if `source`
3622    /// is not appropriately aligned, this returns `Err`. If [`Self:
3623    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
3624    /// error][size-error-from].
3625    ///
3626    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3627    ///
3628    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3629    /// [self-unaligned]: Unaligned
3630    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3631    /// [slice-dst]: KnownLayout#dynamically-sized-types
3632    ///
3633    /// # Compile-Time Assertions
3634    ///
3635    /// This method cannot yet be used on unsized types whose dynamically-sized
3636    /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does
3637    /// support such types. Attempting to use this method on such types results
3638    /// in a compile-time assertion error; e.g.:
3639    ///
3640    /// ```compile_fail,E0080
3641    /// use zerocopy::*;
3642    /// # use zerocopy_derive::*;
3643    ///
3644    /// #[derive(FromBytes, Immutable, KnownLayout)]
3645    /// #[repr(C)]
3646    /// struct ZSTy {
3647    ///     leading_sized: u16,
3648    ///     trailing_dst: [()],
3649    /// }
3650    ///
3651    /// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
3652    /// ```
3653    ///
3654    /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems
3655    ///
3656    /// # Examples
3657    ///
3658    /// ```
3659    /// use zerocopy::FromBytes;
3660    /// # use zerocopy_derive::*;
3661    ///
3662    /// #[derive(FromBytes, KnownLayout, Immutable)]
3663    /// #[repr(C)]
3664    /// struct PacketHeader {
3665    ///     src_port: [u8; 2],
3666    ///     dst_port: [u8; 2],
3667    ///     length: [u8; 2],
3668    ///     checksum: [u8; 2],
3669    /// }
3670    ///
3671    /// #[derive(FromBytes, KnownLayout, Immutable)]
3672    /// #[repr(C)]
3673    /// struct Packet {
3674    ///     header: PacketHeader,
3675    ///     body: [[u8; 2]],
3676    /// }
3677    ///
3678    /// // These are more bytes than are needed to encode a `Packet`.
3679    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..];
3680    ///
3681    /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap();
3682    ///
3683    /// assert_eq!(packet.header.src_port, [0, 1]);
3684    /// assert_eq!(packet.header.dst_port, [2, 3]);
3685    /// assert_eq!(packet.header.length, [4, 5]);
3686    /// assert_eq!(packet.header.checksum, [6, 7]);
3687    /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]);
3688    /// assert_eq!(suffix, &[14u8][..]);
3689    /// ```
3690    #[must_use = "has no side effects"]
3691    #[inline]
3692    fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
3693    where
3694        Self: KnownLayout + Immutable,
3695    {
3696        static_assert_dst_is_not_zst!(Self);
3697        ref_from_prefix_suffix(source, None, CastType::Prefix)
3698    }
3699
3700    /// Interprets the suffix of the given bytes as a `&Self`.
3701    ///
3702    /// This method computes the [largest possible size of `Self`][valid-size]
3703    /// that can fit in the trailing bytes of `source`, then attempts to return
3704    /// both a reference to those bytes interpreted as a `Self`, and a reference
3705    /// to the preceding bytes. If there are insufficient bytes, or if that
3706    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
3707    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
3708    /// alignment error][size-error-from].
3709    ///
3710    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3711    ///
3712    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3713    /// [self-unaligned]: Unaligned
3714    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3715    /// [slice-dst]: KnownLayout#dynamically-sized-types
3716    ///
3717    /// # Compile-Time Assertions
3718    ///
3719    /// This method cannot yet be used on unsized types whose dynamically-sized
3720    /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does
3721    /// support such types. Attempting to use this method on such types results
3722    /// in a compile-time assertion error; e.g.:
3723    ///
3724    /// ```compile_fail,E0080
3725    /// use zerocopy::*;
3726    /// # use zerocopy_derive::*;
3727    ///
3728    /// #[derive(FromBytes, Immutable, KnownLayout)]
3729    /// #[repr(C)]
3730    /// struct ZSTy {
3731    ///     leading_sized: u16,
3732    ///     trailing_dst: [()],
3733    /// }
3734    ///
3735    /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
3736    /// ```
3737    ///
3738    /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems
3739    ///
3740    /// # Examples
3741    ///
3742    /// ```
3743    /// use zerocopy::FromBytes;
3744    /// # use zerocopy_derive::*;
3745    ///
3746    /// #[derive(FromBytes, Immutable, KnownLayout)]
3747    /// #[repr(C)]
3748    /// struct PacketTrailer {
3749    ///     frame_check_sequence: [u8; 4],
3750    /// }
3751    ///
3752    /// // These are more bytes than are needed to encode a `PacketTrailer`.
3753    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3754    ///
3755    /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap();
3756    ///
3757    /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]);
3758    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
3759    /// ```
3760    #[must_use = "has no side effects"]
3761    #[inline]
3762    fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
3763    where
3764        Self: Immutable + KnownLayout,
3765    {
3766        static_assert_dst_is_not_zst!(Self);
3767        ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
3768    }
3769
3770    /// Interprets the given `source` as a `&mut Self`.
3771    ///
3772    /// This method attempts to return a reference to `source` interpreted as a
3773    /// `Self`. If the length of `source` is not a [valid size of
3774    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3775    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3776    /// [infallibly discard the alignment error][size-error-from].
3777    ///
3778    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3779    ///
3780    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3781    /// [self-unaligned]: Unaligned
3782    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3783    /// [slice-dst]: KnownLayout#dynamically-sized-types
3784    ///
3785    /// # Compile-Time Assertions
3786    ///
3787    /// This method cannot yet be used on unsized types whose dynamically-sized
3788    /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
3789    /// support such types. Attempting to use this method on such types results
3790    /// in a compile-time assertion error; e.g.:
3791    ///
3792    /// ```compile_fail,E0080
3793    /// use zerocopy::*;
3794    /// # use zerocopy_derive::*;
3795    ///
3796    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3797    /// #[repr(C, packed)]
3798    /// struct ZSTy {
3799    ///     leading_sized: [u8; 2],
3800    ///     trailing_dst: [()],
3801    /// }
3802    ///
3803    /// let mut source = [85, 85];
3804    /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
3805    /// ```
3806    ///
3807    /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
3808    ///
3809    /// # Examples
3810    ///
3811    /// ```
3812    /// use zerocopy::FromBytes;
3813    /// # use zerocopy_derive::*;
3814    ///
3815    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
3816    /// #[repr(C)]
3817    /// struct PacketHeader {
3818    ///     src_port: [u8; 2],
3819    ///     dst_port: [u8; 2],
3820    ///     length: [u8; 2],
3821    ///     checksum: [u8; 2],
3822    /// }
3823    ///
3824    /// // These bytes encode a `PacketHeader`.
3825    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
3826    ///
3827    /// let header = PacketHeader::mut_from_bytes(bytes).unwrap();
3828    ///
3829    /// assert_eq!(header.src_port, [0, 1]);
3830    /// assert_eq!(header.dst_port, [2, 3]);
3831    /// assert_eq!(header.length, [4, 5]);
3832    /// assert_eq!(header.checksum, [6, 7]);
3833    ///
3834    /// header.checksum = [0, 0];
3835    ///
3836    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
3837    /// ```
3838    #[must_use = "has no side effects"]
3839    #[inline]
3840    fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>>
3841    where
3842        Self: IntoBytes + KnownLayout,
3843    {
3844        static_assert_dst_is_not_zst!(Self);
3845        match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) {
3846            Ok(ptr) => Ok(ptr.recall_validity::<_, (_, (_, _))>().as_mut()),
3847            Err(err) => Err(err.map_src(|src| src.as_mut())),
3848        }
3849    }
3850
3851    /// Interprets the prefix of the given `source` as a `&mut Self` without
3852    /// copying.
3853    ///
3854    /// This method computes the [largest possible size of `Self`][valid-size]
3855    /// that can fit in the leading bytes of `source`, then attempts to return
3856    /// both a reference to those bytes interpreted as a `Self`, and a reference
3857    /// to the remaining bytes. If there are insufficient bytes, or if `source`
3858    /// is not appropriately aligned, this returns `Err`. If [`Self:
3859    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
3860    /// error][size-error-from].
3861    ///
3862    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3863    ///
3864    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3865    /// [self-unaligned]: Unaligned
3866    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3867    /// [slice-dst]: KnownLayout#dynamically-sized-types
3868    ///
3869    /// # Compile-Time Assertions
3870    ///
3871    /// This method cannot yet be used on unsized types whose dynamically-sized
3872    /// component is zero-sized. See [`mut_from_suffix_with_elems`], which does
3873    /// support such types. Attempting to use this method on such types results
3874    /// in a compile-time assertion error; e.g.:
3875    ///
3876    /// ```compile_fail,E0080
3877    /// use zerocopy::*;
3878    /// # use zerocopy_derive::*;
3879    ///
3880    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3881    /// #[repr(C, packed)]
3882    /// struct ZSTy {
3883    ///     leading_sized: [u8; 2],
3884    ///     trailing_dst: [()],
3885    /// }
3886    ///
3887    /// let mut source = [85, 85];
3888    /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
3889    /// ```
3890    ///
3891    /// [`mut_from_suffix_with_elems`]: FromBytes::mut_from_suffix_with_elems
3892    ///
3893    /// # Examples
3894    ///
3895    /// ```
3896    /// use zerocopy::FromBytes;
3897    /// # use zerocopy_derive::*;
3898    ///
3899    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
3900    /// #[repr(C)]
3901    /// struct PacketHeader {
3902    ///     src_port: [u8; 2],
3903    ///     dst_port: [u8; 2],
3904    ///     length: [u8; 2],
3905    ///     checksum: [u8; 2],
3906    /// }
3907    ///
3908    /// // These are more bytes than are needed to encode a `PacketHeader`.
3909    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3910    ///
3911    /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap();
3912    ///
3913    /// assert_eq!(header.src_port, [0, 1]);
3914    /// assert_eq!(header.dst_port, [2, 3]);
3915    /// assert_eq!(header.length, [4, 5]);
3916    /// assert_eq!(header.checksum, [6, 7]);
3917    /// assert_eq!(body, &[8, 9][..]);
3918    ///
3919    /// header.checksum = [0, 0];
3920    /// body.fill(1);
3921    ///
3922    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]);
3923    /// ```
3924    #[must_use = "has no side effects"]
3925    #[inline]
3926    fn mut_from_prefix(
3927        source: &mut [u8],
3928    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
3929    where
3930        Self: IntoBytes + KnownLayout,
3931    {
3932        static_assert_dst_is_not_zst!(Self);
3933        mut_from_prefix_suffix(source, None, CastType::Prefix)
3934    }
3935
3936    /// Interprets the suffix of the given `source` as a `&mut Self` without
3937    /// copying.
3938    ///
3939    /// This method computes the [largest possible size of `Self`][valid-size]
3940    /// that can fit in the trailing bytes of `source`, then attempts to return
3941    /// both a reference to those bytes interpreted as a `Self`, and a reference
3942    /// to the preceding bytes. If there are insufficient bytes, or if that
3943    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
3944    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
3945    /// alignment error][size-error-from].
3946    ///
3947    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3948    ///
3949    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3950    /// [self-unaligned]: Unaligned
3951    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3952    /// [slice-dst]: KnownLayout#dynamically-sized-types
3953    ///
3954    /// # Compile-Time Assertions
3955    ///
3956    /// This method cannot yet be used on unsized types whose dynamically-sized
3957    /// component is zero-sized. Attempting to use this method on such types
3958    /// results in a compile-time assertion error; e.g.:
3959    ///
3960    /// ```compile_fail,E0080
3961    /// use zerocopy::*;
3962    /// # use zerocopy_derive::*;
3963    ///
3964    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3965    /// #[repr(C, packed)]
3966    /// struct ZSTy {
3967    ///     leading_sized: [u8; 2],
3968    ///     trailing_dst: [()],
3969    /// }
3970    ///
3971    /// let mut source = [85, 85];
3972    /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
3973    /// ```
3974    ///
3975    /// # Examples
3976    ///
3977    /// ```
3978    /// use zerocopy::FromBytes;
3979    /// # use zerocopy_derive::*;
3980    ///
3981    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
3982    /// #[repr(C)]
3983    /// struct PacketTrailer {
3984    ///     frame_check_sequence: [u8; 4],
3985    /// }
3986    ///
3987    /// // These are more bytes than are needed to encode a `PacketTrailer`.
3988    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3989    ///
3990    /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap();
3991    ///
3992    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
3993    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
3994    ///
3995    /// prefix.fill(0);
3996    /// trailer.frame_check_sequence.fill(1);
3997    ///
3998    /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]);
3999    /// ```
4000    #[must_use = "has no side effects"]
4001    #[inline]
4002    fn mut_from_suffix(
4003        source: &mut [u8],
4004    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
4005    where
4006        Self: IntoBytes + KnownLayout,
4007    {
4008        static_assert_dst_is_not_zst!(Self);
4009        mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
4010    }
4011
4012    /// Interprets the given `source` as a `&Self` with a DST length equal to
4013    /// `count`.
4014    ///
4015    /// This method attempts to return a reference to `source` interpreted as a
4016    /// `Self` with `count` trailing elements. If the length of `source` is not
4017    /// equal to the size of `Self` with `count` elements, or if `source` is not
4018    /// appropriately aligned, this returns `Err`. If [`Self:
4019    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4020    /// error][size-error-from].
4021    ///
4022    /// [self-unaligned]: Unaligned
4023    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4024    ///
4025    /// # Examples
4026    ///
4027    /// ```
4028    /// use zerocopy::FromBytes;
4029    /// # use zerocopy_derive::*;
4030    ///
4031    /// # #[derive(Debug, PartialEq, Eq)]
4032    /// #[derive(FromBytes, Immutable)]
4033    /// #[repr(C)]
4034    /// struct Pixel {
4035    ///     r: u8,
4036    ///     g: u8,
4037    ///     b: u8,
4038    ///     a: u8,
4039    /// }
4040    ///
4041    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
4042    ///
4043    /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap();
4044    ///
4045    /// assert_eq!(pixels, &[
4046    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4047    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4048    /// ]);
4049    ///
4050    /// ```
4051    ///
4052    /// Since an explicit `count` is provided, this method supports types with
4053    /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`]
4054    /// which do not take an explicit count do not support such types.
4055    ///
4056    /// ```
4057    /// use zerocopy::*;
4058    /// # use zerocopy_derive::*;
4059    ///
4060    /// #[derive(FromBytes, Immutable, KnownLayout)]
4061    /// #[repr(C)]
4062    /// struct ZSTy {
4063    ///     leading_sized: [u8; 2],
4064    ///     trailing_dst: [()],
4065    /// }
4066    ///
4067    /// let src = &[85, 85][..];
4068    /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap();
4069    /// assert_eq!(zsty.trailing_dst.len(), 42);
4070    /// ```
4071    ///
4072    /// [`ref_from_bytes`]: FromBytes::ref_from_bytes
4073    #[must_use = "has no side effects"]
4074    #[inline]
4075    fn ref_from_bytes_with_elems(
4076        source: &[u8],
4077        count: usize,
4078    ) -> Result<&Self, CastError<&[u8], Self>>
4079    where
4080        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4081    {
4082        let source = Ptr::from_ref(source);
4083        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
4084        match maybe_slf {
4085            Ok(slf) => Ok(slf.recall_validity().as_ref()),
4086            Err(err) => Err(err.map_src(|s| s.as_ref())),
4087        }
4088    }
4089
4090    /// Interprets the prefix of the given `source` as a DST `&Self` with length
4091    /// equal to `count`.
4092    ///
4093    /// This method attempts to return a reference to the prefix of `source`
4094    /// interpreted as a `Self` with `count` trailing elements, and a reference
4095    /// to the remaining bytes. If there are insufficient bytes, or if `source`
4096    /// is not appropriately aligned, this returns `Err`. If [`Self:
4097    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4098    /// error][size-error-from].
4099    ///
4100    /// [self-unaligned]: Unaligned
4101    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4102    ///
4103    /// # Examples
4104    ///
4105    /// ```
4106    /// use zerocopy::FromBytes;
4107    /// # use zerocopy_derive::*;
4108    ///
4109    /// # #[derive(Debug, PartialEq, Eq)]
4110    /// #[derive(FromBytes, Immutable)]
4111    /// #[repr(C)]
4112    /// struct Pixel {
4113    ///     r: u8,
4114    ///     g: u8,
4115    ///     b: u8,
4116    ///     a: u8,
4117    /// }
4118    ///
4119    /// // These are more bytes than are needed to encode two `Pixel`s.
4120    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4121    ///
4122    /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap();
4123    ///
4124    /// assert_eq!(pixels, &[
4125    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4126    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4127    /// ]);
4128    ///
4129    /// assert_eq!(suffix, &[8, 9]);
4130    /// ```
4131    ///
4132    /// Since an explicit `count` is provided, this method supports types with
4133    /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`]
4134    /// which do not take an explicit count do not support such types.
4135    ///
4136    /// ```
4137    /// use zerocopy::*;
4138    /// # use zerocopy_derive::*;
4139    ///
4140    /// #[derive(FromBytes, Immutable, KnownLayout)]
4141    /// #[repr(C)]
4142    /// struct ZSTy {
4143    ///     leading_sized: [u8; 2],
4144    ///     trailing_dst: [()],
4145    /// }
4146    ///
4147    /// let src = &[85, 85][..];
4148    /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap();
4149    /// assert_eq!(zsty.trailing_dst.len(), 42);
4150    /// ```
4151    ///
4152    /// [`ref_from_prefix`]: FromBytes::ref_from_prefix
4153    #[must_use = "has no side effects"]
4154    #[inline]
4155    fn ref_from_prefix_with_elems(
4156        source: &[u8],
4157        count: usize,
4158    ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
4159    where
4160        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4161    {
4162        ref_from_prefix_suffix(source, Some(count), CastType::Prefix)
4163    }
4164
4165    /// Interprets the suffix of the given `source` as a DST `&Self` with length
4166    /// equal to `count`.
4167    ///
4168    /// This method attempts to return a reference to the suffix of `source`
4169    /// interpreted as a `Self` with `count` trailing elements, and a reference
4170    /// to the preceding bytes. If there are insufficient bytes, or if that
4171    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4172    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4173    /// alignment error][size-error-from].
4174    ///
4175    /// [self-unaligned]: Unaligned
4176    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4177    ///
4178    /// # Examples
4179    ///
4180    /// ```
4181    /// use zerocopy::FromBytes;
4182    /// # use zerocopy_derive::*;
4183    ///
4184    /// # #[derive(Debug, PartialEq, Eq)]
4185    /// #[derive(FromBytes, Immutable)]
4186    /// #[repr(C)]
4187    /// struct Pixel {
4188    ///     r: u8,
4189    ///     g: u8,
4190    ///     b: u8,
4191    ///     a: u8,
4192    /// }
4193    ///
4194    /// // These are more bytes than are needed to encode two `Pixel`s.
4195    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4196    ///
4197    /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap();
4198    ///
4199    /// assert_eq!(prefix, &[0, 1]);
4200    ///
4201    /// assert_eq!(pixels, &[
4202    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
4203    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
4204    /// ]);
4205    /// ```
4206    ///
4207    /// Since an explicit `count` is provided, this method supports types with
4208    /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`]
4209    /// which do not take an explicit count do not support such types.
4210    ///
4211    /// ```
4212    /// use zerocopy::*;
4213    /// # use zerocopy_derive::*;
4214    ///
4215    /// #[derive(FromBytes, Immutable, KnownLayout)]
4216    /// #[repr(C)]
4217    /// struct ZSTy {
4218    ///     leading_sized: [u8; 2],
4219    ///     trailing_dst: [()],
4220    /// }
4221    ///
4222    /// let src = &[85, 85][..];
4223    /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap();
4224    /// assert_eq!(zsty.trailing_dst.len(), 42);
4225    /// ```
4226    ///
4227    /// [`ref_from_suffix`]: FromBytes::ref_from_suffix
4228    #[must_use = "has no side effects"]
4229    #[inline]
4230    fn ref_from_suffix_with_elems(
4231        source: &[u8],
4232        count: usize,
4233    ) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
4234    where
4235        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4236    {
4237        ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4238    }
4239
4240    /// Interprets the given `source` as a `&mut Self` with a DST length equal
4241    /// to `count`.
4242    ///
4243    /// This method attempts to return a reference to `source` interpreted as a
4244    /// `Self` with `count` trailing elements. If the length of `source` is not
4245    /// equal to the size of `Self` with `count` elements, or if `source` is not
4246    /// appropriately aligned, this returns `Err`. If [`Self:
4247    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4248    /// error][size-error-from].
4249    ///
4250    /// [self-unaligned]: Unaligned
4251    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4252    ///
4253    /// # Examples
4254    ///
4255    /// ```
4256    /// use zerocopy::FromBytes;
4257    /// # use zerocopy_derive::*;
4258    ///
4259    /// # #[derive(Debug, PartialEq, Eq)]
4260    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4261    /// #[repr(C)]
4262    /// struct Pixel {
4263    ///     r: u8,
4264    ///     g: u8,
4265    ///     b: u8,
4266    ///     a: u8,
4267    /// }
4268    ///
4269    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
4270    ///
4271    /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap();
4272    ///
4273    /// assert_eq!(pixels, &[
4274    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4275    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4276    /// ]);
4277    ///
4278    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4279    ///
4280    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
4281    /// ```
4282    ///
4283    /// Since an explicit `count` is provided, this method supports types with
4284    /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`]
4285    /// which do not take an explicit count do not support such types.
4286    ///
4287    /// ```
4288    /// use zerocopy::*;
4289    /// # use zerocopy_derive::*;
4290    ///
4291    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4292    /// #[repr(C, packed)]
4293    /// struct ZSTy {
4294    ///     leading_sized: [u8; 2],
4295    ///     trailing_dst: [()],
4296    /// }
4297    ///
4298    /// let src = &mut [85, 85][..];
4299    /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap();
4300    /// assert_eq!(zsty.trailing_dst.len(), 42);
4301    /// ```
4302    ///
4303    /// [`mut_from_bytes`]: FromBytes::mut_from_bytes
4304    #[must_use = "has no side effects"]
4305    #[inline]
4306    fn mut_from_bytes_with_elems(
4307        source: &mut [u8],
4308        count: usize,
4309    ) -> Result<&mut Self, CastError<&mut [u8], Self>>
4310    where
4311        Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable,
4312    {
4313        let source = Ptr::from_mut(source);
4314        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
4315        match maybe_slf {
4316            Ok(slf) => Ok(slf
4317                .recall_validity::<_, (_, (_, (BecauseExclusive, BecauseExclusive)))>()
4318                .as_mut()),
4319            Err(err) => Err(err.map_src(|s| s.as_mut())),
4320        }
4321    }
4322
4323    /// Interprets the prefix of the given `source` as a `&mut Self` with DST
4324    /// length equal to `count`.
4325    ///
4326    /// This method attempts to return a reference to the prefix of `source`
4327    /// interpreted as a `Self` with `count` trailing elements, and a reference
4328    /// to the remaining bytes. If there are insufficient bytes, or if `source`
4329    /// is not appropriately aligned, this returns `Err`. If [`Self:
4330    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4331    /// error][size-error-from].
4332    ///
4333    /// [self-unaligned]: Unaligned
4334    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4335    ///
4336    /// # Examples
4337    ///
4338    /// ```
4339    /// use zerocopy::FromBytes;
4340    /// # use zerocopy_derive::*;
4341    ///
4342    /// # #[derive(Debug, PartialEq, Eq)]
4343    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4344    /// #[repr(C)]
4345    /// struct Pixel {
4346    ///     r: u8,
4347    ///     g: u8,
4348    ///     b: u8,
4349    ///     a: u8,
4350    /// }
4351    ///
4352    /// // These are more bytes than are needed to encode two `Pixel`s.
4353    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4354    ///
4355    /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap();
4356    ///
4357    /// assert_eq!(pixels, &[
4358    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4359    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4360    /// ]);
4361    ///
4362    /// assert_eq!(suffix, &[8, 9]);
4363    ///
4364    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4365    /// suffix.fill(1);
4366    ///
4367    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]);
4368    /// ```
4369    ///
4370    /// Since an explicit `count` is provided, this method supports types with
4371    /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`]
4372    /// which do not take an explicit count do not support such types.
4373    ///
4374    /// ```
4375    /// use zerocopy::*;
4376    /// # use zerocopy_derive::*;
4377    ///
4378    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4379    /// #[repr(C, packed)]
4380    /// struct ZSTy {
4381    ///     leading_sized: [u8; 2],
4382    ///     trailing_dst: [()],
4383    /// }
4384    ///
4385    /// let src = &mut [85, 85][..];
4386    /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap();
4387    /// assert_eq!(zsty.trailing_dst.len(), 42);
4388    /// ```
4389    ///
4390    /// [`mut_from_prefix`]: FromBytes::mut_from_prefix
4391    #[must_use = "has no side effects"]
4392    #[inline]
4393    fn mut_from_prefix_with_elems(
4394        source: &mut [u8],
4395        count: usize,
4396    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
4397    where
4398        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4399    {
4400        mut_from_prefix_suffix(source, Some(count), CastType::Prefix)
4401    }
4402
4403    /// Interprets the suffix of the given `source` as a `&mut Self` with DST
4404    /// length equal to `count`.
4405    ///
4406    /// This method attempts to return a reference to the suffix of `source`
4407    /// interpreted as a `Self` with `count` trailing elements, and a reference
4408    /// to the preceding bytes. If there are insufficient bytes, or if that
4409    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4410    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4411    /// alignment error][size-error-from].
4412    ///
4413    /// [self-unaligned]: Unaligned
4414    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4415    ///
4416    /// # Examples
4417    ///
4418    /// ```
4419    /// use zerocopy::FromBytes;
4420    /// # use zerocopy_derive::*;
4421    ///
4422    /// # #[derive(Debug, PartialEq, Eq)]
4423    /// #[derive(FromBytes, IntoBytes, Immutable)]
4424    /// #[repr(C)]
4425    /// struct Pixel {
4426    ///     r: u8,
4427    ///     g: u8,
4428    ///     b: u8,
4429    ///     a: u8,
4430    /// }
4431    ///
4432    /// // These are more bytes than are needed to encode two `Pixel`s.
4433    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4434    ///
4435    /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap();
4436    ///
4437    /// assert_eq!(prefix, &[0, 1]);
4438    ///
4439    /// assert_eq!(pixels, &[
4440    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
4441    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
4442    /// ]);
4443    ///
4444    /// prefix.fill(9);
4445    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4446    ///
4447    /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]);
4448    /// ```
4449    ///
4450    /// Since an explicit `count` is provided, this method supports types with
4451    /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`]
4452    /// which do not take an explicit count do not support such types.
4453    ///
4454    /// ```
4455    /// use zerocopy::*;
4456    /// # use zerocopy_derive::*;
4457    ///
4458    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4459    /// #[repr(C, packed)]
4460    /// struct ZSTy {
4461    ///     leading_sized: [u8; 2],
4462    ///     trailing_dst: [()],
4463    /// }
4464    ///
4465    /// let src = &mut [85, 85][..];
4466    /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap();
4467    /// assert_eq!(zsty.trailing_dst.len(), 42);
4468    /// ```
4469    ///
4470    /// [`mut_from_suffix`]: FromBytes::mut_from_suffix
4471    #[must_use = "has no side effects"]
4472    #[inline]
4473    fn mut_from_suffix_with_elems(
4474        source: &mut [u8],
4475        count: usize,
4476    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
4477    where
4478        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4479    {
4480        mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4481    }
4482
4483    /// Reads a copy of `Self` from the given `source`.
4484    ///
4485    /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`.
4486    ///
4487    /// # Examples
4488    ///
4489    /// ```
4490    /// use zerocopy::FromBytes;
4491    /// # use zerocopy_derive::*;
4492    ///
4493    /// #[derive(FromBytes)]
4494    /// #[repr(C)]
4495    /// struct PacketHeader {
4496    ///     src_port: [u8; 2],
4497    ///     dst_port: [u8; 2],
4498    ///     length: [u8; 2],
4499    ///     checksum: [u8; 2],
4500    /// }
4501    ///
4502    /// // These bytes encode a `PacketHeader`.
4503    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
4504    ///
4505    /// let header = PacketHeader::read_from_bytes(bytes).unwrap();
4506    ///
4507    /// assert_eq!(header.src_port, [0, 1]);
4508    /// assert_eq!(header.dst_port, [2, 3]);
4509    /// assert_eq!(header.length, [4, 5]);
4510    /// assert_eq!(header.checksum, [6, 7]);
4511    /// ```
4512    #[must_use = "has no side effects"]
4513    #[inline]
4514    fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>>
4515    where
4516        Self: Sized,
4517    {
4518        match Ref::<_, Unalign<Self>>::sized_from(source) {
4519            Ok(r) => Ok(Ref::read(&r).into_inner()),
4520            Err(CastError::Size(e)) => Err(e.with_dst()),
4521            Err(CastError::Alignment(_)) => {
4522                // SAFETY: `Unalign<Self>` is trivially aligned, so
4523                // `Ref::sized_from` cannot fail due to unmet alignment
4524                // requirements.
4525                unsafe { core::hint::unreachable_unchecked() }
4526            }
4527            Err(CastError::Validity(i)) => match i {},
4528        }
4529    }
4530
4531    /// Reads a copy of `Self` from the prefix of the given `source`.
4532    ///
4533    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
4534    /// of `source`, returning that `Self` and any remaining bytes. If
4535    /// `source.len() < size_of::<Self>()`, it returns `Err`.
4536    ///
4537    /// # Examples
4538    ///
4539    /// ```
4540    /// use zerocopy::FromBytes;
4541    /// # use zerocopy_derive::*;
4542    ///
4543    /// #[derive(FromBytes)]
4544    /// #[repr(C)]
4545    /// struct PacketHeader {
4546    ///     src_port: [u8; 2],
4547    ///     dst_port: [u8; 2],
4548    ///     length: [u8; 2],
4549    ///     checksum: [u8; 2],
4550    /// }
4551    ///
4552    /// // These are more bytes than are needed to encode a `PacketHeader`.
4553    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4554    ///
4555    /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap();
4556    ///
4557    /// assert_eq!(header.src_port, [0, 1]);
4558    /// assert_eq!(header.dst_port, [2, 3]);
4559    /// assert_eq!(header.length, [4, 5]);
4560    /// assert_eq!(header.checksum, [6, 7]);
4561    /// assert_eq!(body, [8, 9]);
4562    /// ```
4563    #[must_use = "has no side effects"]
4564    #[inline]
4565    fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>>
4566    where
4567        Self: Sized,
4568    {
4569        match Ref::<_, Unalign<Self>>::sized_from_prefix(source) {
4570            Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)),
4571            Err(CastError::Size(e)) => Err(e.with_dst()),
4572            Err(CastError::Alignment(_)) => {
4573                // SAFETY: `Unalign<Self>` is trivially aligned, so
4574                // `Ref::sized_from_prefix` cannot fail due to unmet alignment
4575                // requirements.
4576                unsafe { core::hint::unreachable_unchecked() }
4577            }
4578            Err(CastError::Validity(i)) => match i {},
4579        }
4580    }
4581
4582    /// Reads a copy of `Self` from the suffix of the given `source`.
4583    ///
4584    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
4585    /// of `source`, returning that `Self` and any preceding bytes. If
4586    /// `source.len() < size_of::<Self>()`, it returns `Err`.
4587    ///
4588    /// # Examples
4589    ///
4590    /// ```
4591    /// use zerocopy::FromBytes;
4592    /// # use zerocopy_derive::*;
4593    ///
4594    /// #[derive(FromBytes)]
4595    /// #[repr(C)]
4596    /// struct PacketTrailer {
4597    ///     frame_check_sequence: [u8; 4],
4598    /// }
4599    ///
4600    /// // These are more bytes than are needed to encode a `PacketTrailer`.
4601    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4602    ///
4603    /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap();
4604    ///
4605    /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]);
4606    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
4607    /// ```
4608    #[must_use = "has no side effects"]
4609    #[inline]
4610    fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>>
4611    where
4612        Self: Sized,
4613    {
4614        match Ref::<_, Unalign<Self>>::sized_from_suffix(source) {
4615            Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())),
4616            Err(CastError::Size(e)) => Err(e.with_dst()),
4617            Err(CastError::Alignment(_)) => {
4618                // SAFETY: `Unalign<Self>` is trivially aligned, so
4619                // `Ref::sized_from_suffix` cannot fail due to unmet alignment
4620                // requirements.
4621                unsafe { core::hint::unreachable_unchecked() }
4622            }
4623            Err(CastError::Validity(i)) => match i {},
4624        }
4625    }
4626
4627    /// Reads a copy of `Self` from an `io::Read`.
4628    ///
4629    /// This is useful for interfacing with operating system byte sources (files,
4630    /// sockets, etc.).
4631    ///
4632    /// # Examples
4633    ///
4634    /// ```no_run
4635    /// use zerocopy::{byteorder::big_endian::*, FromBytes};
4636    /// use std::fs::File;
4637    /// # use zerocopy_derive::*;
4638    ///
4639    /// #[derive(FromBytes)]
4640    /// #[repr(C)]
4641    /// struct BitmapFileHeader {
4642    ///     signature: [u8; 2],
4643    ///     size: U32,
4644    ///     reserved: U64,
4645    ///     offset: U64,
4646    /// }
4647    ///
4648    /// let mut file = File::open("image.bin").unwrap();
4649    /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap();
4650    /// ```
4651    #[cfg(feature = "std")]
4652    #[inline(always)]
4653    fn read_from_io<R>(mut src: R) -> io::Result<Self>
4654    where
4655        Self: Sized,
4656        R: io::Read,
4657    {
4658        // NOTE(#2319, #2320): We do `buf.zero()` separately rather than
4659        // constructing `let buf = CoreMaybeUninit::zeroed()` because, if `Self`
4660        // contains padding bytes, then a typed copy of `CoreMaybeUninit<Self>`
4661        // will not necessarily preserve zeros written to those padding byte
4662        // locations, and so `buf` could contain uninitialized bytes.
4663        let mut buf = CoreMaybeUninit::<Self>::uninit();
4664        buf.zero();
4665
4666        let ptr = Ptr::from_mut(&mut buf);
4667        // SAFETY: After `buf.zero()`, `buf` consists entirely of initialized,
4668        // zeroed bytes. Since `MaybeUninit` has no validity requirements, `ptr`
4669        // cannot be used to write values which will violate `buf`'s bit
4670        // validity. Since `ptr` has `Exclusive` aliasing, nothing other than
4671        // `ptr` may be used to mutate `ptr`'s referent, and so its bit validity
4672        // cannot be violated even though `buf` may have more permissive bit
4673        // validity than `ptr`.
4674        let ptr = unsafe { ptr.assume_validity::<invariant::Initialized>() };
4675        let ptr = ptr.as_bytes::<BecauseExclusive>();
4676        src.read_exact(ptr.as_mut())?;
4677        // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is
4678        // `FromBytes`.
4679        Ok(unsafe { buf.assume_init() })
4680    }
4681
4682    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
4683    #[doc(hidden)]
4684    #[must_use = "has no side effects"]
4685    #[inline(always)]
4686    fn ref_from(source: &[u8]) -> Option<&Self>
4687    where
4688        Self: KnownLayout + Immutable,
4689    {
4690        Self::ref_from_bytes(source).ok()
4691    }
4692
4693    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
4694    #[doc(hidden)]
4695    #[must_use = "has no side effects"]
4696    #[inline(always)]
4697    fn mut_from(source: &mut [u8]) -> Option<&mut Self>
4698    where
4699        Self: KnownLayout + IntoBytes,
4700    {
4701        Self::mut_from_bytes(source).ok()
4702    }
4703
4704    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
4705    #[doc(hidden)]
4706    #[must_use = "has no side effects"]
4707    #[inline(always)]
4708    fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])>
4709    where
4710        Self: Sized + Immutable,
4711    {
4712        <[Self]>::ref_from_prefix_with_elems(source, count).ok()
4713    }
4714
4715    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
4716    #[doc(hidden)]
4717    #[must_use = "has no side effects"]
4718    #[inline(always)]
4719    fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])>
4720    where
4721        Self: Sized + Immutable,
4722    {
4723        <[Self]>::ref_from_suffix_with_elems(source, count).ok()
4724    }
4725
4726    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
4727    #[doc(hidden)]
4728    #[must_use = "has no side effects"]
4729    #[inline(always)]
4730    fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
4731    where
4732        Self: Sized + IntoBytes,
4733    {
4734        <[Self]>::mut_from_prefix_with_elems(source, count).ok()
4735    }
4736
4737    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
4738    #[doc(hidden)]
4739    #[must_use = "has no side effects"]
4740    #[inline(always)]
4741    fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
4742    where
4743        Self: Sized + IntoBytes,
4744    {
4745        <[Self]>::mut_from_suffix_with_elems(source, count).ok()
4746    }
4747
4748    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
4749    #[doc(hidden)]
4750    #[must_use = "has no side effects"]
4751    #[inline(always)]
4752    fn read_from(source: &[u8]) -> Option<Self>
4753    where
4754        Self: Sized,
4755    {
4756        Self::read_from_bytes(source).ok()
4757    }
4758}
4759
4760/// Interprets the given affix of the given bytes as a `&Self`.
4761///
4762/// This method computes the largest possible size of `Self` that can fit in the
4763/// prefix or suffix bytes of `source`, then attempts to return both a reference
4764/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
4765/// If there are insufficient bytes, or if that affix of `source` is not
4766/// appropriately aligned, this returns `Err`.
4767#[inline(always)]
4768fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>(
4769    source: &[u8],
4770    meta: Option<T::PointerMetadata>,
4771    cast_type: CastType,
4772) -> Result<(&T, &[u8]), CastError<&[u8], T>> {
4773    let (slf, prefix_suffix) = Ptr::from_ref(source)
4774        .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
4775        .map_err(|err| err.map_src(|s| s.as_ref()))?;
4776    Ok((slf.recall_validity().as_ref(), prefix_suffix.as_ref()))
4777}
4778
4779/// Interprets the given affix of the given bytes as a `&mut Self` without
4780/// copying.
4781///
4782/// This method computes the largest possible size of `Self` that can fit in the
4783/// prefix or suffix bytes of `source`, then attempts to return both a reference
4784/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
4785/// If there are insufficient bytes, or if that affix of `source` is not
4786/// appropriately aligned, this returns `Err`.
4787#[inline(always)]
4788fn mut_from_prefix_suffix<T: FromBytes + IntoBytes + KnownLayout + ?Sized>(
4789    source: &mut [u8],
4790    meta: Option<T::PointerMetadata>,
4791    cast_type: CastType,
4792) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> {
4793    let (slf, prefix_suffix) = Ptr::from_mut(source)
4794        .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
4795        .map_err(|err| err.map_src(|s| s.as_mut()))?;
4796    Ok((slf.recall_validity::<_, (_, (_, _))>().as_mut(), prefix_suffix.as_mut()))
4797}
4798
4799/// Analyzes whether a type is [`IntoBytes`].
4800///
4801/// This derive analyzes, at compile time, whether the annotated type satisfies
4802/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is
4803/// sound to do so. This derive can be applied to structs and enums (see below
4804/// for union support); e.g.:
4805///
4806/// ```
4807/// # use zerocopy_derive::{IntoBytes};
4808/// #[derive(IntoBytes)]
4809/// #[repr(C)]
4810/// struct MyStruct {
4811/// # /*
4812///     ...
4813/// # */
4814/// }
4815///
4816/// #[derive(IntoBytes)]
4817/// #[repr(u8)]
4818/// enum MyEnum {
4819/// #   Variant,
4820/// # /*
4821///     ...
4822/// # */
4823/// }
4824/// ```
4825///
4826/// [safety conditions]: trait@IntoBytes#safety
4827///
4828/// # Error Messages
4829///
4830/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive
4831/// for `IntoBytes` is implemented, you may get an error like this:
4832///
4833/// ```text
4834/// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied
4835///   --> lib.rs:23:10
4836///    |
4837///  1 | #[derive(IntoBytes)]
4838///    |          ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()`
4839///    |
4840///    = help: the following implementations were found:
4841///                   <() as PaddingFree<T, false>>
4842/// ```
4843///
4844/// This error indicates that the type being annotated has padding bytes, which
4845/// is illegal for `IntoBytes` types. Consider reducing the alignment of some
4846/// fields by using types in the [`byteorder`] module, wrapping field types in
4847/// [`Unalign`], adding explicit struct fields where those padding bytes would
4848/// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type
4849/// layout] for more information about type layout and padding.
4850///
4851/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
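///
/// For example, one of the fixes listed above is to make the padding explicit.
/// The following is a minimal sketch (the type and field names are
/// illustrative, not taken from any real API):
///
/// ```
/// # use zerocopy_derive::IntoBytes;
/// #[derive(IntoBytes)]
/// #[repr(C)]
/// struct NoPadding {
///     small: u8,
///     // Without this field, `repr(C)` would insert three bytes of padding
///     // before `large`, and the derive would reject the type.
///     _explicit_padding: [u8; 3],
///     large: u32,
/// }
/// ```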
4852///
4853/// # Unions
4854///
4855/// Currently, union bit validity is [up in the air][union-validity], and so
4856/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default.
4857/// However, implementing `IntoBytes` on a union type is likely sound on all
4858/// existing Rust toolchains - it's just that it may become unsound in the
4859/// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by
4860/// passing the unstable `zerocopy_derive_union_into_bytes` cfg:
4861///
4862/// ```shell
4863/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build
4864/// ```
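///
/// With that cfg set, `#[derive(IntoBytes)]` can be applied to a union. The
/// following sketch gives the general shape (the type and field names are
/// illustrative, and the block is not compiled here because it requires the
/// cfg):
///
/// ```ignore
/// # use zerocopy_derive::IntoBytes;
/// #[derive(IntoBytes)]
/// #[repr(C)]
/// union PacketWord {
///     word: u32,
///     bytes: [u8; 4],
/// }
/// ```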
4865///
4866/// However, it is your responsibility to ensure that this derive is sound on
4867/// the specific versions of the Rust toolchain you are using! We make no
4868/// stability or soundness guarantees regarding this cfg, and may remove it at
4869/// any point.
4870///
4871/// We are actively working with Rust to stabilize the necessary language
4872/// guarantees to support this in a forwards-compatible way, which will enable
4873/// us to remove the cfg gate. As part of this effort, we need to know how much
4874/// demand there is for this feature. If you would like to use `IntoBytes` on
4875/// unions, [please let us know][discussion].
4876///
4877/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438
4878/// [discussion]: https://github.com/google/zerocopy/discussions/1802
4879///
4880/// # Analysis
4881///
4882/// *This section describes, roughly, the analysis performed by this derive to
4883/// determine whether it is sound to implement `IntoBytes` for a given type.
4884/// Unless you are modifying the implementation of this derive, or attempting to
4885/// manually implement `IntoBytes` for a type yourself, you don't need to read
4886/// this section.*
4887///
4888/// If a type has the following properties, then this derive can implement
4889/// `IntoBytes` for that type:
4890///
4891/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally:
4892///     - if the type is `repr(transparent)` or `repr(packed)`, it is
4893///       [`IntoBytes`] if its fields are [`IntoBytes`]; else,
4894///     - if the type is `repr(C)` with at most one field, it is [`IntoBytes`]
4895///       if its field is [`IntoBytes`]; else,
4896///     - if the type has no generic parameters, it is [`IntoBytes`] if the type
4897///       is sized and has no padding bytes; else,
4898///     - if the type is `repr(C)`, its fields must be [`Unaligned`].
4899/// - If the type is an enum:
4900///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
4901///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
4902///   - It must have no padding bytes.
4903///   - Its fields must be [`IntoBytes`].
4904///
4905/// This analysis is subject to change. Unsafe code may *only* rely on the
4906/// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
4907/// implementation details of this derive.
4908///
4909/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
4910#[cfg(any(feature = "derive", test))]
4911#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
4912pub use zerocopy_derive::IntoBytes;
4913
4914/// Types that can be converted to an immutable slice of initialized bytes.
4915///
4916/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the
4917/// same size. This is useful for efficiently serializing structured data as raw
4918/// bytes.
4919///
4920/// # Implementation
4921///
4922/// **Do not implement this trait yourself!** Instead, use
4923/// [`#[derive(IntoBytes)]`][derive]; e.g.:
4924///
4925/// ```
4926/// # use zerocopy_derive::IntoBytes;
4927/// #[derive(IntoBytes)]
4928/// #[repr(C)]
4929/// struct MyStruct {
4930/// # /*
4931///     ...
4932/// # */
4933/// }
4934///
4935/// #[derive(IntoBytes)]
4936/// #[repr(u8)]
4937/// enum MyEnum {
4938/// #   Variant0,
4939/// # /*
4940///     ...
4941/// # */
4942/// }
4943/// ```
4944///
4945/// This derive performs a sophisticated, compile-time safety analysis to
4946/// determine whether a type is `IntoBytes`. See the [derive
4947/// documentation][derive] for guidance on how to interpret error messages
4948/// produced by the derive's analysis.
4949///
4950/// # Safety
4951///
4952/// *This section describes what is required in order for `T: IntoBytes`, and
4953/// what unsafe code may assume of such types. If you don't plan on implementing
4954/// `IntoBytes` manually, and you don't plan on writing unsafe code that
4955/// operates on `IntoBytes` types, then you don't need to read this section.*
4956///
4957/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any
4958/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is
4959/// marked as `IntoBytes` which violates this contract, it may cause undefined
4960/// behavior.
4961///
4962/// `#[derive(IntoBytes)]` only permits [types which satisfy these
4963/// requirements][derive-analysis].
4964///
4965#[cfg_attr(
4966    feature = "derive",
4967    doc = "[derive]: zerocopy_derive::IntoBytes",
4968    doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
4969)]
4970#[cfg_attr(
4971    not(feature = "derive"),
4972    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
4973    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
4974)]
4975#[cfg_attr(
4976    zerocopy_diagnostic_on_unimplemented_1_78_0,
4977    diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
4978)]
4979pub unsafe trait IntoBytes {
4980    // The `Self: Sized` bound makes it so that this function doesn't prevent
4981    // `IntoBytes` from being object safe. Note that other `IntoBytes` methods
4982    // prevent object safety, but those provide a benefit in exchange for object
4983    // safety. If at some point we remove those methods, change their type
4984    // signatures, or move them out of this trait so that `IntoBytes` is object
4985    // safe again, it's important that this function not prevent object safety.
4986    #[doc(hidden)]
4987    fn only_derive_is_allowed_to_implement_this_trait()
4988    where
4989        Self: Sized;
4990
4991    /// Gets the bytes of this value.
4992    ///
4993    /// # Examples
4994    ///
4995    /// ```
4996    /// use zerocopy::IntoBytes;
4997    /// # use zerocopy_derive::*;
4998    ///
4999    /// #[derive(IntoBytes, Immutable)]
5000    /// #[repr(C)]
5001    /// struct PacketHeader {
5002    ///     src_port: [u8; 2],
5003    ///     dst_port: [u8; 2],
5004    ///     length: [u8; 2],
5005    ///     checksum: [u8; 2],
5006    /// }
5007    ///
5008    /// let header = PacketHeader {
5009    ///     src_port: [0, 1],
5010    ///     dst_port: [2, 3],
5011    ///     length: [4, 5],
5012    ///     checksum: [6, 7],
5013    /// };
5014    ///
5015    /// let bytes = header.as_bytes();
5016    ///
5017    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5018    /// ```
5019    #[must_use = "has no side effects"]
5020    #[inline(always)]
5021    fn as_bytes(&self) -> &[u8]
5022    where
5023        Self: Immutable,
5024    {
5025        // Note that this method does not have a `Self: Sized` bound;
5026        // `size_of_val` works for unsized values too.
5027        let len = mem::size_of_val(self);
5028        let slf: *const Self = self;
5029
5030        // SAFETY:
5031        // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()`
5032        //   many bytes because...
5033        //   - `slf` is the same pointer as `self`, and `self` is a reference
5034        //     which points to an object whose size is `len`. Thus...
5035        //     - The entire region of `len` bytes starting at `slf` is contained
5036        //       within a single allocation.
5037        //     - `slf` is non-null.
5038        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
5039        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
5040        //   initialized.
5041        // - Since `slf` is derived from `self`, and `self` is an immutable
5042        //   reference, the only other references to this memory region that
5043        //   could exist are other immutable references, and those don't allow
5044        //   mutation. `Self: Immutable` prohibits types which contain
5045        //   `UnsafeCell`s, which are the only types for which this rule
5046        //   wouldn't be sufficient.
5047        // - The total size of the resulting slice is no larger than
5048        //   `isize::MAX` because no allocation produced by safe code can be
5049        //   larger than `isize::MAX`.
5050        //
5051        // FIXME(#429): Add references to docs and quotes.
5052        unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
5053    }
5054
5055    /// Gets the bytes of this value mutably.
5056    ///
5057    /// # Examples
5058    ///
5059    /// ```
5060    /// use zerocopy::IntoBytes;
5061    /// # use zerocopy_derive::*;
5062    ///
5063    /// # #[derive(Eq, PartialEq, Debug)]
5064    /// #[derive(FromBytes, IntoBytes, Immutable)]
5065    /// #[repr(C)]
5066    /// struct PacketHeader {
5067    ///     src_port: [u8; 2],
5068    ///     dst_port: [u8; 2],
5069    ///     length: [u8; 2],
5070    ///     checksum: [u8; 2],
5071    /// }
5072    ///
5073    /// let mut header = PacketHeader {
5074    ///     src_port: [0, 1],
5075    ///     dst_port: [2, 3],
5076    ///     length: [4, 5],
5077    ///     checksum: [6, 7],
5078    /// };
5079    ///
5080    /// let bytes = header.as_mut_bytes();
5081    ///
5082    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5083    ///
5084    /// bytes.reverse();
5085    ///
5086    /// assert_eq!(header, PacketHeader {
5087    ///     src_port: [7, 6],
5088    ///     dst_port: [5, 4],
5089    ///     length: [3, 2],
5090    ///     checksum: [1, 0],
5091    /// });
5092    /// ```
5093    #[must_use = "has no side effects"]
5094    #[inline(always)]
5095    fn as_mut_bytes(&mut self) -> &mut [u8]
5096    where
5097        Self: FromBytes,
5098    {
5099        // Note that this method does not have a `Self: Sized` bound;
5100        // `size_of_val` works for unsized values too.
5101        let len = mem::size_of_val(self);
5102        let slf: *mut Self = self;
5103
5104        // SAFETY:
5105        // - `slf.cast::<u8>()` is valid for reads and writes for `len *
5106        //   size_of::<u8>()` many bytes because...
5107        //   - `slf` is the same pointer as `self`, and `self` is a reference
5108        //     which points to an object whose size is `len`. Thus...
5109        //     - The entire region of `len` bytes starting at `slf` is contained
5110        //       within a single allocation.
5111        //     - `slf` is non-null.
5112        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
5113        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
5114        //   initialized.
5115        // - `Self: FromBytes` ensures that no write to this memory region
5116        //   could result in it containing an invalid `Self`.
5117        // - Since `slf` is derived from `self`, and `self` is a mutable
5118        //   reference, no other references to this memory region can exist.
5119        // - The total size of the resulting slice is no larger than
5120        //   `isize::MAX` because no allocation produced by safe code can be
5121        //   larger than `isize::MAX`.
5122        //
5123        // FIXME(#429): Add references to docs and quotes.
5124        unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
5125    }
5126
5127    /// Writes a copy of `self` to `dst`.
5128    ///
5129    /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`.
5130    ///
5131    /// # Examples
5132    ///
5133    /// ```
5134    /// use zerocopy::IntoBytes;
5135    /// # use zerocopy_derive::*;
5136    ///
5137    /// #[derive(IntoBytes, Immutable)]
5138    /// #[repr(C)]
5139    /// struct PacketHeader {
5140    ///     src_port: [u8; 2],
5141    ///     dst_port: [u8; 2],
5142    ///     length: [u8; 2],
5143    ///     checksum: [u8; 2],
5144    /// }
5145    ///
5146    /// let header = PacketHeader {
5147    ///     src_port: [0, 1],
5148    ///     dst_port: [2, 3],
5149    ///     length: [4, 5],
5150    ///     checksum: [6, 7],
5151    /// };
5152    ///
5153    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
5154    ///
5155    /// header.write_to(&mut bytes[..]);
5156    ///
5157    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5158    /// ```
5159    ///
5160    /// If too many or too few target bytes are provided, `write_to` returns
5161    /// `Err` and leaves the target bytes unmodified:
5162    ///
5163    /// ```
5164    /// # use zerocopy::IntoBytes;
5165    /// # let header = u128::MAX;
5166    /// let mut excessive_bytes = &mut [0u8; 128][..];
5167    ///
5168    /// let write_result = header.write_to(excessive_bytes);
5169    ///
5170    /// assert!(write_result.is_err());
5171    /// assert_eq!(excessive_bytes, [0u8; 128]);
5172    /// ```
5173    #[must_use = "callers should check the return value to see if the operation succeeded"]
5174    #[inline]
5175    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5176    fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5177    where
5178        Self: Immutable,
5179    {
5180        let src = self.as_bytes();
5181        if dst.len() == src.len() {
5182            // SAFETY: Within this branch of the conditional, we have ensured
5183            // that `dst.len()` is equal to `src.len()`. Neither the size of the
5184            // source nor the size of the destination change between the above
5185            // size check and the invocation of `copy_unchecked`.
5186            unsafe { util::copy_unchecked(src, dst) }
5187            Ok(())
5188        } else {
5189            Err(SizeError::new(self))
5190        }
5191    }
5192
5193    /// Writes a copy of `self` to the prefix of `dst`.
5194    ///
5195    /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
5196    /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5197    ///
5198    /// # Examples
5199    ///
5200    /// ```
5201    /// use zerocopy::IntoBytes;
5202    /// # use zerocopy_derive::*;
5203    ///
5204    /// #[derive(IntoBytes, Immutable)]
5205    /// #[repr(C)]
5206    /// struct PacketHeader {
5207    ///     src_port: [u8; 2],
5208    ///     dst_port: [u8; 2],
5209    ///     length: [u8; 2],
5210    ///     checksum: [u8; 2],
5211    /// }
5212    ///
5213    /// let header = PacketHeader {
5214    ///     src_port: [0, 1],
5215    ///     dst_port: [2, 3],
5216    ///     length: [4, 5],
5217    ///     checksum: [6, 7],
5218    /// };
5219    ///
5220    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5221    ///
5222    /// header.write_to_prefix(&mut bytes[..]);
5223    ///
5224    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
5225    /// ```
5226    ///
5227    /// If insufficient target bytes are provided, `write_to_prefix` returns
5228    /// `Err` and leaves the target bytes unmodified:
5229    ///
5230    /// ```
5231    /// # use zerocopy::IntoBytes;
5232    /// # let header = u128::MAX;
5233    /// let mut insufficient_bytes = &mut [0, 0][..];
5234    ///
5235    /// let write_result = header.write_to_prefix(insufficient_bytes);
5236    ///
5237    /// assert!(write_result.is_err());
5238    /// assert_eq!(insufficient_bytes, [0, 0]);
5239    /// ```
5240    #[must_use = "callers should check the return value to see if the operation succeeded"]
5241    #[inline]
5242    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5243    fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5244    where
5245        Self: Immutable,
5246    {
5247        let src = self.as_bytes();
5248        match dst.get_mut(..src.len()) {
5249            Some(dst) => {
5250                // SAFETY: Within this branch of the `match`, we have ensured
5251                // through fallible subslicing that `dst.len()` is equal to
5252                // `src.len()`. Neither the size of the source nor the size of
5253                // the destination change between the above subslicing operation
5254                // and the invocation of `copy_unchecked`.
5255                unsafe { util::copy_unchecked(src, dst) }
5256                Ok(())
5257            }
5258            None => Err(SizeError::new(self)),
5259        }
5260    }
5261
5262    /// Writes a copy of `self` to the suffix of `dst`.
5263    ///
5264    /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
5265    /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5266    ///
5267    /// # Examples
5268    ///
5269    /// ```
5270    /// use zerocopy::IntoBytes;
5271    /// # use zerocopy_derive::*;
5272    ///
5273    /// #[derive(IntoBytes, Immutable)]
5274    /// #[repr(C)]
5275    /// struct PacketHeader {
5276    ///     src_port: [u8; 2],
5277    ///     dst_port: [u8; 2],
5278    ///     length: [u8; 2],
5279    ///     checksum: [u8; 2],
5280    /// }
5281    ///
5282    /// let header = PacketHeader {
5283    ///     src_port: [0, 1],
5284    ///     dst_port: [2, 3],
5285    ///     length: [4, 5],
5286    ///     checksum: [6, 7],
5287    /// };
5288    ///
5289    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5290    ///
5291    /// header.write_to_suffix(&mut bytes[..]);
5292    ///
5293    /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
5301    /// ```
5302    ///
5303    /// If insufficient target bytes are provided, `write_to_suffix` returns
5304    /// `Err` and leaves the target bytes unmodified:
5305    ///
5306    /// ```
5307    /// # use zerocopy::IntoBytes;
5308    /// # let header = u128::MAX;
5309    /// let mut insufficient_bytes = &mut [0, 0][..];
5310    ///
5311    /// let write_result = header.write_to_suffix(insufficient_bytes);
5312    ///
5313    /// assert!(write_result.is_err());
5314    /// assert_eq!(insufficient_bytes, [0, 0]);
5315    /// ```
5316    #[must_use = "callers should check the return value to see if the operation succeeded"]
5317    #[inline]
5318    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5319    fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5320    where
5321        Self: Immutable,
5322    {
5323        let src = self.as_bytes();
5324        let start = if let Some(start) = dst.len().checked_sub(src.len()) {
5325            start
5326        } else {
5327            return Err(SizeError::new(self));
5328        };
5329        let dst = if let Some(dst) = dst.get_mut(start..) {
5330            dst
5331        } else {
5332            // get_mut() should never return None here. We return a `SizeError`
5333            // rather than .unwrap() because in the event the branch is not
5334            // optimized away, returning a value is generally lighter-weight
5335            // than panicking.
5336            return Err(SizeError::new(self));
5337        };
5338        // SAFETY: Through fallible subslicing of `dst`, we have ensured that
5339        // `dst.len()` is equal to `src.len()`. Neither the size of the source
5340        // nor the size of the destination change between the above subslicing
5341        // operation and the invocation of `copy_unchecked`.
5342        unsafe {
5343            util::copy_unchecked(src, dst);
5344        }
5345        Ok(())
5346    }
5347
5348    /// Writes a copy of `self` to an `io::Write`.
5349    ///
5350    /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful
5351    /// for interfacing with operating system byte sinks (files, sockets, etc.).
5352    ///
5353    /// # Examples
5354    ///
5355    /// ```no_run
5356    /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes};
5357    /// use std::fs::File;
5358    /// # use zerocopy_derive::*;
5359    ///
5360    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
5361    /// #[repr(C, packed)]
5362    /// struct GrayscaleImage {
5363    ///     height: U16,
5364    ///     width: U16,
5365    ///     pixels: [U16],
5366    /// }
5367    ///
5368    /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap();
5369    /// let mut file = File::create("image.bin").unwrap();
5370    /// image.write_to_io(&mut file).unwrap();
5371    /// ```
5372    ///
5373    /// If the write fails, `write_to_io` returns `Err` and a partial write may
5374    /// have occurred; e.g.:
5375    ///
5376    /// ```
5377    /// # use zerocopy::IntoBytes;
5378    ///
5379    /// let src = u128::MAX;
5380    /// let mut dst = [0u8; 2];
5381    ///
5382    /// let write_result = src.write_to_io(&mut dst[..]);
5383    ///
5384    /// assert!(write_result.is_err());
5385    /// assert_eq!(dst, [255, 255]);
5386    /// ```
5387    #[cfg(feature = "std")]
5388    #[inline(always)]
5389    fn write_to_io<W>(&self, mut dst: W) -> io::Result<()>
5390    where
5391        Self: Immutable,
5392        W: io::Write,
5393    {
5394        dst.write_all(self.as_bytes())
5395    }
5396
5397    #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
5398    #[doc(hidden)]
5399    #[inline]
5400    fn as_bytes_mut(&mut self) -> &mut [u8]
5401    where
5402        Self: FromBytes,
5403    {
5404        self.as_mut_bytes()
5405    }
5406}
5407
5408/// Analyzes whether a type is [`Unaligned`].
5409///
5410/// This derive analyzes, at compile time, whether the annotated type satisfies
5411/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is
5412/// sound to do so. This derive can be applied to structs, enums, and unions;
5413/// e.g.:
5414///
5415/// ```
5416/// # use zerocopy_derive::Unaligned;
5417/// #[derive(Unaligned)]
5418/// #[repr(C)]
5419/// struct MyStruct {
5420/// # /*
5421///     ...
5422/// # */
5423/// }
5424///
5425/// #[derive(Unaligned)]
5426/// #[repr(u8)]
5427/// enum MyEnum {
5428/// #   Variant0,
5429/// # /*
5430///     ...
5431/// # */
5432/// }
5433///
5434/// #[derive(Unaligned)]
5435/// #[repr(packed)]
5436/// union MyUnion {
5437/// #   variant: u8,
5438/// # /*
5439///     ...
5440/// # */
5441/// }
5442/// ```
5443///
5444/// # Analysis
5445///
5446/// *This section describes, roughly, the analysis performed by this derive to
5447/// determine whether it is sound to implement `Unaligned` for a given type.
5448/// Unless you are modifying the implementation of this derive, or attempting to
5449/// manually implement `Unaligned` for a type yourself, you don't need to read
5450/// this section.*
5451///
5452/// If a type has the following properties, then this derive can implement
5453/// `Unaligned` for that type:
5454///
5455/// - If the type is a struct or union:
5456///   - If `repr(align(N))` is provided, `N` must equal 1.
5457///   - If the type is `repr(C)` or `repr(transparent)`, all fields must be
5458///     [`Unaligned`].
5459///   - If the type is not `repr(C)` or `repr(transparent)`, it must be
5460///     `repr(packed)` or `repr(packed(1))`.
5461/// - If the type is an enum:
5462///   - If `repr(align(N))` is provided, `N` must equal 1.
5463///   - It must be a field-less enum (meaning that all variants have no fields).
5464///   - It must be `repr(i8)` or `repr(u8)`.
5465///
5466/// [safety conditions]: trait@Unaligned#safety
5467#[cfg(any(feature = "derive", test))]
5468#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5469pub use zerocopy_derive::Unaligned;
5470
5471/// Types with no alignment requirement.
5472///
5473/// If `T: Unaligned`, then `align_of::<T>() == 1`.
5474///
5475/// # Implementation
5476///
5477/// **Do not implement this trait yourself!** Instead, use
5478/// [`#[derive(Unaligned)]`][derive]; e.g.:
5479///
5480/// ```
5481/// # use zerocopy_derive::Unaligned;
5482/// #[derive(Unaligned)]
5483/// #[repr(C)]
5484/// struct MyStruct {
5485/// # /*
5486///     ...
5487/// # */
5488/// }
5489///
5490/// #[derive(Unaligned)]
5491/// #[repr(u8)]
5492/// enum MyEnum {
5493/// #   Variant0,
5494/// # /*
5495///     ...
5496/// # */
5497/// }
5498///
5499/// #[derive(Unaligned)]
5500/// #[repr(packed)]
5501/// union MyUnion {
5502/// #   variant: u8,
5503/// # /*
5504///     ...
5505/// # */
5506/// }
5507/// ```
5508///
5509/// This derive performs a sophisticated, compile-time safety analysis to
5510/// determine whether a type is `Unaligned`.
5511///
5512/// # Safety
5513///
5514/// *This section describes what is required in order for `T: Unaligned`, and
5515/// what unsafe code may assume of such types. If you don't plan on implementing
5516/// `Unaligned` manually, and you don't plan on writing unsafe code that
5517/// operates on `Unaligned` types, then you don't need to read this section.*
5518///
5519/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
5520/// reference to `T` at any memory location regardless of alignment. If a type
5521/// is marked as `Unaligned` which violates this contract, it may cause
5522/// undefined behavior.
5523///
5524/// `#[derive(Unaligned)]` only permits [types which satisfy these
5525/// requirements][derive-analysis].
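///
/// For example, because the type below derives `Unaligned`, the alignment
/// check in [`FromBytes::ref_from_bytes`] can never fail, and the cast
/// succeeds at any byte offset within a buffer. This is a sketch; the type and
/// values are illustrative:
///
/// ```
/// use zerocopy::FromBytes;
/// # use zerocopy_derive::*;
///
/// #[derive(FromBytes, KnownLayout, Immutable, Unaligned)]
/// #[repr(C)]
/// struct PacketHeader {
///     src_port: [u8; 2],
///     dst_port: [u8; 2],
/// }
///
/// let bytes = &[0, 1, 2, 3, 4][..];
/// // Offset 1 need not satisfy any alignment requirement for `PacketHeader`.
/// let header = PacketHeader::ref_from_bytes(&bytes[1..]).unwrap();
/// assert_eq!(header.src_port, [1, 2]);
/// assert_eq!(header.dst_port, [3, 4]);
/// ```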
5526///
5527#[cfg_attr(
5528    feature = "derive",
5529    doc = "[derive]: zerocopy_derive::Unaligned",
5530    doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
5531)]
5532#[cfg_attr(
5533    not(feature = "derive"),
5534    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
5535    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
5536)]
5537#[cfg_attr(
5538    zerocopy_diagnostic_on_unimplemented_1_78_0,
5539    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
5540)]
5541pub unsafe trait Unaligned {
5542    // The `Self: Sized` bound makes it so that `Unaligned` is still object
5543    // safe.
5544    #[doc(hidden)]
5545    fn only_derive_is_allowed_to_implement_this_trait()
5546    where
5547        Self: Sized;
5548}
5549
5550/// Derives optimized [`PartialEq`] and [`Eq`] implementations.
5551///
5552/// This derive can be applied to structs and enums implementing both
5553/// [`Immutable`] and [`IntoBytes`]; e.g.:
5554///
5555/// ```
5556/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
5557/// #[derive(ByteEq, Immutable, IntoBytes)]
5558/// #[repr(C)]
5559/// struct MyStruct {
5560/// # /*
5561///     ...
5562/// # */
5563/// }
5564///
5565/// #[derive(ByteEq, Immutable, IntoBytes)]
5566/// #[repr(u8)]
5567/// enum MyEnum {
5568/// #   Variant,
5569/// # /*
5570///     ...
5571/// # */
5572/// }
5573/// ```
5574///
5575/// The standard library's [`derive(Eq, PartialEq)`][derive@PartialEq] computes
5576/// equality by individually comparing each field. Instead, the implementation
5577/// of [`PartialEq::eq`] emitted by `derive(ByteEq)` converts the entirety of
5578/// `self` and `other` to byte slices and compares those slices for equality.
5579/// This may have performance advantages.
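///
/// For example, the following sketch (the type and values are illustrative)
/// shows the derived, byte-wise comparison in use:
///
/// ```
/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
/// # #[derive(Debug)]
/// #[derive(ByteEq, Immutable, IntoBytes)]
/// #[repr(C)]
/// struct Point {
///     x: u16,
///     y: u16,
/// }
///
/// // `eq` compares the raw bytes of `self` and `other` in one shot rather
/// // than comparing `x` and `y` field by field.
/// assert_eq!(Point { x: 1, y: 2 }, Point { x: 1, y: 2 });
/// assert_ne!(Point { x: 1, y: 2 }, Point { x: 2, y: 1 });
/// ```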
5580#[cfg(any(feature = "derive", test))]
5581#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5582pub use zerocopy_derive::ByteEq;
5583/// Derives an optimized [`Hash`] implementation.
5584///
5585/// This derive can be applied to structs and enums implementing both
5586/// [`Immutable`] and [`IntoBytes`]; e.g.:
5587///
5588/// ```
5589/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
5590/// #[derive(ByteHash, Immutable, IntoBytes)]
5591/// #[repr(C)]
5592/// struct MyStruct {
5593/// # /*
5594///     ...
5595/// # */
5596/// }
5597///
5598/// #[derive(ByteHash, Immutable, IntoBytes)]
5599/// #[repr(u8)]
5600/// enum MyEnum {
5601/// #   Variant,
5602/// # /*
5603///     ...
5604/// # */
5605/// }
5606/// ```
5607///
5608/// The standard library's [`derive(Hash)`][derive@Hash] produces hashes by
5609/// individually hashing each field and combining the results. Instead, the
5610/// implementations of [`Hash::hash()`] and [`Hash::hash_slice()`] generated by
5611/// `derive(ByteHash)` convert the entirety of `self` to a byte slice and hash
5612/// it in a single call to [`Hasher::write()`]. This may have performance
5613/// advantages.
5614///
5615/// [`Hash`]: core::hash::Hash
5616/// [`Hash::hash()`]: core::hash::Hash::hash()
5617/// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice()
/// [`Hasher::write()`]: core::hash::Hasher::write()
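///
/// For example, the following sketch (the type and values are illustrative)
/// shows the derived implementation feeding a standard hasher:
///
/// ```
/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
/// use core::hash::{Hash, Hasher};
/// use std::collections::hash_map::DefaultHasher;
///
/// #[derive(ByteHash, Immutable, IntoBytes)]
/// #[repr(C)]
/// struct Point {
///     x: u16,
///     y: u16,
/// }
///
/// let point = Point { x: 1, y: 2 };
/// let mut hasher = DefaultHasher::new();
/// // The derived `Hash::hash` passes all four bytes of `point` to `hasher`
/// // in a single `Hasher::write` call.
/// point.hash(&mut hasher);
/// let _digest = hasher.finish();
/// ```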
5618#[cfg(any(feature = "derive", test))]
5619#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5620pub use zerocopy_derive::ByteHash;
5621/// Implements [`SplitAt`].
5622///
5623/// This derive can be applied to structs; e.g.:
5624///
5625/// ```
5626/// # use zerocopy_derive::{KnownLayout, SplitAt};
5627/// #[derive(SplitAt, KnownLayout)]
5628/// #[repr(C)]
5629/// struct MyStruct {
5630/// # /*
5631///     ...
5632/// # */
///     trailing: [u8],
5633/// }
5634/// ```
5635#[cfg(any(feature = "derive", test))]
5636#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5637pub use zerocopy_derive::SplitAt;
5638
5639#[cfg(feature = "alloc")]
5640#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
5641#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5642mod alloc_support {
5643    use super::*;
5644
5645    /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
5646    /// vector. The new items are initialized with zeros.
5647    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5648    #[doc(hidden)]
5649    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
5650    #[inline(always)]
5651    pub fn extend_vec_zeroed<T: FromZeros>(
5652        v: &mut Vec<T>,
5653        additional: usize,
5654    ) -> Result<(), AllocError> {
5655        <T as FromZeros>::extend_vec_zeroed(v, additional)
5656    }
5657
5658    /// Inserts `additional` new items into `Vec<T>` at `position`. The new
5659    /// items are initialized with zeros.
5660    ///
5661    /// # Panics
5662    ///
5663    /// Panics if `position > v.len()`.
5664    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5665    #[doc(hidden)]
5666    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
5667    #[inline(always)]
5668    pub fn insert_vec_zeroed<T: FromZeros>(
5669        v: &mut Vec<T>,
5670        position: usize,
5671        additional: usize,
5672    ) -> Result<(), AllocError> {
5673        <T as FromZeros>::insert_vec_zeroed(v, position, additional)
5674    }
5675}
5676
5677#[cfg(feature = "alloc")]
5678#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5679#[doc(hidden)]
5680pub use alloc_support::*;
5681
5682#[cfg(test)]
5683#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
5684mod tests {
5685    use static_assertions::assert_impl_all;
5686
5687    use super::*;
5688    use crate::util::testutil::*;
5689
5690    // An unsized type.
5691    //
5692    // This is used to test the custom derives of our traits. The `[u8]` type
5693    // gets a hand-rolled impl, so it doesn't exercise our custom derives.
5694    #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
5695    #[repr(transparent)]
5696    struct Unsized([u8]);
5697
5698    impl Unsized {
5699        fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
5700            // SAFETY: This is *probably* sound: since the layouts of `[u8]` and
5701            // `Unsized` are the same, so are the layouts of `&mut [u8]` and
5702            // `&mut Unsized`. [1] Even if it turns out that this isn't actually
5703            // guaranteed by the language spec, we can just change this since
5704            // it's in test code.
5705            //
5706            // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
5707            unsafe { mem::transmute(slc) }
5708        }
5709    }
5710
5711    #[test]
5712    fn test_known_layout() {
5713        // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
5714        // Test that `PhantomData<$ty>` has the same layout as `()` regardless
5715        // of `$ty`.
5716        macro_rules! test {
5717            ($ty:ty, $expect:expr) => {
5718                let expect = $expect;
5719                assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
5720                assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
5721                assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
5722            };
5723        }
5724
5725        let layout = |offset, align, _trailing_slice_elem_size| DstLayout {
5726            align: NonZeroUsize::new(align).unwrap(),
5727            size_info: match _trailing_slice_elem_size {
5728                None => SizeInfo::Sized { size: offset },
5729                Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
5730            },
5731        };
5732
5733        test!((), layout(0, 1, None));
5734        test!(u8, layout(1, 1, None));
5735        // Use `align_of` because `u64` alignment may be smaller than 8 on some
5736        // platforms.
5737        test!(u64, layout(8, mem::align_of::<u64>(), None));
5738        test!(AU64, layout(8, 8, None));
5739
5740        test!(Option<&'static ()>, usize::LAYOUT);
5741
5742        test!([()], layout(0, 1, Some(0)));
5743        test!([u8], layout(0, 1, Some(1)));
5744        test!(str, layout(0, 1, Some(1)));
5745    }
5746
5747    #[cfg(feature = "derive")]
5748    #[test]
5749    fn test_known_layout_derive() {
5750        // In this and other files (`late_compile_pass.rs`,
5751        // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
5752        // modes of `derive(KnownLayout)` for the following combination of
5753        // properties:
5754        //
5755        // +------------+--------------------------------------+-----------+
5756        // |            |      trailing field properties       |           |
5757        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5758        // |------------+----------+----------------+----------+-----------|
5759        // |          N |        N |              N |        N |      KL00 |
5760        // |          N |        N |              N |        Y |      KL01 |
5761        // |          N |        N |              Y |        N |      KL02 |
5762        // |          N |        N |              Y |        Y |      KL03 |
5763        // |          N |        Y |              N |        N |      KL04 |
5764        // |          N |        Y |              N |        Y |      KL05 |
5765        // |          N |        Y |              Y |        N |      KL06 |
5766        // |          N |        Y |              Y |        Y |      KL07 |
5767        // |          Y |        N |              N |        N |      KL08 |
5768        // |          Y |        N |              N |        Y |      KL09 |
5769        // |          Y |        N |              Y |        N |      KL10 |
5770        // |          Y |        N |              Y |        Y |      KL11 |
5771        // |          Y |        Y |              N |        N |      KL12 |
5772        // |          Y |        Y |              N |        Y |      KL13 |
5773        // |          Y |        Y |              Y |        N |      KL14 |
5774        // |          Y |        Y |              Y |        Y |      KL15 |
5775        // +------------+----------+----------------+----------+-----------+
5776
5777        struct NotKnownLayout<T = ()> {
5778            _t: T,
5779        }
5780
5781        #[derive(KnownLayout)]
5782        #[repr(C)]
5783        struct AlignSize<const ALIGN: usize, const SIZE: usize>
5784        where
5785            elain::Align<ALIGN>: elain::Alignment,
5786        {
5787            _align: elain::Align<ALIGN>,
5788            size: [u8; SIZE],
5789        }
5790
5791        type AU16 = AlignSize<2, 2>;
5792        type AU32 = AlignSize<4, 4>;
5793
5794        fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
5795
5796        let sized_layout = |align, size| DstLayout {
5797            align: NonZeroUsize::new(align).unwrap(),
5798            size_info: SizeInfo::Sized { size },
5799        };
5800
5801        let unsized_layout = |align, elem_size, offset| DstLayout {
5802            align: NonZeroUsize::new(align).unwrap(),
5803            size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
5804        };
5805
5806        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5807        // |          N |        N |              N |        Y |      KL01 |
5808        #[allow(dead_code)]
5809        #[derive(KnownLayout)]
5810        struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5811
5812        let expected = DstLayout::for_type::<KL01>();
5813
5814        assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
5815        assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
5816
5817        // ...with `align(N)`:
5818        #[allow(dead_code)]
5819        #[derive(KnownLayout)]
5820        #[repr(align(64))]
5821        struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5822
5823        let expected = DstLayout::for_type::<KL01Align>();
5824
5825        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
5826        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
5827
5828        // ...with `packed`:
5829        #[allow(dead_code)]
5830        #[derive(KnownLayout)]
5831        #[repr(packed)]
5832        struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5833
5834        let expected = DstLayout::for_type::<KL01Packed>();
5835
5836        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
5837        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
5838
5839        // ...with `packed(N)`:
5840        #[allow(dead_code)]
5841        #[derive(KnownLayout)]
5842        #[repr(packed(2))]
5843        struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5844
5845        assert_impl_all!(KL01PackedN: KnownLayout);
5846
5847        let expected = DstLayout::for_type::<KL01PackedN>();
5848
5849        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
5850        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
5851
5852        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5853        // |          N |        N |              Y |        Y |      KL03 |
5854        #[allow(dead_code)]
5855        #[derive(KnownLayout)]
5856        struct KL03(NotKnownLayout, u8);
5857
5858        let expected = DstLayout::for_type::<KL03>();
5859
5860        assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
5861        assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
5862
5863        // ... with `align(N)`
5864        #[allow(dead_code)]
5865        #[derive(KnownLayout)]
5866        #[repr(align(64))]
5867        struct KL03Align(NotKnownLayout<AU32>, u8);
5868
5869        let expected = DstLayout::for_type::<KL03Align>();
5870
5871        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
5872        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
5873
5874        // ... with `packed`:
5875        #[allow(dead_code)]
5876        #[derive(KnownLayout)]
5877        #[repr(packed)]
5878        struct KL03Packed(NotKnownLayout<AU32>, u8);
5879
5880        let expected = DstLayout::for_type::<KL03Packed>();
5881
5882        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
5883        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
5884
5885        // ... with `packed(N)`
5886        #[allow(dead_code)]
5887        #[derive(KnownLayout)]
5888        #[repr(packed(2))]
5889        struct KL03PackedN(NotKnownLayout<AU32>, u8);
5890
5891        assert_impl_all!(KL03PackedN: KnownLayout);
5892
5893        let expected = DstLayout::for_type::<KL03PackedN>();
5894
5895        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
5896        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
5897
5898        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5899        // |          N |        Y |              N |        Y |      KL05 |
5900        #[allow(dead_code)]
5901        #[derive(KnownLayout)]
5902        struct KL05<T>(u8, T);
5903
5904        fn _test_kl05<T>(t: T) -> impl KnownLayout {
5905            KL05(0u8, t)
5906        }
5907
5908        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5909        // |          N |        Y |              Y |        Y |      KL07 |
5910        #[allow(dead_code)]
5911        #[derive(KnownLayout)]
5912        struct KL07<T: KnownLayout>(u8, T);
5913
5914        fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
5915            let _ = KL07(0u8, t);
5916        }
5917
5918        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5919        // |          Y |        N |              Y |        N |      KL10 |
5920        #[allow(dead_code)]
5921        #[derive(KnownLayout)]
5922        #[repr(C)]
5923        struct KL10(NotKnownLayout<AU32>, [u8]);
5924
5925        let expected = DstLayout::new_zst(None)
5926            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
5927            .extend(<[u8] as KnownLayout>::LAYOUT, None)
5928            .pad_to_align();
5929
5930        assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
5931        assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4));
5932
5933        // ...with `align(N)`:
5934        #[allow(dead_code)]
5935        #[derive(KnownLayout)]
5936        #[repr(C, align(64))]
5937        struct KL10Align(NotKnownLayout<AU32>, [u8]);
5938
5939        let repr_align = NonZeroUsize::new(64);
5940
5941        let expected = DstLayout::new_zst(repr_align)
5942            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
5943            .extend(<[u8] as KnownLayout>::LAYOUT, None)
5944            .pad_to_align();
5945
5946        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
5947        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4));
5948
5949        // ...with `packed`:
5950        #[allow(dead_code)]
5951        #[derive(KnownLayout)]
5952        #[repr(C, packed)]
5953        struct KL10Packed(NotKnownLayout<AU32>, [u8]);
5954
5955        let repr_packed = NonZeroUsize::new(1);
5956
5957        let expected = DstLayout::new_zst(None)
5958            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
5959            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
5960            .pad_to_align();
5961
5962        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
5963        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4));
5964
5965        // ...with `packed(N)`:
5966        #[allow(dead_code)]
5967        #[derive(KnownLayout)]
5968        #[repr(C, packed(2))]
5969        struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
5970
5971        let repr_packed = NonZeroUsize::new(2);
5972
5973        let expected = DstLayout::new_zst(None)
5974            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
5975            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
5976            .pad_to_align();
5977
5978        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
5979        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
5980
5981        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5982        // |          Y |        N |              Y |        Y |      KL11 |
5983        #[allow(dead_code)]
5984        #[derive(KnownLayout)]
5985        #[repr(C)]
5986        struct KL11(NotKnownLayout<AU64>, u8);
5987
5988        let expected = DstLayout::new_zst(None)
5989            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
5990            .extend(<u8 as KnownLayout>::LAYOUT, None)
5991            .pad_to_align();
5992
5993        assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
5994        assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
5995
5996        // ...with `align(N)`:
5997        #[allow(dead_code)]
5998        #[derive(KnownLayout)]
5999        #[repr(C, align(64))]
6000        struct KL11Align(NotKnownLayout<AU64>, u8);
6001
6002        let repr_align = NonZeroUsize::new(64);
6003
6004        let expected = DstLayout::new_zst(repr_align)
6005            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6006            .extend(<u8 as KnownLayout>::LAYOUT, None)
6007            .pad_to_align();
6008
6009        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
6010        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6011
6012        // ...with `packed`:
6013        #[allow(dead_code)]
6014        #[derive(KnownLayout)]
6015        #[repr(C, packed)]
6016        struct KL11Packed(NotKnownLayout<AU64>, u8);
6017
6018        let repr_packed = NonZeroUsize::new(1);
6019
6020        let expected = DstLayout::new_zst(None)
6021            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6022            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6023            .pad_to_align();
6024
6025        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
6026        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
6027
6028        // ...with `packed(N)`:
6029        #[allow(dead_code)]
6030        #[derive(KnownLayout)]
6031        #[repr(C, packed(2))]
6032        struct KL11PackedN(NotKnownLayout<AU64>, u8);
6033
6034        let repr_packed = NonZeroUsize::new(2);
6035
6036        let expected = DstLayout::new_zst(None)
6037            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6038            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6039            .pad_to_align();
6040
6041        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
6042        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
6043
6044        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6045        // |          Y |        Y |              Y |        N |      KL14 |
6046        #[allow(dead_code)]
6047        #[derive(KnownLayout)]
6048        #[repr(C)]
6049        struct KL14<T: ?Sized + KnownLayout>(u8, T);
6050
6051        fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
6052            _assert_kl(kl)
6053        }
6054
6055        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6056        // |          Y |        Y |              Y |        Y |      KL15 |
6057        #[allow(dead_code)]
6058        #[derive(KnownLayout)]
6059        #[repr(C)]
6060        struct KL15<T: KnownLayout>(u8, T);
6061
6062        fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
6063            let _ = KL15(0u8, t);
6064        }
6065
6066        // Test a variety of combinations of field types:
6067        //  - ()
6068        //  - u8
6069        //  - AU16
6070        //  - [()]
6071        //  - [u8]
6072        //  - [AU16]
6073
6074        #[allow(clippy::upper_case_acronyms, dead_code)]
6075        #[derive(KnownLayout)]
6076        #[repr(C)]
6077        struct KLTU<T, U: ?Sized>(T, U);
6078
6079        assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
6080
6081        assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6082
6083        assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6084
6085        assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0));
6086
6087        assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
6088
6089        assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0));
6090
6091        assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6092
6093        assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
6094
6095        assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6096
6097        assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1));
6098
6099        assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
6100
6101        assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
6102
6103        assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6104
6105        assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6106
6107        assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6108
6109        assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2));
6110
6111        assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2));
6112
6113        assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
6114
6115        // Test a variety of field counts.
6116
6117        #[derive(KnownLayout)]
6118        #[repr(C)]
6119        struct KLF0;
6120
6121        assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
6122
6123        #[derive(KnownLayout)]
6124        #[repr(C)]
6125        struct KLF1([u8]);
6126
6127        assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
6128
6129        #[derive(KnownLayout)]
6130        #[repr(C)]
6131        struct KLF2(NotKnownLayout<u8>, [u8]);
6132
6133        assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
6134
6135        #[derive(KnownLayout)]
6136        #[repr(C)]
6137        struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
6138
6139        assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
6140
6141        #[derive(KnownLayout)]
6142        #[repr(C)]
6143        struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
6144
6145        assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8));
6146    }
6147
6148    #[test]
6149    fn test_object_safety() {
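        // These functions only need to compile: accepting `&dyn Immutable` and
        // `&dyn Unaligned` arguments demonstrates that both traits are object
        // safe (i.e., usable as trait objects).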
6150        fn _takes_no_cell(_: &dyn Immutable) {}
6151        fn _takes_unaligned(_: &dyn Unaligned) {}
6152    }
6153
6154    #[test]
6155    fn test_from_zeros_only() {
6156        // Test types that implement `FromZeros` but not `FromBytes`.
6157
6158        assert!(!bool::new_zeroed());
6159        assert_eq!(char::new_zeroed(), '\0');
6160
6161        #[cfg(feature = "alloc")]
6162        {
6163            assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false)));
6164            assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
6165
6166            assert_eq!(
6167                <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6168                [false, false, false]
6169            );
6170            assert_eq!(
6171                <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6172                ['\0', '\0', '\0']
6173            );
6174
6175            assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]);
6176            assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
6177        }
6178
6179        let mut string = "hello".to_string();
6180        let s: &mut str = string.as_mut();
6181        assert_eq!(s, "hello");
6182        s.zero();
6183        assert_eq!(s, "\0\0\0\0\0");
6184    }
6185
6186    #[test]
6187    fn test_zst_count_preserved() {
6188        // Test that, when an explicit count is provided for a type with a
6189        // ZST trailing slice element, that count is preserved. This is
6190        // important since, for such types, all element counts result in objects
6191        // of the same size, and so the correct behavior is ambiguous. However,
6192        // preserving the count as requested by the user is the behavior that we
6193        // document publicly.
6194
6195        // FromZeros methods
6196        #[cfg(feature = "alloc")]
6197        assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3);
6198        #[cfg(feature = "alloc")]
6199        assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3);
6200
6201        // FromBytes methods
6202        assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3);
6203        assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3);
6204        assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3);
6205        assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3);
6206        assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3);
6207        assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3);
6208    }
6209
6210    #[test]
6211    fn test_read_write() {
6212        const VAL: u64 = 0x12345678;
6213        #[cfg(target_endian = "big")]
6214        const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
6215        #[cfg(target_endian = "little")]
6216        const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
6217        const ZEROS: [u8; 8] = [0u8; 8];
6218
6219        // Test `FromBytes::{read_from_bytes, read_from_prefix, read_from_suffix}`.
6220
6221        assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL));
6222        // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
6223        // zeros.
6224        let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6225        assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..])));
6226        assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0)));
6227        // The first 8 bytes are all zeros and the second 8 bytes are from
6228        // `VAL_BYTES`.
6229        let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6230        assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..])));
6231        assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL)));
6232
6233        // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`.
6234
6235        let mut bytes = [0u8; 8];
6236        assert_eq!(VAL.write_to(&mut bytes[..]), Ok(()));
6237        assert_eq!(bytes, VAL_BYTES);
6238        let mut bytes = [0u8; 16];
6239        assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(()));
6240        let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6241        assert_eq!(bytes, want);
6242        let mut bytes = [0u8; 16];
6243        assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(()));
6244        let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6245        assert_eq!(bytes, want);
6246    }
6247
6248    #[test]
6249    #[cfg(feature = "std")]
6250    fn test_read_io_with_padding_soundness() {
6251        // This test is designed to exhibit potential UB in
6252        // `FromBytes::read_from_io` (see issues #2319 and #2320).
6253
6254        // On most platforms (where `align_of::<u16>() == 2`), `WithPadding`
6255        // will have inter-field padding between `x` and `y`.
6256        #[derive(FromBytes)]
6257        #[repr(C)]
6258        struct WithPadding {
6259            x: u8,
6260            y: u16,
6261        }
6262        struct ReadsInRead;
6263        impl std::io::Read for ReadsInRead {
6264            fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
6265                // This body branches on every byte of `buf`, ensuring that it
6266                // exhibits UB if any byte of `buf` is uninitialized.
6267                if buf.iter().all(|&x| x == 0) {
6268                    Ok(buf.len())
6269                } else {
6270                    buf.iter_mut().for_each(|x| *x = 0);
6271                    Ok(buf.len())
6272                }
6273            }
6274        }
6275        assert!(matches!(WithPadding::read_from_io(ReadsInRead), Ok(WithPadding { x: 0, y: 0 })));
6276    }
6277
6278    #[test]
6279    #[cfg(feature = "std")]
6280    fn test_read_write_io() {
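        // Writing a `u16` into a larger buffer should fill exactly the first
        // two bytes, and `read_from_io` should read the value back out.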
6281        let mut long_buffer = [0, 0, 0, 0];
6282        assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(())));
6283        assert_eq!(long_buffer, [255, 255, 0, 0]);
6284        assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX)));
6285
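        // Writing a `u32` into a two-byte buffer should fail (note that the
        // bytes which did fit are still written), and reading a `u32` from a
        // two-byte buffer should fail as well.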
6286        let mut short_buffer = [0, 0];
6287        assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err());
6288        assert_eq!(short_buffer, [255, 255]);
6289        assert!(u32::read_from_io(&short_buffer[..]).is_err());
6290    }
6291
6292    #[test]
6293    fn test_try_from_bytes_try_read_from() {
6294        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false));
6295        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true));
6296
6297        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..])));
6298        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..])));
6299
6300        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false)));
6301        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true)));
6302
6303        // If we don't pass enough bytes, it fails.
6304        assert!(matches!(
6305            <u8 as TryFromBytes>::try_read_from_bytes(&[]),
6306            Err(TryReadError::Size(_))
6307        ));
6308        assert!(matches!(
6309            <u8 as TryFromBytes>::try_read_from_prefix(&[]),
6310            Err(TryReadError::Size(_))
6311        ));
6312        assert!(matches!(
6313            <u8 as TryFromBytes>::try_read_from_suffix(&[]),
6314            Err(TryReadError::Size(_))
6315        ));
6316
6317        // If we pass too many bytes, it fails.
6318        assert!(matches!(
6319            <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]),
6320            Err(TryReadError::Size(_))
6321        ));
6322
6323        // If we pass an invalid value, it fails.
6324        assert!(matches!(
6325            <bool as TryFromBytes>::try_read_from_bytes(&[2]),
6326            Err(TryReadError::Validity(_))
6327        ));
6328        assert!(matches!(
6329            <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]),
6330            Err(TryReadError::Validity(_))
6331        ));
6332        assert!(matches!(
6333            <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]),
6334            Err(TryReadError::Validity(_))
6335        ));
6336
6337        // Reading from a misaligned buffer should still succeed. Since `AU64`'s
6338        // alignment is 8, and since we read from two adjacent addresses one
6339        // byte apart, it is guaranteed that at least one of them (though
6340        // possibly both) will be misaligned.
6341        let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0];
6342        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0)));
6343        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0)));
6344
6345        assert_eq!(
6346            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]),
6347            Ok((AU64(0), &[][..]))
6348        );
6349        assert_eq!(
6350            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]),
6351            Ok((AU64(0), &[][..]))
6352        );
6353
6354        assert_eq!(
6355            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]),
6356            Ok((&[][..], AU64(0)))
6357        );
6358        assert_eq!(
6359            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]),
6360            Ok((&[][..], AU64(0)))
6361        );
6362    }
6363
6364    #[test]
6365    fn test_ref_from_mut_from() {
6366        // Test `FromBytes::{ref_from, mut_from}{_bytes,_prefix,_suffix}` success cases.
6367        // Exhaustive coverage for these methods is provided by the `Ref` tests above,
6368        // which these helper methods defer to.
6369
6370        let mut buf =
6371            Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
6372
6373        assert_eq!(
6374            AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(),
6375            [8, 9, 10, 11, 12, 13, 14, 15]
6376        );
6377        let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap();
6378        suffix.0 = 0x0101010101010101;
6379        // `[u8; 9]` is deliberately not half the size of the full buffer; this would
6380        // catch `from_prefix` sharing its implementation with `from_suffix` (issues #506, #511).
6381        assert_eq!(
6382            <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(),
6383            (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1])
6384        );
6385        let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
6386        assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]);
6387        suffix.0 = 0x0202020202020202;
6388        let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap();
6389        assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]);
6390        suffix[0] = 42;
6391        assert_eq!(
6392            <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(),
6393            (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..])
6394        );
6395        <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30;
6396        assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
6397    }
6398
6399    #[test]
6400    fn test_ref_from_mut_from_error() {
6401        // Test `FromBytes::{ref_from, mut_from}{_bytes,_prefix,_suffix}` error cases.
6402
6403        // Fail because the buffer is too large.
6404        let mut buf = Align::<[u8; 16], AU64>::default();
6405        // `buf.t` should be aligned to 8, so only the length check should fail.
6406        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6407        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6408        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6409        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6410
6411        // Fail because the buffer is too small.
6412        let mut buf = Align::<[u8; 4], AU64>::default();
6413        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6414        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6415        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6416        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6417        assert!(AU64::ref_from_prefix(&buf.t[..]).is_err());
6418        assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err());
6419        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6420        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6421        assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err());
6422        assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err());
6423        assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err());
6424        assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err());
6425
6426        // Fail because the alignment is insufficient.
6427        let mut buf = Align::<[u8; 13], AU64>::default();
6428        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6429        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6430        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6431        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6432        assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err());
6433        assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err());
6434        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6435        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6436    }
6437
6438    #[test]
6439    fn test_to_methods() {
6440        /// Run a series of tests by calling `IntoBytes` methods on `t`.
6441        ///
6442        /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
6443        /// before `t` has been modified. `post_mutation` is the expected
6444        /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]`
6445        /// has had its bits flipped (by applying `^= 0xFF`).
6446        ///
6447        /// `N` is the size of `t` in bytes.
6448        fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>(
6449            t: &mut T,
6450            bytes: &[u8],
6451            post_mutation: &T,
6452        ) {
6453            // Test that we can access the underlying bytes, and that we get the
6454            // right bytes and the right number of bytes.
6455            assert_eq!(t.as_bytes(), bytes);
6456
6457            // Test that changes to the underlying byte slices are reflected in
6458            // the original object.
6459            t.as_mut_bytes()[0] ^= 0xFF;
6460            assert_eq!(t, post_mutation);
6461            t.as_mut_bytes()[0] ^= 0xFF;
6462
6463            // `write_to` rejects slices that are too small or too large.
6464            assert!(t.write_to(&mut vec![0; N - 1][..]).is_err());
6465            assert!(t.write_to(&mut vec![0; N + 1][..]).is_err());
6466
6467            // `write_to` works as expected.
6468            let mut bytes = [0; N];
6469            assert_eq!(t.write_to(&mut bytes[..]), Ok(()));
6470            assert_eq!(bytes, t.as_bytes());
6471
6472            // `write_to_prefix` rejects slices that are too small.
6473            assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err());
6474
6475            // `write_to_prefix` works with exact-sized slices.
6476            let mut bytes = [0; N];
6477            assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(()));
6478            assert_eq!(bytes, t.as_bytes());
6479
6480            // `write_to_prefix` works with too-large slices, and any bytes past
6481            // the prefix aren't modified.
6482            let mut too_many_bytes = vec![0; N + 1];
6483            too_many_bytes[N] = 123;
6484            assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(()));
6485            assert_eq!(&too_many_bytes[..N], t.as_bytes());
6486            assert_eq!(too_many_bytes[N], 123);
6487
6488            // `write_to_suffix` rejects slices that are too small.
6489            assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err());
6490
6491            // `write_to_suffix` works with exact-sized slices.
6492            let mut bytes = [0; N];
6493            assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(()));
6494            assert_eq!(bytes, t.as_bytes());
6495
6496            // `write_to_suffix` works with too-large slices, and any bytes
6497            // before the suffix aren't modified.
6498            let mut too_many_bytes = vec![0; N + 1];
6499            too_many_bytes[0] = 123;
6500            assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(()));
6501            assert_eq!(&too_many_bytes[1..], t.as_bytes());
6502            assert_eq!(too_many_bytes[0], 123);
6503        }
6504
6505        #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)]
6506        #[repr(C)]
6507        struct Foo {
6508            a: u32,
6509            b: Wrapping<u32>,
6510            c: Option<NonZeroU32>,
6511        }
6512
6513        let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
6514            vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
6515        } else {
6516            vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
6517        };
6518        let post_mutation_expected_a =
6519            if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
6520        test::<_, 12>(
6521            &mut Foo { a: 1, b: Wrapping(2), c: None },
6522            expected_bytes.as_bytes(),
6523            &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
6524        );
6525        test::<_, 3>(
6526            Unsized::from_mut_slice(&mut [1, 2, 3]),
6527            &[1, 2, 3],
6528            Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
6529        );
6530    }
6531
6532    #[test]
6533    fn test_array() {
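        // An array field is serialized element by element: 33 `u16`s of
        // `0xFFFF` should appear as 66 `0xFF` bytes.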
6534        #[derive(FromBytes, IntoBytes, Immutable)]
6535        #[repr(C)]
6536        struct Foo {
6537            a: [u16; 33],
6538        }
6539
6540        let foo = Foo { a: [0xFFFF; 33] };
6541        let expected = [0xFFu8; 66];
6542        assert_eq!(foo.as_bytes(), &expected[..]);
6543    }
6544
6545    #[test]
6546    fn test_new_zeroed() {
6547        assert!(!bool::new_zeroed());
6548        assert_eq!(u64::new_zeroed(), 0);
6549        // This test exists in order to exercise unsafe code, especially when
6550        // running under Miri.
6551        #[allow(clippy::unit_cmp)]
6552        {
6553            assert_eq!(<()>::new_zeroed(), ());
6554        }
6555    }
6556
6557    #[test]
6558    fn test_transparent_packed_generic_struct() {
6559        #[derive(IntoBytes, FromBytes, Unaligned)]
6560        #[repr(transparent)]
6561        #[allow(dead_code)] // We never construct this type
6562        struct Foo<T> {
6563            _t: T,
6564            _phantom: PhantomData<()>,
6565        }
6566
6567        assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes);
6568        assert_impl_all!(Foo<u8>: Unaligned);
6569
6570        #[derive(IntoBytes, FromBytes, Unaligned)]
6571        #[repr(C, packed)]
6572        #[allow(dead_code)] // We never construct this type
6573        struct Bar<T, U> {
6574            _t: T,
6575            _u: U,
6576        }
6577
6578        assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned);
6579    }
6580
6581    #[cfg(feature = "alloc")]
6582    mod alloc {
6583        use super::*;
6584
6585        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6586        #[test]
6587        fn test_extend_vec_zeroed() {
6588            // Test extending when there is an existing allocation.
6589            let mut v = vec![100u16, 200, 300];
6590            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6591            assert_eq!(v.len(), 6);
6592            assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
6593            drop(v);
6594
6595            // Test extending when there is no existing allocation.
6596            let mut v: Vec<u64> = Vec::new();
6597            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6598            assert_eq!(v.len(), 3);
6599            assert_eq!(&*v, &[0, 0, 0]);
6600            drop(v);
6601        }
6602
6603        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6604        #[test]
6605        fn test_extend_vec_zeroed_zst() {
6606            // Test extending when there is an existing (fake) allocation.
6607            let mut v = vec![(), (), ()];
6608            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6609            assert_eq!(v.len(), 6);
6610            assert_eq!(&*v, &[(), (), (), (), (), ()]);
6611            drop(v);
6612
6613            // Test extending when there is no existing (fake) allocation.
6614            let mut v: Vec<()> = Vec::new();
6615            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6616            assert_eq!(&*v, &[(), (), ()]);
6617            drop(v);
6618        }
6619
6620        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6621        #[test]
6622        fn test_insert_vec_zeroed() {
6623            // Insert at start (no existing allocation).
6624            let mut v: Vec<u64> = Vec::new();
6625            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6626            assert_eq!(v.len(), 2);
6627            assert_eq!(&*v, &[0, 0]);
6628            drop(v);
6629
6630            // Insert at start.
6631            let mut v = vec![100u64, 200, 300];
6632            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6633            assert_eq!(v.len(), 5);
6634            assert_eq!(&*v, &[0, 0, 100, 200, 300]);
6635            drop(v);
6636
6637            // Insert at middle.
6638            let mut v = vec![100u64, 200, 300];
6639            u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6640            assert_eq!(v.len(), 4);
6641            assert_eq!(&*v, &[100, 0, 200, 300]);
6642            drop(v);
6643
6644            // Insert at end.
6645            let mut v = vec![100u64, 200, 300];
6646            u64::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6647            assert_eq!(v.len(), 4);
6648            assert_eq!(&*v, &[100, 200, 300, 0]);
6649            drop(v);
6650        }
6651
6652        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6653        #[test]
6654        fn test_insert_vec_zeroed_zst() {
6655            // Insert at start (no existing fake allocation).
6656            let mut v: Vec<()> = Vec::new();
6657            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6658            assert_eq!(v.len(), 2);
6659            assert_eq!(&*v, &[(), ()]);
6660            drop(v);
6661
6662            // Insert at start.
6663            let mut v = vec![(), (), ()];
6664            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6665            assert_eq!(v.len(), 5);
6666            assert_eq!(&*v, &[(), (), (), (), ()]);
6667            drop(v);
6668
6669            // Insert at middle.
6670            let mut v = vec![(), (), ()];
6671            <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6672            assert_eq!(v.len(), 4);
6673            assert_eq!(&*v, &[(), (), (), ()]);
6674            drop(v);
6675
6676            // Insert at end.
6677            let mut v = vec![(), (), ()];
6678            <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6679            assert_eq!(v.len(), 4);
6680            assert_eq!(&*v, &[(), (), (), ()]);
6681            drop(v);
6682        }
6683
6684        #[test]
6685        fn test_new_box_zeroed() {
6686            assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0)));
6687        }
6688
6689        #[test]
6690        fn test_new_box_zeroed_array() {
6691            drop(<[u32; 0x1000]>::new_box_zeroed());
6692        }
6693
6694        #[test]
6695        fn test_new_box_zeroed_zst() {
6696            // This test exists in order to exercise unsafe code, especially
6697            // when running under Miri.
6698            #[allow(clippy::unit_cmp)]
6699            {
6700                assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(())));
6701            }
6702        }
6703
6704        #[test]
6705        fn test_new_box_zeroed_with_elems() {
6706            let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap();
6707            assert_eq!(s.len(), 3);
6708            assert_eq!(&*s, &[0, 0, 0]);
6709            s[1] = 3;
6710            assert_eq!(&*s, &[0, 3, 0]);
6711        }
6712
6713        #[test]
6714        fn test_new_box_zeroed_with_elems_empty() {
6715            let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap();
6716            assert_eq!(s.len(), 0);
6717        }
6718
6719        #[test]
6720        fn test_new_box_zeroed_with_elems_zst() {
6721            let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap();
6722            assert_eq!(s.len(), 3);
6723            assert!(s.get(10).is_none());
6724            // This test exists in order to exercise unsafe code, especially
6725            // when running under Miri.
6726            #[allow(clippy::unit_cmp)]
6727            {
6728                assert_eq!(s[1], ());
6729            }
6730            s[2] = ();
6731        }
6732
6733        #[test]
6734        fn test_new_box_zeroed_with_elems_zst_empty() {
6735            let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap();
6736            assert_eq!(s.len(), 0);
6737        }
6738
6739        #[test]
6740        fn new_box_zeroed_with_elems_errors() {
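            // Requests whose total size in bytes overflows or exceeds
            // `isize::MAX` should fail with `AllocError` rather than panic.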
6741            assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError));
6742
6743            let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap();
6744            assert_eq!(
6745                <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1),
6746                Err(AllocError)
6747            );
6748        }
6749    }
6750}