author     Stephen Rothwell <sfr@canb.auug.org.au>  2021-07-20 14:06:33 +1000
committer  Stephen Rothwell <sfr@canb.auug.org.au>  2021-07-20 14:06:33 +1000
commit     dd00df5ad71a4b413dfee1c5464e696aff2ac69e (patch)
tree       2ce0491556aa43ab86dff3bfc4393ab68a34a73f
parent     cfc6662e1db59aaf9a08cda8b049cfdd67661663 (diff)
parent     5d3986cf8ed63ff8d86270e578649e71143112d6 (diff)
download   linux-next-dd00df5ad71a4b413dfee1c5464e696aff2ac69e.tar.gz
Merge remote-tracking branch 'rust/rust-next'

# Conflicts:
#	Makefile
#	include/linux/kallsyms.h
-rw-r--r--  .gitignore | 5
-rw-r--r--  .rustfmt.toml | 12
-rw-r--r--  Documentation/doc-guide/kernel-doc.rst | 3
-rw-r--r--  Documentation/index.rst | 1
-rw-r--r--  Documentation/kbuild/kbuild.rst | 4
-rw-r--r--  Documentation/process/changes.rst | 13
-rw-r--r--  Documentation/rust/arch-support.rst | 35
-rw-r--r--  Documentation/rust/assets/favicon-16x16.png | bin 0 -> 798 bytes
-rw-r--r--  Documentation/rust/assets/favicon-32x32.png | bin 0 -> 2076 bytes
-rw-r--r--  Documentation/rust/assets/rust-logo.png | bin 0 -> 53976 bytes
-rw-r--r--  Documentation/rust/coding.rst | 92
-rw-r--r--  Documentation/rust/docs.rst | 110
-rw-r--r--  Documentation/rust/index.rst | 20
-rw-r--r--  Documentation/rust/quick-start.rst | 222
-rw-r--r--  MAINTAINERS | 14
-rw-r--r--  Makefile | 176
-rw-r--r--  arch/arm/rust/target.json | 28
-rw-r--r--  arch/arm64/rust/target.json | 35
-rw-r--r--  arch/powerpc/rust/target.json | 30
-rw-r--r--  arch/riscv/Makefile | 1
-rw-r--r--  arch/riscv/rust/rv32ima.json | 37
-rw-r--r--  arch/riscv/rust/rv32imac.json | 37
-rw-r--r--  arch/riscv/rust/rv64ima.json | 37
-rw-r--r--  arch/riscv/rust/rv64imac.json | 37
-rw-r--r--  arch/x86/rust/target.json | 37
-rw-r--r--  include/linux/kallsyms.h | 2
-rw-r--r--  include/linux/spinlock.h | 17
-rw-r--r--  init/Kconfig | 28
-rw-r--r--  kernel/kallsyms.c | 7
-rw-r--r--  kernel/livepatch/core.c | 4
-rw-r--r--  kernel/printk/printk.c | 5
-rw-r--r--  lib/Kconfig.debug | 144
-rw-r--r--  lib/vsprintf.c | 12
-rw-r--r--  rust/.gitignore | 6
-rw-r--r--  rust/Makefile | 316
-rw-r--r--  rust/alloc/README.md | 32
-rw-r--r--  rust/alloc/alloc.rs | 425
-rw-r--r--  rust/alloc/borrow.rs | 493
-rw-r--r--  rust/alloc/boxed.rs | 1728
-rw-r--r--  rust/alloc/collections/mod.rs | 116
-rw-r--r--  rust/alloc/fmt.rs | 587
-rw-r--r--  rust/alloc/lib.rs | 197
-rw-r--r--  rust/alloc/macros.rs | 128
-rw-r--r--  rust/alloc/prelude/mod.rs | 17
-rw-r--r--  rust/alloc/prelude/v1.rs | 16
-rw-r--r--  rust/alloc/raw_vec.rs | 612
-rw-r--r--  rust/alloc/rc.rs | 2539
-rw-r--r--  rust/alloc/slice.rs | 1271
-rw-r--r--  rust/alloc/str.rs | 614
-rw-r--r--  rust/alloc/string.rs | 2847
-rw-r--r--  rust/alloc/sync.rs | 2631
-rw-r--r--  rust/alloc/vec/drain.rs | 157
-rw-r--r--  rust/alloc/vec/drain_filter.rs | 145
-rw-r--r--  rust/alloc/vec/into_iter.rs | 296
-rw-r--r--  rust/alloc/vec/is_zero.rs | 106
-rw-r--r--  rust/alloc/vec/mod.rs | 3255
-rw-r--r--  rust/alloc/vec/partial_eq.rs | 49
-rw-r--r--  rust/alloc/vec/set_len_on_drop.rs | 30
-rw-r--r--  rust/alloc/vec/spec_extend.rs | 170
-rw-r--r--  rust/bindgen_parameters | 13
-rw-r--r--  rust/build_error.rs | 33
-rw-r--r--  rust/compiler_builtins.rs | 146
-rw-r--r--  rust/exports.c | 16
-rw-r--r--  rust/helpers.c | 235
-rw-r--r--  rust/kernel/allocator.rs | 63
-rw-r--r--  rust/kernel/bindings.rs | 28
-rw-r--r--  rust/kernel/bindings_helper.h | 24
-rw-r--r--  rust/kernel/buffer.rs | 39
-rw-r--r--  rust/kernel/build_assert.rs | 80
-rw-r--r--  rust/kernel/c_types.rs | 119
-rw-r--r--  rust/kernel/chrdev.rs | 212
-rw-r--r--  rust/kernel/error.rs | 272
-rw-r--r--  rust/kernel/file.rs | 130
-rw-r--r--  rust/kernel/file_operations.rs | 698
-rw-r--r--  rust/kernel/io_buffer.rs | 153
-rw-r--r--  rust/kernel/iov_iter.rs | 95
-rw-r--r--  rust/kernel/lib.rs | 220
-rw-r--r--  rust/kernel/linked_list.rs | 245
-rw-r--r--  rust/kernel/miscdev.rs | 113
-rw-r--r--  rust/kernel/module_param.rs | 497
-rw-r--r--  rust/kernel/of.rs | 101
-rw-r--r--  rust/kernel/pages.rs | 176
-rw-r--r--  rust/kernel/platdev.rs | 166
-rw-r--r--  rust/kernel/prelude.rs | 28
-rw-r--r--  rust/kernel/print.rs | 412
-rw-r--r--  rust/kernel/random.rs | 50
-rw-r--r--  rust/kernel/raw_list.rs | 361
-rw-r--r--  rust/kernel/rbtree.rs | 570
-rw-r--r--  rust/kernel/security.rs | 79
-rw-r--r--  rust/kernel/static_assert.rs | 39
-rw-r--r--  rust/kernel/str.rs | 259
-rw-r--r--  rust/kernel/sync/arc.rs | 227
-rw-r--r--  rust/kernel/sync/condvar.rs | 136
-rw-r--r--  rust/kernel/sync/guard.rs | 82
-rw-r--r--  rust/kernel/sync/locked_by.rs | 112
-rw-r--r--  rust/kernel/sync/mod.rs | 84
-rw-r--r--  rust/kernel/sync/mutex.rs | 101
-rw-r--r--  rust/kernel/sync/spinlock.rs | 109
-rw-r--r--  rust/kernel/sysctl.rs | 198
-rw-r--r--  rust/kernel/task.rs | 193
-rw-r--r--  rust/kernel/traits.rs | 26
-rw-r--r--  rust/kernel/types.rs | 249
-rw-r--r--  rust/kernel/user_ptr.rs | 191
-rw-r--r--  rust/macros/lib.rs | 127
-rw-r--r--  rust/macros/module.rs | 754
-rw-r--r--  samples/Kconfig | 2
-rw-r--r--  samples/Makefile | 1
-rw-r--r--  samples/rust/Kconfig | 113
-rw-r--r--  samples/rust/Makefile | 12
-rw-r--r--  samples/rust/rust_chrdev.rs | 51
-rw-r--r--  samples/rust/rust_minimal.rs | 38
-rw-r--r--  samples/rust/rust_miscdev.rs | 150
-rw-r--r--  samples/rust/rust_module_parameters.rs | 72
-rw-r--r--  samples/rust/rust_print.rs | 57
-rw-r--r--  samples/rust/rust_random.rs | 61
-rw-r--r--  samples/rust/rust_semaphore.rs | 177
-rw-r--r--  samples/rust/rust_semaphore_c.c | 212
-rw-r--r--  samples/rust/rust_stack_probing.rs | 40
-rw-r--r--  samples/rust/rust_sync.rs | 81
-rw-r--r--  scripts/Makefile.build | 22
-rw-r--r--  scripts/Makefile.lib | 12
-rwxr-xr-x  scripts/generate_rust_analyzer.py | 143
-rw-r--r--  scripts/kallsyms.c | 33
-rw-r--r--  scripts/kconfig/confdata.c | 67
-rwxr-xr-x  scripts/rust-version.sh | 31
-rw-r--r--  tools/include/linux/kallsyms.h | 2
-rw-r--r--  tools/include/linux/lockdep.h | 2
-rw-r--r--  tools/lib/perf/include/perf/event.h | 2
-rw-r--r--  tools/lib/symbol/kallsyms.h | 2

129 files changed, 29600 insertions(+), 32 deletions(-)
diff --git a/.gitignore b/.gitignore
index 7afd412dadd2..48c68948f476 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,6 +37,7 @@
*.o
*.o.*
*.patch
+*.rmeta
*.s
*.so
*.so.dbg
@@ -96,6 +97,7 @@ modules.order
!.gitattributes
!.gitignore
!.mailmap
+!.rustfmt.toml
#
# Generated include files
@@ -161,3 +163,6 @@ x509.genkey
# Documentation toolchain
sphinx_*/
+
+# Rust analyzer configuration
+/rust-project.json
diff --git a/.rustfmt.toml b/.rustfmt.toml
new file mode 100644
index 000000000000..4fea7c464f0d
--- /dev/null
+++ b/.rustfmt.toml
@@ -0,0 +1,12 @@
+edition = "2018"
+newline_style = "Unix"
+
+# Unstable options that help catch some mistakes in formatting and that we may want to enable
+# when they become stable.
+#
+# They are kept here since they are useful to run from time to time.
+#format_code_in_doc_comments = true
+#reorder_impl_items = true
+#comment_width = 100
+#wrap_comments = true
+#normalize_comments = true
diff --git a/Documentation/doc-guide/kernel-doc.rst b/Documentation/doc-guide/kernel-doc.rst
index 79aaa55d6bcf..724e2ffddff1 100644
--- a/Documentation/doc-guide/kernel-doc.rst
+++ b/Documentation/doc-guide/kernel-doc.rst
@@ -11,6 +11,9 @@ when it is embedded in source files.
reasons. The kernel source contains tens of thousands of kernel-doc
comments. Please stick to the style described here.
+.. note:: kernel-doc does not cover Rust code: please see
+ Documentation/rust/docs.rst instead.
+
The kernel-doc structure is extracted from the comments, and proper
`Sphinx C Domain`_ function and type descriptions with anchors are
generated from them. The descriptions are filtered for special kernel-doc
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 54ce34fd6fbd..1b13c2445e87 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -82,6 +82,7 @@ merged much easier.
maintainer/index
fault-injection/index
livepatch/index
+ rust/index
Kernel API documentation
diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst
index 2d1fc03d346e..468a0d216c29 100644
--- a/Documentation/kbuild/kbuild.rst
+++ b/Documentation/kbuild/kbuild.rst
@@ -57,6 +57,10 @@ CFLAGS_MODULE
-------------
Additional module specific options to use for $(CC).
+KRUSTFLAGS
+----------
+Additional options to the Rust compiler (for built-in and modules).
+
LDFLAGS_MODULE
--------------
Additional options used for $(LD) when linking modules.
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index d3a8557b66a1..7654a7105dcf 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -26,11 +26,18 @@ running a Linux kernel. Also, not all tools are necessary on all
systems; obviously, if you don't have any PC Card hardware, for example,
you probably needn't concern yourself with pcmciautils.
+Furthermore, note that newer versions of the Rust toolchain may or may not work
+because, for the moment, we depend on some unstable features. Thus, unless you
+know what you are doing, use the exact version listed here. Please see
+:ref:`Documentation/rust/quick-start.rst <rust_quick_start>` for details.
+
====================== =============== ========================================
Program Minimal version Command to check the version
====================== =============== ========================================
GNU C 4.9 gcc --version
Clang/LLVM (optional) 10.0.1 clang --version
+rustc (optional) 1.54.0-beta.1 rustc --version
+bindgen (optional) 0.56.0 bindgen --version
GNU make 3.81 make --version
binutils 2.23 ld -v
flex 2.5.35 flex --version
@@ -329,6 +336,12 @@ Sphinx
Please see :ref:`sphinx_install` in :ref:`Documentation/doc-guide/sphinx.rst <sphinxdoc>`
for details about Sphinx requirements.
+rustdoc
+-------
+
+``rustdoc`` is used to generate Rust documentation. Please see
+:ref:`Documentation/rust/docs.rst <rust_docs>` for more information.
+
Getting updated software
========================
diff --git a/Documentation/rust/arch-support.rst b/Documentation/rust/arch-support.rst
new file mode 100644
index 000000000000..0dd603d37819
--- /dev/null
+++ b/Documentation/rust/arch-support.rst
@@ -0,0 +1,35 @@
+.. _rust_arch_support:
+
+Arch Support
+============
+
+Currently, the Rust compiler (``rustc``) uses LLVM for code generation,
+which limits the architectures we can target. In addition, ``bindgen``
+relies on LLVM through ``libclang``, and support for building the kernel
+with LLVM/Clang varies (see :ref:`kbuild_llvm`).
+
+Below is a general summary of architectures that currently work. Level of
+support corresponds to ``S`` values in the ``MAINTAINERS`` file.
+
+.. list-table::
+ :widths: 10 10 10
+ :header-rows: 1
+
+ * - Architecture
+ - Level of support
+ - Constraints
+ * - ``arm``
+ - Maintained
+ - ``armv6`` and compatible only, ``RUST_OPT_LEVEL >= 2``
+ * - ``arm64``
+ - Maintained
+ - None
+ * - ``powerpc``
+ - Maintained
+ - ``ppc64le`` only, ``RUST_OPT_LEVEL < 2`` requires ``CONFIG_THREAD_SHIFT=15``
+ * - ``riscv``
+ - Maintained
+ - ``riscv64`` only
+ * - ``x86``
+ - Maintained
+ - ``x86_64`` only
diff --git a/Documentation/rust/assets/favicon-16x16.png b/Documentation/rust/assets/favicon-16x16.png
new file mode 100644
index 000000000000..d93115e8f47a
--- /dev/null
+++ b/Documentation/rust/assets/favicon-16x16.png
Binary files differ
diff --git a/Documentation/rust/assets/favicon-32x32.png b/Documentation/rust/assets/favicon-32x32.png
new file mode 100644
index 000000000000..655ccbcfc8cd
--- /dev/null
+++ b/Documentation/rust/assets/favicon-32x32.png
Binary files differ
diff --git a/Documentation/rust/assets/rust-logo.png b/Documentation/rust/assets/rust-logo.png
new file mode 100644
index 000000000000..081ae80c193b
--- /dev/null
+++ b/Documentation/rust/assets/rust-logo.png
Binary files differ
diff --git a/Documentation/rust/coding.rst b/Documentation/rust/coding.rst
new file mode 100644
index 000000000000..5cbe132f461f
--- /dev/null
+++ b/Documentation/rust/coding.rst
@@ -0,0 +1,92 @@
+.. _rust_coding:
+
+Coding
+======
+
+This document describes how to write Rust code in the kernel.
+
+
+Coding style
+------------
+
+The code is automatically formatted using the ``rustfmt`` tool. This is very
+good news!
+
+- If you contribute from time to time to the kernel, you do not need to learn
+ and remember one more style guide. You will also need fewer patch roundtrips
+ to land a change.
+
+- If you are a reviewer or a maintainer, you will not need to spend time
+ pointing out style issues anymore.
+
+.. note:: Conventions on comments and documentation are not checked by
+ ``rustfmt``. Thus we still need to take care of those: please see
+ :ref:`Documentation/rust/docs.rst <rust_docs>`.
+
+We use the tool's default settings. This means we are following the idiomatic
+Rust style. For instance, we use 4 spaces for indentation rather than tabs.
+
+Typically, you will want to instruct your editor/IDE to format while you type,
+when you save or at commit time. However, if for some reason you want
+to reformat the entire kernel Rust sources at some point, you may run::
+
+ make LLVM=1 rustfmt
+
+To check if everything is formatted (printing a diff otherwise), e.g. if you
+have configured a CI for a tree as a maintainer, you may run::
+
+ make LLVM=1 rustfmtcheck
+
+Like ``clang-format`` for the rest of the kernel, ``rustfmt`` works on
+individual files, and does not require a kernel configuration. Sometimes it may
+even work with broken code.
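+
+For instance, to check a single file directly (printing a diff if it is not
+correctly formatted), you may run::
+
+ rustfmt --check samples/rust/rust_minimal.rs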
+
+
+Extra lints
+-----------
+
+While ``rustc`` is a very helpful compiler, some extra lints and analysis are
+available via ``clippy``, a Rust linter. To enable it, pass ``CLIPPY=1`` to
+the same invocation you use for compilation, e.g.::
+
+ make LLVM=1 CLIPPY=1
+
+At the moment, we do not enforce a "clippy-free" compilation, so you can treat
+the output the same way as the extra warning levels for C, e.g. like ``W=2``.
+Still, we use the default configuration, which is relatively conservative, so
+it is a good idea to read any output it may produce from time to time and fix
+the pointed out issues. The list of enabled lints will likely be tweaked over
+time, and extra levels may end up being introduced, e.g. ``CLIPPY=2``.
+
+
+Abstractions vs. bindings
+-------------------------
+
+We don't have abstractions for all the kernel internal APIs and concepts,
+but we would like to expand coverage as time goes on. Unless there is
+a good reason not to, always use the abstractions instead of calling
+the C bindings directly.
+
+If you are writing some code that requires a call to an internal kernel API
+or concept that isn't abstracted yet, consider providing an (ideally safe)
+abstraction for everyone to use.
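+
+For instance, the contrast looks roughly as follows. This is a hypothetical
+sketch: ``bindings::c_function`` and ``kernel::abstraction`` are illustrative
+names, not real APIs.
+
+.. code-block:: rust
+
+ // Discouraged: calling the C binding directly.
+ // SAFETY: the caller must uphold the C API contract by hand.
+ unsafe { bindings::c_function() };
+
+ // Preferred: going through a safe abstraction that encapsulates
+ // the `unsafe` block and upholds the contract internally.
+ kernel::abstraction();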
+
+
+Conditional compilation
+-----------------------
+
+Rust code has access to conditional compilation based on the kernel
+configuration:
+
+.. code-block:: rust
+
+ #[cfg(CONFIG_X)] // `CONFIG_X` is enabled (`y` or `m`)
+ #[cfg(CONFIG_X="y")] // `CONFIG_X` is enabled as a built-in (`y`)
+ #[cfg(CONFIG_X="m")] // `CONFIG_X` is enabled as a module (`m`)
+ #[cfg(not(CONFIG_X))] // `CONFIG_X` is disabled
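+
+For instance, an item may be gated on a (hypothetical) ``CONFIG_X`` and
+used only when it is compiled in:
+
+.. code-block:: rust
+
+ // Only compiled when `CONFIG_X` is enabled (`y` or `m`).
+ #[cfg(CONFIG_X)]
+ fn x_init() {
+     // ...
+ }
+
+ fn init() {
+     // This statement is compiled out when `CONFIG_X` is disabled.
+     #[cfg(CONFIG_X)]
+     x_init();
+ }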
+
+
+Documentation
+-------------
+
+Please see :ref:`Documentation/rust/docs.rst <rust_docs>`.
diff --git a/Documentation/rust/docs.rst b/Documentation/rust/docs.rst
new file mode 100644
index 000000000000..ab29d8b6a00d
--- /dev/null
+++ b/Documentation/rust/docs.rst
@@ -0,0 +1,110 @@
+.. _rust_docs:
+
+Docs
+====
+
+Rust kernel code is not documented like C kernel code (i.e. via kernel-doc).
+Instead, we use the usual system for documenting Rust code: the ``rustdoc``
+tool, which uses Markdown (a *very* lightweight markup language).
+
+This document describes how to make the most out of the kernel documentation
+for Rust.
+
+
+Reading the docs
+----------------
+
+An advantage of using Markdown is that it attempts to make text look almost as
+you would have written it in plain text. This makes the documentation quite
+pleasant to read even in its source form.
+
+However, the generated HTML docs produced by ``rustdoc`` provide a *very* nice
+experience, including integrated instant search, clickable items (types,
+functions, constants, etc. -- including to all the standard Rust library ones
+that we use in the kernel, e.g. ``core``), categorization, links to the source
+code, etc.
+
+Like for the rest of the kernel documentation, pregenerated HTML docs for
+the libraries (crates) inside ``rust/`` that are used by the rest of the kernel
+are available at `kernel.org`_ (TODO: link when in mainline and generated
+alongside the rest of the documentation).
+
+.. _kernel.org: http://kernel.org/
+
+Otherwise, you can generate them locally. This is quite fast (same order as
+compiling the code itself) and you do not need any special tools or environment.
+This has the added advantage that they will be tailored to your particular
+kernel configuration. To generate them, simply use the ``rustdoc`` target with
+the same invocation you use for compilation, e.g.::
+
+ make LLVM=1 rustdoc
+
+
+Writing the docs
+----------------
+
+If you already know Markdown, learning how to write Rust documentation will be
+a breeze. If not, understanding the basics is a matter of minutes reading other
+code. There are also many guides available out there; a particularly nice one
+is at `GitHub`_.
+
+.. _GitHub: https://guides.github.com/features/mastering-markdown/#syntax
+
+This is how a well-documented Rust function may look (derived from the Rust
+standard library)::
+
+ /// Returns the contained [`Some`] value, consuming the `self` value,
+ /// without checking that the value is not [`None`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method on [`None`] is *[undefined behavior]*.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("air");
+ /// assert_eq!(unsafe { x.unwrap_unchecked() }, "air");
+ /// ```
+ pub unsafe fn unwrap_unchecked(self) -> T {
+ match self {
+ Some(val) => val,
+
+ // SAFETY: the safety contract must be upheld by the caller.
+ None => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+
+This example showcases a few ``rustdoc`` features and some common conventions
+(that we also follow in the kernel):
+
+* The first paragraph must be a single sentence briefly describing what
+ the documented item does. Further explanations must go in extra paragraphs.
+
+* ``unsafe`` functions must document the preconditions needed for a call to be
+ safe under a ``Safety`` section.
+
+* While not shown here, if a function may panic, the conditions under which
+ that happens must be described under a ``Panics`` section (see the example
+ after this list). Please note that panicking should be very rare and used
+ only with a good reason. In almost all cases, you should use a fallible
+ approach, returning a ``Result``.
+
+* If providing examples of usage would help readers, they must be written in
+ a section called ``Examples``.
+
+* Rust items (functions, types, constants...) will be automatically linked
+ (``rustdoc`` will find out the URL for you).
+
+* Following the Rust standard library conventions, any ``unsafe`` block must be
+ preceded by a ``SAFETY`` comment describing why the code inside is sound.
+
+ While sometimes the reason might look trivial (and the comment therefore
+ unneeded), writing these comments is not just a good way of documenting what
+ has been taken into account, but also of showing that there are no *extra*
+ implicit constraints.
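+
+For instance, this is how a ``Panics`` section may look (derived from the
+Rust standard library)::
+
+ /// Returns the contained [`Some`] value, consuming the `self` value.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the self value equals [`None`].
+ pub fn unwrap(self) -> T {
+     match self {
+         Some(val) => val,
+         None => panic!("called `Option::unwrap()` on a `None` value"),
+     }
+ }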
+
+To learn more about how to write documentation for Rust and extra features,
+please take a look at the ``rustdoc`` `book`_.
+
+.. _book: https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html
diff --git a/Documentation/rust/index.rst b/Documentation/rust/index.rst
new file mode 100644
index 000000000000..257cf2b200b8
--- /dev/null
+++ b/Documentation/rust/index.rst
@@ -0,0 +1,20 @@
+Rust
+====
+
+Documentation related to Rust within the kernel. If you are starting out,
+read the :ref:`Documentation/rust/quick-start.rst <rust_quick_start>` guide.
+
+.. toctree::
+ :maxdepth: 1
+
+ quick-start
+ coding
+ docs
+ arch-support
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst
new file mode 100644
index 000000000000..3f5df2888e0e
--- /dev/null
+++ b/Documentation/rust/quick-start.rst
@@ -0,0 +1,222 @@
+.. _rust_quick_start:
+
+Quick Start
+===========
+
+This document describes how to get started with kernel development in Rust.
+If you have worked previously with Rust, this will only take a moment.
+
+Please note that, at the moment, a very restricted subset of architectures
+is supported; see :doc:`/rust/arch-support`.
+
+
+Requirements: Building
+----------------------
+
+This section explains how to fetch the tools needed for building.
+
+Some of these requirements might be available from your Linux distribution
+under names like ``rustc``, ``rust-src``, ``rust-bindgen``, etc. However,
+at the time of writing, they are likely to not be recent enough.
+
+
+rustc
+*****
+
+A particular version (``1.54.0-beta.1``) of the Rust compiler is required.
+Newer versions may or may not work because, for the moment, we depend on
+some unstable Rust features.
+
+If you are using ``rustup``, run::
+
+ rustup default beta-2021-06-23
+
+Otherwise, fetch a standalone installer or install ``rustup`` from:
+
+ https://www.rust-lang.org
+
+
+Rust standard library source
+****************************
+
+The Rust standard library source is required because the build system will
+cross-compile ``core`` and ``alloc``.
+
+If you are using ``rustup``, run::
+
+ rustup component add rust-src
+
+Otherwise, if you used a standalone installer, you can clone the Rust
+repository into the installation folder of your toolchain::
+
+ git clone --recurse-submodules https://github.com/rust-lang/rust $(rustc --print sysroot)/lib/rustlib/src/rust
+
+
+libclang
+********
+
+``libclang`` (part of LLVM) is used by ``bindgen`` to understand the C code
+in the kernel, which means you will need an LLVM installed, as when you
+compile the kernel with ``CC=clang`` or ``LLVM=1``.
+
+Your Linux distribution is likely to have a suitable one available, so it is
+best if you check that first.
+
+There are also some binaries for several systems and architectures uploaded at:
+
+ https://releases.llvm.org/download.html
+
+Otherwise, building LLVM takes quite a while, but it is not a complex process:
+
+ https://llvm.org/docs/GettingStarted.html#getting-the-source-code-and-building-llvm
+
+See Documentation/kbuild/llvm.rst for more information and further ways
+to fetch pre-built releases and distribution packages.
+
+
+bindgen
+*******
+
+The bindings to the C side of the kernel are generated at build time using
+the ``bindgen`` tool. The version we currently support is ``0.56.0``.
+
+Install it via (this will build the tool from source)::
+
+ cargo install --locked --version 0.56.0 bindgen
+
+
+Requirements: Developing
+------------------------
+
+This section explains how to fetch the tools needed for developing. That is,
+if you only want to build the kernel, you do not need them.
+
+
+rustfmt
+*******
+
+The ``rustfmt`` tool is used to automatically format all the Rust kernel code,
+including the generated C bindings (for details, please see
+:ref:`Documentation/rust/coding.rst <rust_coding>`).
+
+If you are using ``rustup``, its ``default`` profile already installs the tool,
+so you should be good to go. If you are using another profile, you can install
+the component manually::
+
+ rustup component add rustfmt
+
+The standalone installers also come with ``rustfmt``.
+
+
+clippy
+******
+
+``clippy`` is a Rust linter. Installing it allows you to get extra warnings
+for Rust code by passing ``CLIPPY=1`` to ``make`` (for details, please see
+:ref:`Documentation/rust/coding.rst <rust_coding>`).
+
+If you are using ``rustup``, its ``default`` profile already installs the tool,
+so you should be good to go. If you are using another profile, you can install
+the component manually::
+
+ rustup component add clippy
+
+The standalone installers also come with ``clippy``.
+
+
+cargo
+*****
+
+``cargo`` is the Rust native build system. It is currently required to run
+the tests (``rusttest`` target) since we use it to build a custom standard
+library that contains the facilities provided by our custom ``alloc``.
+
+If you are using ``rustup``, all the profiles already install the tool,
+so you should be good to go. The standalone installers also include ``cargo``.
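+
+For instance, to run the tests that require it, you may run::
+
+ make LLVM=1 rusttest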
+
+
+rustdoc
+*******
+
+``rustdoc`` is the documentation tool for Rust. It generates pretty HTML
+documentation for Rust code (for details, please see
+:ref:`Documentation/rust/docs.rst <rust_docs>`).
+
+``rustdoc`` is also able to test the examples provided in documented Rust code
+(called doctests or documentation tests). We use this feature; thus, ``rustdoc``
+is required to run the tests (``rusttest`` target).
+
+If you are using ``rustup``, all the profiles already install the tool,
+so you should be good to go. The standalone installers also include ``rustdoc``.
+
+
+rust-analyzer
+*************
+
+The `rust-analyzer <https://rust-analyzer.github.io/>`_ language server can
+be used with many editors to enable syntax highlighting, completion, go to
+definition, and other features.
+
+``rust-analyzer`` will need to be
+`configured <https://rust-analyzer.github.io/manual.html#non-cargo-based-projects>`_
+to work with the kernel by adding a ``rust-project.json`` file in the root folder.
+This file can be generated by the ``rust-analyzer`` Make target, which will
+create it in the root of the output directory.
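+
+For instance, you may run::
+
+ make LLVM=1 rust-analyzer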
+
+
+Configuration
+-------------
+
+``Rust support`` (``CONFIG_RUST``) needs to be enabled in the ``General setup``
+menu. The option is only shown if the build system can locate ``rustc``.
+In turn, this will make visible the rest of the options that depend on Rust.
+
+Afterwards, go to::
+
+ Kernel hacking
+ -> Sample kernel code
+ -> Rust samples
+
+And enable some sample modules either as built-in or as loadable.
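+
+For instance, with ``CONFIG_RUST`` enabled and a sample selected as a module,
+the resulting ``.config`` may contain lines like (the sample symbol name shown
+here is illustrative)::
+
+ CONFIG_RUST=y
+ CONFIG_SAMPLE_RUST_MINIMAL=m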
+
+
+Building
+--------
+
+Building a kernel with a complete LLVM toolchain is the best supported setup
+at the moment. That is::
+
+ make LLVM=1
+
+For architectures that do not support a full LLVM toolchain, use::
+
+ make CC=clang
+
+Using GCC also works for some configurations, but it is *very* experimental at
+the moment.
+
+
+Hacking
+-------
+
+If you want to dive deeper, take a look at the source code of the samples
+at ``samples/rust/``, the Rust support code under ``rust/`` and
+the ``Rust hacking`` menu under ``Kernel hacking``.
+
+If you use GDB/Binutils and Rust symbols aren't getting demangled, the reason
+is that your toolchain doesn't support Rust's new v0 mangling scheme yet.
+There are a few ways out:
+
+ - If you don't mind building your own tools, we provide the following fork
+ with the support cherry-picked from GCC:
+
+ https://github.com/Rust-for-Linux/binutils-gdb/releases/tag/gdb-10.1-release-rust
+ https://github.com/Rust-for-Linux/binutils-gdb/releases/tag/binutils-2_35_1-rust
+
+ - If you only need GDB and can enable ``CONFIG_DEBUG_INFO``, do so:
+ some versions of GDB (e.g. vanilla GDB 10.1) are able to use
+ the pre-demangled names embedded in the debug info.
+
+ - If you don't need loadable module support, you may compile without
+ the ``-Zsymbol-mangling-version=v0`` flag. However, we don't maintain
+ support for that, so avoid it unless you are in a hurry.
diff --git a/MAINTAINERS b/MAINTAINERS
index dd22cc79a1f3..69932194e1ba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16137,6 +16137,20 @@ L: linux-rdma@vger.kernel.org
S: Maintained
F: drivers/infiniband/ulp/rtrs/
+RUST
+M: Miguel Ojeda <ojeda@kernel.org>
+M: Alex Gaynor <alex.gaynor@gmail.com>
+M: Wedson Almeida Filho <wedsonaf@google.com>
+L: rust-for-linux@vger.kernel.org
+S: Supported
+W: https://github.com/Rust-for-Linux/linux
+B: https://github.com/Rust-for-Linux/linux/issues
+T: git https://github.com/Rust-for-Linux/linux.git rust-next
+F: rust/
+F: samples/rust/
+F: Documentation/rust/
+K: \b(?i:rust)\b
+
RXRPC SOCKETS (AF_RXRPC)
M: David Howells <dhowells@redhat.com>
M: Marc Dionne <marc.dionne@auristor.com>
diff --git a/Makefile b/Makefile
index 45a3215179b4..85807d0c26ab 100644
--- a/Makefile
+++ b/Makefile
@@ -120,6 +120,13 @@ endif
export KBUILD_CHECKSRC
+# Enable "clippy" (a linter) as part of the Rust compilation.
+#
+# Use 'make CLIPPY=1' to enable it.
+ifeq ("$(origin CLIPPY)", "command line")
+ KBUILD_CLIPPY := $(CLIPPY)
+endif
+
# Use make M=dir or set the environment variable KBUILD_EXTMOD to specify the
# directory of external module to build. Setting M= takes precedence.
ifeq ("$(origin M)", "command line")
@@ -268,7 +275,7 @@ no-dot-config-targets := $(clean-targets) \
cscope gtags TAGS tags help% %docs check% coccicheck \
$(version_h) headers headers_% archheaders archscripts \
%asm-generic kernelversion %src-pkg dt_binding_check \
- outputmakefile
+ outputmakefile rustfmt rustfmtcheck
# Installation targets should not require compiler. Unfortunately, vdso_install
# is an exception where build artifacts may be updated. This must be fixed.
no-compiler-targets := $(no-dot-config-targets) install dtbs_install \
@@ -457,6 +464,12 @@ OBJDUMP = $(CROSS_COMPILE)objdump
READELF = $(CROSS_COMPILE)readelf
STRIP = $(CROSS_COMPILE)strip
endif
+RUSTC = rustc
+RUSTDOC = rustdoc
+RUSTFMT = rustfmt
+CLIPPY_DRIVER = clippy-driver
+BINDGEN = bindgen
+CARGO = cargo
PAHOLE = pahole
RESOLVE_BTFIDS = $(objtree)/tools/bpf/resolve_btfids/resolve_btfids
LEX = flex
@@ -480,9 +493,11 @@ CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
NOSTDINC_FLAGS :=
CFLAGS_MODULE =
+RUSTFLAGS_MODULE =
AFLAGS_MODULE =
LDFLAGS_MODULE =
CFLAGS_KERNEL =
+RUSTFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
@@ -511,15 +526,41 @@ KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
-Werror=return-type -Wno-format-security \
-std=gnu89
KBUILD_CPPFLAGS := -D__KERNEL__
+KBUILD_RUST_TARGET := $(srctree)/arch/$(SRCARCH)/rust/target.json
+KBUILD_RUSTFLAGS := --emit=dep-info,obj,metadata --edition=2018 \
+ -Cpanic=abort -Cembed-bitcode=n -Clto=n -Crpath=n \
+ -Cforce-unwind-tables=n -Ccodegen-units=1 \
+ -Zbinary_dep_depinfo=y -Zsymbol-mangling-version=v0 \
+ -Dunsafe_op_in_unsafe_fn -Drust_2018_idioms \
+ -Wmissing_docs
+KBUILD_CLIPPYFLAGS := -Dclippy::correctness -Dclippy::style \
+ -Dclippy::complexity -Dclippy::perf -Dclippy::float_arithmetic
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
+KBUILD_RUSTFLAGS_KERNEL :=
KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
+KBUILD_RUSTFLAGS_MODULE := --cfg MODULE
KBUILD_LDFLAGS_MODULE :=
KBUILD_LDFLAGS :=
CLANG_FLAGS :=
+ifeq ($(KBUILD_CLIPPY),1)
+ RUSTC_OR_CLIPPY_QUIET := CLIPPY
+ RUSTC_OR_CLIPPY = $(CLIPPY_DRIVER) $(KBUILD_CLIPPYFLAGS)
+else
+ RUSTC_OR_CLIPPY_QUIET := RUSTC
+ RUSTC_OR_CLIPPY = $(RUSTC)
+endif
+
+ifdef RUST_LIB_SRC
+ export RUST_LIB_SRC
+endif
+
+export RUSTC_BOOTSTRAP := 1
+
export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
+export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN CARGO
export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
@@ -527,9 +568,10 @@ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+export KBUILD_RUST_TARGET KBUILD_RUSTFLAGS RUSTFLAGS_KERNEL RUSTFLAGS_MODULE
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
-export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
-export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
+export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_RUSTFLAGS_MODULE KBUILD_LDFLAGS_MODULE
+export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL KBUILD_RUSTFLAGS_KERNEL
# Files to ignore in find ... statements
@@ -585,18 +627,23 @@ endif
# and from include/config/auto.conf.cmd to detect the compiler upgrade.
CC_VERSION_TEXT = $(subst $(pound),,$(shell $(CC) --version 2>/dev/null | head -n 1))
-ifneq ($(findstring clang,$(CC_VERSION_TEXT)),)
+TENTATIVE_CLANG_FLAGS := -Werror=unknown-warning-option
+
ifneq ($(CROSS_COMPILE),)
-CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
+TENTATIVE_CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
endif
ifeq ($(LLVM_IAS),1)
-CLANG_FLAGS += -integrated-as
+TENTATIVE_CLANG_FLAGS += -integrated-as
else
-CLANG_FLAGS += -no-integrated-as
+TENTATIVE_CLANG_FLAGS += -no-integrated-as
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
-CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
+TENTATIVE_CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
endif
-CLANG_FLAGS += -Werror=unknown-warning-option
+
+export TENTATIVE_CLANG_FLAGS
+
+ifneq ($(findstring clang,$(CC_VERSION_TEXT)),)
+CLANG_FLAGS += $(TENTATIVE_CLANG_FLAGS)
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_AFLAGS += $(CLANG_FLAGS)
export CLANG_FLAGS
@@ -734,7 +781,7 @@ $(KCONFIG_CONFIG):
#
# Do not use $(call cmd,...) here. That would suppress prompts from syncconfig,
# so you cannot notice that Kconfig is waiting for the user input.
-%/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h: $(KCONFIG_CONFIG)
+%/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h %/generated/rustc_cfg: $(KCONFIG_CONFIG)
$(Q)$(kecho) " SYNC $@"
$(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
else # !may-sync-config
@@ -761,12 +808,43 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+ifdef CONFIG_RUST_DEBUG_ASSERTIONS
+KBUILD_RUSTFLAGS += -Cdebug-assertions=y
+else
+KBUILD_RUSTFLAGS += -Cdebug-assertions=n
+endif
+
+ifdef CONFIG_RUST_OVERFLOW_CHECKS
+KBUILD_RUSTFLAGS += -Coverflow-checks=y
+else
+KBUILD_RUSTFLAGS += -Coverflow-checks=n
+endif
+
ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
KBUILD_CFLAGS += -O2
+KBUILD_RUSTFLAGS_OPT_LEVEL_MAP := 2
else ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3
KBUILD_CFLAGS += -O3
+KBUILD_RUSTFLAGS_OPT_LEVEL_MAP := 3
else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
+KBUILD_RUSTFLAGS_OPT_LEVEL_MAP := z
+endif
+
+ifdef CONFIG_RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C
+KBUILD_RUSTFLAGS += -Copt-level=$(KBUILD_RUSTFLAGS_OPT_LEVEL_MAP)
+else ifdef CONFIG_RUST_OPT_LEVEL_0
+KBUILD_RUSTFLAGS += -Copt-level=0
+else ifdef CONFIG_RUST_OPT_LEVEL_1
+KBUILD_RUSTFLAGS += -Copt-level=1
+else ifdef CONFIG_RUST_OPT_LEVEL_2
+KBUILD_RUSTFLAGS += -Copt-level=2
+else ifdef CONFIG_RUST_OPT_LEVEL_3
+KBUILD_RUSTFLAGS += -Copt-level=3
+else ifdef CONFIG_RUST_OPT_LEVEL_S
+KBUILD_RUSTFLAGS += -Copt-level=s
+else ifdef CONFIG_RUST_OPT_LEVEL_Z
+KBUILD_RUSTFLAGS += -Copt-level=z
endif
# Tell gcc to never replace conditional load with a non-conditional one
@@ -816,6 +894,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
+KBUILD_RUSTFLAGS += -Cforce-frame-pointers=y
else
# Some targets (ARM with Thumb2, for example), can't be built with frame
# pointers. For those, we don't have FUNCTION_TRACER automatically
@@ -853,6 +932,8 @@ ifdef CONFIG_CC_IS_GCC
DEBUG_CFLAGS += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
endif
+DEBUG_RUSTFLAGS :=
+
ifdef CONFIG_DEBUG_INFO
ifdef CONFIG_DEBUG_INFO_SPLIT
@@ -863,6 +944,11 @@ endif
ifneq ($(LLVM_IAS),1)
KBUILD_AFLAGS += -Wa,-gdwarf-2
+ifdef CONFIG_DEBUG_INFO_REDUCED
+DEBUG_RUSTFLAGS += -Cdebuginfo=1
+else
+DEBUG_RUSTFLAGS += -Cdebuginfo=2
+endif
endif
ifndef CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
@@ -887,6 +973,9 @@ endif # CONFIG_DEBUG_INFO
KBUILD_CFLAGS += $(DEBUG_CFLAGS)
export DEBUG_CFLAGS
+KBUILD_RUSTFLAGS += $(DEBUG_RUSTFLAGS)
+export DEBUG_RUSTFLAGS
+
ifdef CONFIG_FUNCTION_TRACER
ifdef CONFIG_FTRACE_MCOUNT_USE_CC
CC_FLAGS_FTRACE += -mrecord-mcount
@@ -1042,10 +1131,11 @@ include $(addprefix $(srctree)/, $(include-y))
# Do not add $(call cc-option,...) below this line. When you build the kernel
# from the clean source tree, the GCC plugins do not exist at this point.
-# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
+# Add user supplied CPPFLAGS, AFLAGS, CFLAGS and RUSTFLAGS as the last assignments
KBUILD_CPPFLAGS += $(KCPPFLAGS)
KBUILD_AFLAGS += $(KAFLAGS)
KBUILD_CFLAGS += $(KCFLAGS)
+KBUILD_RUSTFLAGS += $(KRUSTFLAGS)
KBUILD_LDFLAGS_MODULE += --build-id=sha1
LDFLAGS_vmlinux += --build-id=sha1
@@ -1114,6 +1204,10 @@ export MODULES_NSDEPS := $(extmod_prefix)modules.nsdeps
ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
+ifdef CONFIG_RUST
+core-y += rust/
+endif
+
vmlinux-dirs := $(patsubst %/,%,$(filter %/, \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
$(libs-y) $(libs-m)))
@@ -1214,6 +1308,9 @@ archprepare: outputmakefile archheaders archscripts scripts include/config/kerne
prepare0: archprepare
$(Q)$(MAKE) $(build)=scripts/mod
$(Q)$(MAKE) $(build)=.
+ifdef CONFIG_RUST
+ $(Q)$(MAKE) $(build)=rust
+endif
# All the preparing..
prepare: prepare0
@@ -1504,7 +1601,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
- compile_commands.json .thinlto-cache
+ compile_commands.json .thinlto-cache rust/test rust/doc
# Directories & files removed with 'make mrproper'
MRPROPER_FILES += include/config include/generated \
@@ -1515,7 +1612,8 @@ MRPROPER_FILES += include/config include/generated \
certs/signing_key.pem certs/signing_key.x509 \
certs/x509.genkey \
vmlinux-gdb.py \
- *.spec
+ *.spec \
+ rust/*_generated.h rust/*_generated.rs rust/libmacros.so
# clean - Delete most, but leave enough to build external modules
#
@@ -1627,6 +1725,17 @@ help:
@echo ' kselftest-merge - Merge all the config dependencies of'
@echo ' kselftest to existing .config.'
@echo ''
+ @echo 'Rust targets:'
+ @echo ' rustfmt - Reformat all the Rust code in the kernel'
+ @echo ' rustfmtcheck - Checks if all the Rust code in the kernel'
+ @echo ' is formatted, printing a diff otherwise.'
+ @echo ' rustdoc - Generate Rust documentation'
+ @echo ' (requires kernel .config)'
+ @echo ' rusttest - Runs the Rust tests'
+ @echo ' (requires kernel .config; downloads external repos)'
+ @echo ' rust-analyzer - Generate rust-project.json rust-analyzer support file'
+ @echo ' (requires kernel .config)'
+ @echo ''
@$(if $(dtstree), \
echo 'Devicetree:'; \
echo '* dtbs - Build device tree blobs for enabled boards'; \
@@ -1698,6 +1807,46 @@ PHONY += $(DOC_TARGETS)
$(DOC_TARGETS):
$(Q)$(MAKE) $(build)=Documentation $@
+
+# Rust targets
+# ---------------------------------------------------------------------------
+
+# Documentation target
+#
+# Using the singular to avoid running afoul of `no-dot-config-targets`.
+PHONY += rustdoc
+rustdoc: prepare0
+ $(Q)$(MAKE) $(build)=rust $@
+
+# Testing target
+PHONY += rusttest
+rusttest: prepare0
+ $(Q)$(MAKE) $(build)=rust $@
+
+# Formatting targets
+PHONY += rustfmt rustfmtcheck
+
+# We skip `rust/alloc` since we want to minimize the diff w.r.t. upstream.
+#
+# We match using absolute paths since `find` does not resolve them
+# when matching, which is a problem when e.g. `srctree` is `..`.
+# We `grep` afterwards in order to remove the directory entry itself.
+rustfmt:
+ $(Q)find $(abs_srctree) -type f -name '*.rs' \
+ -o -path $(abs_srctree)/rust/alloc -prune \
+ -o -path $(abs_objtree)/rust/test -prune \
+ | grep -Fv $(abs_srctree)/rust/alloc \
+ | grep -Fv $(abs_objtree)/rust/test \
+ | xargs $(RUSTFMT) $(rustfmt_flags)
+
+rustfmtcheck: rustfmt_flags = --check
+rustfmtcheck: rustfmt
+
+# IDE support targets
+PHONY += rust-analyzer
+rust-analyzer: prepare0
+ $(Q)$(MAKE) $(build)=rust $@
+
# Misc
# ---------------------------------------------------------------------------
@@ -1855,6 +2004,7 @@ clean: $(clean-dirs)
$(call cmd,rmfiles)
@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
\( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
+ -o -name '*.rmeta' \
-o -name '*.ko.*' \
-o -name '*.dtb' -o -name '*.dtbo' -o -name '*.dtb.S' -o -name '*.dt.yaml' \
-o -name '*.dwo' -o -name '*.lst' \
diff --git a/arch/arm/rust/target.json b/arch/arm/rust/target.json
new file mode 100644
index 000000000000..37710eb727b1
--- /dev/null
+++ b/arch/arm/rust/target.json
@@ -0,0 +1,28 @@
+{
+ "arch": "arm",
+ "crt-static-respected": true,
+ "data-layout": "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64",
+ "dynamic-linking": true,
+ "env": "gnu",
+ "executables": true,
+ "features": "+strict-align,+v6",
+ "function-sections": false,
+ "has-elf-tls": true,
+ "has-rpath": true,
+ "is-builtin": true,
+ "linker-is-gnu": true,
+ "llvm-target": "arm-unknown-linux-gnueabi",
+ "max-atomic-width": 64,
+ "os": "linux",
+ "position-independent-executables": true,
+ "pre-link-args": {
+ "gcc": [
+ "-Wl,--as-needed",
+ "-Wl,-z,noexecstack"
+ ]
+ },
+ "relocation-model": "static",
+ "target-family": "unix",
+ "target-mcount": "\u0001__gnu_mcount_nc",
+ "target-pointer-width": "32"
+}
diff --git a/arch/arm64/rust/target.json b/arch/arm64/rust/target.json
new file mode 100644
index 000000000000..9ea86ed6c736
--- /dev/null
+++ b/arch/arm64/rust/target.json
@@ -0,0 +1,35 @@
+{
+ "arch": "aarch64",
+ "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128",
+ "disable-redzone": true,
+ "eliminate-frame-pointer": false,
+ "emit-debug-gdb-scripts": false,
+ "env": "gnu",
+ "features": "+strict-align,+neon,+fp-armv8",
+ "function-sections": false,
+ "is-builtin": true,
+ "linker-flavor": "gcc",
+ "linker-is-gnu": true,
+ "llvm-target": "aarch64-unknown-none",
+ "max-atomic-width": 128,
+ "needs-plt": true,
+ "os": "none",
+ "panic-strategy": "abort",
+ "position-independent-executables": true,
+ "pre-link-args": {
+ "gcc": [
+ "-Wl,--as-needed",
+ "-Wl,-z,noexecstack",
+ "-m64"
+ ]
+ },
+ "relocation-model": "static",
+ "relro-level": "full",
+ "stack-probes": {
+ "kind": "none"
+ },
+ "target-c-int-width": "32",
+ "target-endian": "little",
+ "target-pointer-width": "64",
+ "vendor": ""
+}
diff --git a/arch/powerpc/rust/target.json b/arch/powerpc/rust/target.json
new file mode 100644
index 000000000000..1e53f8308092
--- /dev/null
+++ b/arch/powerpc/rust/target.json
@@ -0,0 +1,30 @@
+{
+ "arch": "powerpc64",
+ "code-mode": "kernel",
+ "cpu": "ppc64le",
+ "data-layout": "e-m:e-i64:64-n32:64",
+ "env": "gnu",
+ "features": "-altivec,-vsx,-hard-float",
+ "function-sections": false,
+ "is-builtin": true,
+ "linker-flavor": "gcc",
+ "linker-is-gnu": true,
+ "llvm-target": "powerpc64le-elf",
+ "max-atomic-width": 64,
+ "os": "none",
+ "panic-strategy": "abort",
+ "position-independent-executables": true,
+ "pre-link-args": {
+ "gcc": [
+ "-Wl,--as-needed",
+ "-Wl,-z,noexecstack",
+ "-m64"
+ ]
+ },
+ "relocation-model": "static",
+ "relro-level": "full",
+ "target-family": "unix",
+ "target-mcount": "_mcount",
+ "target-endian": "little",
+ "target-pointer-width": "64"
+}
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index bc74afdbf31e..d87a6ace9bb1 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -54,6 +54,7 @@ riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
KBUILD_AFLAGS += -march=$(riscv-march-y)
+KBUILD_RUST_TARGET := $(srctree)/arch/riscv/rust/$(subst fd,,$(riscv-march-y)).json
KBUILD_CFLAGS += -mno-save-restore
KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)
diff --git a/arch/riscv/rust/rv32ima.json b/arch/riscv/rust/rv32ima.json
new file mode 100644
index 000000000000..8e9b46011e7c
--- /dev/null
+++ b/arch/riscv/rust/rv32ima.json
@@ -0,0 +1,37 @@
+{
+ "arch": "riscv32",
+ "code-model": "medium",
+ "cpu": "generic-rv32",
+ "data-layout": "e-m:e-p:32:32-i64:64-n32-S128",
+ "disable-redzone": true,
+ "eliminate-frame-pointer": false,
+ "emit-debug-gdb-scripts": false,
+ "env": "gnu",
+ "features": "+m,+a",
+ "function-sections": false,
+ "is-builtin": true,
+ "linker-flavor": "gcc",
+ "linker-is-gnu": true,
+ "llvm-target": "riscv32",
+ "max-atomic-width": 32,
+ "needs-plt": true,
+ "os": "none",
+ "panic-strategy": "abort",
+ "position-independent-executables": true,
+ "pre-link-args": {
+ "gcc": [
+ "-Wl,--as-needed",
+ "-Wl,-z,noexecstack",
+ "-m32"
+ ]
+ },
+ "relocation-model": "static",
+ "relro-level": "full",
+ "stack-probes": {
+ "kind": "none"
+ },
+ "target-c-int-width": "32",
+ "target-endian": "little",
+ "target-pointer-width": "32",
+ "vendor": ""
+}
diff --git a/arch/riscv/rust/rv32imac.json b/arch/riscv/rust/rv32imac.json
new file mode 100644
index 000000000000..2b3a139da999
--- /dev/null
+++ b/arch/riscv/rust/rv32imac.json
@@ -0,0 +1,37 @@
+{
+ "arch": "riscv32",
+ "code-model": "medium",
+ "cpu": "generic-rv32",
+ "data-layout": "e-m:e-p:32:32-i64:64-n32-S128",
+ "disable-redzone": true,
+ "eliminate-frame-pointer": false,
+ "emit-debug-gdb-scripts": false,
+ "env": "gnu",
+ "features": "+m,+a,+c",
+ "function-sections": false,
+ "is-builtin": true,
+ "linker-flavor": "gcc",
+ "linker-is-gnu": true,
+ "llvm-target": "riscv32",
+ "max-atomic-width": 32,
+ "needs-plt": true,
+ "os": "none",
+ "panic-strategy": "abort",
+ "position-independent-executables": true,
+ "pre-link-args": {
+ "gcc": [
+ "-Wl,--as-needed",
+ "-Wl,-z,noexecstack",
+ "-m32"
+ ]
+ },
+ "relocation-model": "static",
+ "relro-level": "full",
+ "stack-probes": {
+ "kind": "none"
+ },
+ "target-c-int-width": "32",
+ "target-endian": "little",
+ "target-pointer-width": "32",
+ "vendor": ""
+}
diff --git a/arch/riscv/rust/rv64ima.json b/arch/riscv/rust/rv64ima.json
new file mode 100644
index 000000000000..091da50069a3
--- /dev/null
+++ b/arch/riscv/rust/rv64ima.json
@@ -0,0 +1,37 @@
+{
+ "arch": "riscv64",
+ "code-model": "medium",
+ "cpu": "generic-rv64",
+ "data-layout": "e-m:e-p:64:64-i64:64-i128:128-n64-S128",
+ "disable-redzone": true,
+ "eliminate-frame-pointer": false,
+ "emit-debug-gdb-scripts": false,
+ "env": "gnu",
+ "features": "+m,+a",
+ "function-sections": false,
+ "is-builtin": true,
+ "linker-flavor": "gcc",
+ "linker-is-gnu": true,
+ "llvm-target": "riscv64",
+ "max-atomic-width": 64,
+ "needs-plt": true,
+ "os": "none",
+ "panic-strategy": "abort",
+ "position-independent-executables": true,
+ "pre-link-args": {
+ "gcc": [
+ "-Wl,--as-needed",
+ "-Wl,-z,noexecstack",
+ "-m64"
+ ]
+ },
+ "relocation-model": "static",
+ "relro-level": "full",
+ "stack-probes": {
+ "kind": "none"
+ },
+ "target-c-int-width": "32",
+ "target-endian": "little",
+ "target-pointer-width": "64",
+ "vendor": ""
+}
diff --git a/arch/riscv/rust/rv64imac.json b/arch/riscv/rust/rv64imac.json
new file mode 100644
index 000000000000..aa5a8f4549f1
--- /dev/null
+++ b/arch/riscv/rust/rv64imac.json
@@ -0,0 +1,37 @@
+{
+ "arch": "riscv64",
+ "code-model": "medium",
+ "cpu": "generic-rv64",
+ "data-layout": "e-m:e-p:64:64-i64:64-i128:128-n64-S128",
+ "disable-redzone": true,
+ "eliminate-frame-pointer": false,
+ "emit-debug-gdb-scripts": false,
+ "env": "gnu",
+ "features": "+m,+a,+c",
+ "function-sections": false,
+ "is-builtin": true,
+ "linker-flavor": "gcc",
+ "linker-is-gnu": true,
+ "llvm-target": "riscv64",
+ "max-atomic-width": 64,
+ "needs-plt": true,
+ "os": "none",
+ "panic-strategy": "abort",
+ "position-independent-executables": true,
+ "pre-link-args": {
+ "gcc": [
+ "-Wl,--as-needed",
+ "-Wl,-z,noexecstack",
+ "-m64"
+ ]
+ },
+ "relocation-model": "static",
+ "relro-level": "full",
+ "stack-probes": {
+ "kind": "none"
+ },
+ "target-c-int-width": "32",
+ "target-endian": "little",
+ "target-pointer-width": "64",
+ "vendor": ""
+}
diff --git a/arch/x86/rust/target.json b/arch/x86/rust/target.json
new file mode 100644
index 000000000000..76ac800d38ef
--- /dev/null
+++ b/arch/x86/rust/target.json
@@ -0,0 +1,37 @@
+{
+ "arch": "x86_64",
+ "code-model": "kernel",
+ "cpu": "x86-64",
+ "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128",
+ "disable-redzone": true,
+ "eliminate-frame-pointer": false,
+ "emit-debug-gdb-scripts": false,
+ "env": "gnu",
+ "features": "-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float",
+ "function-sections": false,
+ "is-builtin": true,
+ "linker-flavor": "gcc",
+ "linker-is-gnu": true,
+ "llvm-target": "x86_64-elf",
+ "max-atomic-width": 64,
+ "needs-plt": true,
+ "os": "none",
+ "panic-strategy": "abort",
+ "position-independent-executables": true,
+ "pre-link-args": {
+ "gcc": [
+ "-Wl,--as-needed",
+ "-Wl,-z,noexecstack",
+ "-m64"
+ ]
+ },
+ "relocation-model": "static",
+ "relro-level": "full",
+ "stack-probes": {
+ "kind": "none"
+ },
+ "target-c-int-width": "32",
+ "target-endian": "little",
+ "target-pointer-width": "64",
+ "vendor": "unknown"
+}
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index a1d6fc82d7f0..6851c2313cad 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -15,7 +15,7 @@
#include <asm/sections.h>
-#define KSYM_NAME_LEN 128
+#define KSYM_NAME_LEN 512
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \
(KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 79897841a2cc..a022992725be 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -331,12 +331,17 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
#ifdef CONFIG_DEBUG_SPINLOCK
-# define spin_lock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- __raw_spin_lock_init(spinlock_check(lock), \
- #lock, &__key, LD_WAIT_CONFIG); \
+static inline void __spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+ __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
+}
+
+# define spin_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __spin_lock_init(lock, #lock, &__key); \
} while (0)
#else
diff --git a/init/Kconfig b/init/Kconfig
index cfec942df25f..50a636163007 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -60,6 +60,15 @@ config LLD_VERSION
default $(ld-version) if LD_IS_LLD
default 0
+config HAS_RUST
+ depends on ARM64 || CPU_32v6 || CPU_32v6K || (PPC64 && CPU_LITTLE_ENDIAN) || X86_64 || RISCV
+ def_bool $(success,$(RUSTC) --version)
+
+config RUSTC_VERSION
+ depends on HAS_RUST
+ int
+ default $(shell,$(srctree)/scripts/rust-version.sh $(RUSTC))
+
config CC_CAN_LINK
bool
default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT
@@ -2028,6 +2037,25 @@ config PROFILING
Say Y here to enable the extended profiling support mechanisms used
by profilers.
+config RUST
+ bool "Rust support"
+ depends on HAS_RUST
+ depends on !COMPILE_TEST
+ depends on !MODVERSIONS
+ default n
+ help
+ Enables Rust support in the kernel.
+
+ This allows other Rust-related options, like drivers written in Rust,
+ to be selected.
+
+ It is also required to be able to load external kernel modules
+ written in Rust.
+
+ See Documentation/rust/ for more information.
+
+ If unsure, say N.
+
#
# Place an empty function call at each tracepoint site. Can be
# dynamically changed for a probe function.
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 0ba87982d017..4067564ec59f 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -76,6 +76,13 @@ static unsigned int kallsyms_expand_symbol(unsigned int off,
*/
off += len + 1;
+ /* If zero, it is a "big" symbol, so a two-byte length follows. */
+ if (len == 0) {
+ len = (data[0] << 8) | data[1];
+ data += 2;
+ off += len + 2;
+ }
+
/*
* For every byte on the compressed symbol data, copy the table
* entry for that byte.
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 335d988bd811..73874e5edfda 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -213,7 +213,7 @@ static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
* we use the smallest/strictest upper bound possible (56, based on
* the current definition of MODULE_NAME_LEN) to prevent overflows.
*/
- BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
+ BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512);
relas = (Elf_Rela *) relasec->sh_addr;
/* For each rela in this klp relocation section */
@@ -227,7 +227,7 @@ static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
/* Format: .klp.sym.sym_objname.sym_name,sympos */
cnt = sscanf(strtab + sym->st_name,
- ".klp.sym.%55[^.].%127[^,],%lu",
+ ".klp.sym.%55[^.].%511[^,],%lu",
sym_objname, sym_name, &sympos);
if (cnt != 3) {
pr_err("symbol %s has an incorrectly formatted name\n",
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 849afb0bd6d7..9b3bd6e017f1 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -392,7 +392,10 @@ static struct latched_seq clear_seq = {
/* the maximum size of a formatted record (i.e. with prefix added per line) */
#define CONSOLE_LOG_MAX 1024
-/* the maximum size allowed to be reserved for a record */
+/*
+ * The maximum size allowed to be reserved for a record.
+ * Keep in sync with rust/kernel/print.rs.
+ */
#define LOG_LINE_MAX (CONSOLE_LOG_MAX - PREFIX_MAX)
#define LOG_LEVEL(v) ((v) & 0x07)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 05250bcf3133..b41252e664d7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2652,6 +2652,150 @@ config HYPERV_TESTING
endmenu # "Kernel Testing and Coverage"
+menu "Rust hacking"
+
+config RUST_DEBUG_ASSERTIONS
+ bool "Debug assertions"
+ default n
+ depends on RUST
+ help
+ Enables rustc's `-Cdebug-assertions` codegen option.
+
+ This flag lets you turn `cfg(debug_assertions)` conditional
+ compilation on or off. This can be used to enable extra debugging
+ code in development but not in production. For example, it controls
+ the behavior of the standard library's `debug_assert!` macro.
+
+ Note that this will apply to all Rust code, including `core`.
+
+ If unsure, say N.
+
+config RUST_OVERFLOW_CHECKS
+ bool "Overflow checks"
+ default y
+ depends on RUST
+ help
+ Enables rustc's `-Coverflow-checks` codegen option.
+
+ This flag allows you to control the behavior of runtime integer
+ overflow. When overflow-checks are enabled, a panic will occur
+ on overflow.
+
+ Note that this will apply to all Rust code, including `core`.
+
+ If unsure, say Y.
+
+choice
+ prompt "Optimization level"
+ default RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C
+ depends on RUST
+ help
+ Controls rustc's `-Copt-level` codegen option.
+
+ This flag controls the optimization level.
+
+ If unsure, say "Similar as chosen for C".
+
+config RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C
+ bool "Similar as chosen for C"
+ help
+ This choice will pick an optimization level similar to the one chosen
+ in the "Compiler optimization level" choice for C:
+
+ -O2 is currently mapped to -Copt-level=2
+ -O3 is currently mapped to -Copt-level=3
+ -Os is currently mapped to -Copt-level=z
+
+ The mapping may change over time to follow the intended semantics
+ of the choice for C as sensibly as possible.
+
+ This is the default.
+
+config RUST_OPT_LEVEL_0
+ bool "No optimizations (-Copt-level=0)"
+ help
+ Not recommended for most purposes. It may come in handy for debugging
+ suspected optimizer bugs, unexpected undefined behavior, etc.
+
+ Note that this level will *not* enable debug assertions or overflow
+ checks on its own (unlike what happens when invoking rustc directly).
+ Use the corresponding configuration options to control that instead,
+ orthogonally.
+
+ Note this level may cause excessive stack usage, which can lead to stack
+ overflow and subsequent crashes.
+
+config RUST_OPT_LEVEL_1
+ bool "Basic optimizations (-Copt-level=1)"
+ help
+ Useful for debugging without getting too lost, but without
+ the overhead and boilerplate of no optimizations at all.
+
+ Note this level may cause excessive stack usage, which can lead to stack
+ overflow and subsequent crashes.
+
+config RUST_OPT_LEVEL_2
+ bool "Some optimizations (-Copt-level=2)"
+ help
+ The sensible choice in most cases.
+
+config RUST_OPT_LEVEL_3
+ bool "All optimizations (-Copt-level=3)"
+ help
+ Yet more performance (hopefully).
+
+config RUST_OPT_LEVEL_S
+ bool "Optimize for size (-Copt-level=s)"
+ help
+ Smaller kernel, ideally without too much performance loss.
+
+config RUST_OPT_LEVEL_Z
+ bool "Optimize for size, no loop vectorization (-Copt-level=z)"
+ help
+ Like the previous level, but also turn off loop vectorization.
+
+endchoice
+
+choice
+ prompt "Build-time assertions"
+ default RUST_BUILD_ASSERT_ALLOW if RUST_OPT_LEVEL_0
+ default RUST_BUILD_ASSERT_DENY if !RUST_OPT_LEVEL_0
+ depends on RUST
+ help
+ Controls how `build_error!` and `build_assert!` are handled during the build.
+
+ If calls to them exist in the binary, it may indicate a violated invariant
+ or that the optimizer failed to verify the invariant during compilation.
+ You can choose to abort compilation or ignore them during build and let the
+ check be carried to runtime.
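+
+	  For example, assuming kernel code contains a line like
+	  `build_assert!(size <= PAGE_SIZE);`, "Deny" aborts the build if
+	  the optimizer cannot prove the condition at compile time, while
+	  "Allow" turns any leftover call into a runtime panic.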
+
+ If optimizations are turned off, you cannot select "Deny".
+
+ If unsure, say "Deny".
+
+config RUST_BUILD_ASSERT_ALLOW
+ bool "Allow"
+ help
+ Unoptimized calls to `build_error!` will be converted to `panic!`
+ and checked at runtime.
+
+config RUST_BUILD_ASSERT_WARN
+ bool "Warn"
+ help
+ Unoptimized calls to `build_error!` will be converted to `panic!`
+ and checked at runtime, but warnings will be generated when building.
+
+config RUST_BUILD_ASSERT_DENY
+ bool "Deny"
+ depends on !RUST_OPT_LEVEL_0
+ help
+ Unoptimized calls to `build_error!` will abort compilation.
+
+endchoice
+
+endmenu # "Rust hacking"
+
source "Documentation/Kconfig"
endmenu # Kernel hacking
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 26c83943748a..dd006adfe853 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2225,6 +2225,10 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
return widen_string(buf, buf - buf_start, end, spec);
}
+#ifdef CONFIG_RUST
+char *rust_fmt_argument(char *buf, char *end, void *ptr);
+#endif
+
/* Disable pointer hashing if requested */
bool no_hash_pointers __ro_after_init;
EXPORT_SYMBOL_GPL(no_hash_pointers);
@@ -2380,6 +2384,10 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
*
* Note: The default behaviour (unadorned %p) is to hash the address,
* rendering it useful as a unique identifier.
+ *
+ * There is also a '%pA' format specifier, but it is only intended to be used
+ * from Rust code to format core::fmt::Arguments. Do *not* use it from C.
+ * See rust/kernel/print.rs for details.
*/
static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
@@ -2452,6 +2460,10 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return device_node_string(buf, end, ptr, spec, fmt + 1);
case 'f':
return fwnode_string(buf, end, ptr, spec, fmt + 1);
+#ifdef CONFIG_RUST
+ case 'A':
+ return rust_fmt_argument(buf, end, ptr);
+#endif
case 'x':
return pointer_string(buf, end, ptr, spec);
case 'e':
diff --git a/rust/.gitignore b/rust/.gitignore
new file mode 100644
index 000000000000..c6186b71e1c3
--- /dev/null
+++ b/rust/.gitignore
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+bindings_generated.rs
+exports_*_generated.h
+doc/
+test/
diff --git a/rust/Makefile b/rust/Makefile
new file mode 100644
index 000000000000..233b8f365482
--- /dev/null
+++ b/rust/Makefile
@@ -0,0 +1,316 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_RUST) += core.o compiler_builtins.o helpers.o
+extra-$(CONFIG_RUST) += exports_core_generated.h
+
+extra-$(CONFIG_RUST) += libmacros.so
+
+extra-$(CONFIG_RUST) += bindings_generated.rs
+obj-$(CONFIG_RUST) += alloc.o kernel.o
+extra-$(CONFIG_RUST) += exports_alloc_generated.h exports_kernel_generated.h
+
+ifdef CONFIG_RUST_BUILD_ASSERT_DENY
+extra-$(CONFIG_RUST) += build_error.o
+else
+obj-$(CONFIG_RUST) += build_error.o
+endif
+
+obj-$(CONFIG_RUST) += exports.o
+
+ifeq ($(quiet),silent_)
+cargo_quiet=-q
+rust_test_quiet=-q
+rustdoc_test_quiet=--test-args -q
+else ifeq ($(quiet),quiet_)
+rust_test_quiet=-q
+rustdoc_test_quiet=--test-args -q
+else
+cargo_quiet=--verbose
+endif
+
+quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
+ cmd_rustdoc = \
+ RUST_BINDINGS_FILE=$(abspath $(objtree)/rust/bindings_generated.rs) \
+ $(RUSTDOC) $(if $(rustdoc_host),,$(rust_cross_flags)) \
+ $(filter-out -Cpanic=abort, $(filter-out --emit=%, $(rust_flags))) \
+ $(rustc_target_flags) -L $(objtree)/rust \
+ --output $(objtree)/rust/doc --crate-name $(subst rustdoc-,,$@) \
+ @$(objtree)/include/generated/rustc_cfg $<
+
+# This is a temporary fix for the CSS, visible on `type`s (e.g. `Result`).
+# It is already fixed in nightly.
+RUSTDOC_FIX_BEFORE := .impl,.method,.type:not(.container-rustdoc),.associatedconstant,.associatedtype
+RUSTDOC_FIX_AFTER := .impl,.impl-items .method,.methods .method,.impl-items \
+ .type,.methods .type,.impl-items .associatedconstant,.methods \
+ .associatedconstant,.impl-items .associatedtype,.methods .associatedtype
+
+rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins rustdoc-alloc rustdoc-kernel
+ $(Q)cp $(srctree)/Documentation/rust/assets/* $(objtree)/rust/doc
+ $(Q)sed -i "s/$(RUSTDOC_FIX_BEFORE)/$(RUSTDOC_FIX_AFTER)/" \
+ $(objtree)/rust/doc/rustdoc.css
+
+rustdoc-macros: private rustdoc_host = yes
+rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
+ --extern proc_macro
+rustdoc-macros: $(srctree)/rust/macros/lib.rs FORCE
+ $(call if_changed,rustdoc)
+
+rustdoc-compiler_builtins: $(srctree)/rust/compiler_builtins.rs rustdoc-core FORCE
+ $(call if_changed,rustdoc)
+
+# We need to allow `broken_intra_doc_links` because some
+# `no_global_oom_handling` functions refer to non-`no_global_oom_handling`
+# functions. Ideally `rustdoc` would have a way to distinguish broken links
+# due to things that are "configured out" vs. entirely non-existing ones.
+rustdoc-alloc: private rustc_target_flags = --cfg no_global_oom_handling \
+ -Abroken_intra_doc_links
+rustdoc-alloc: $(srctree)/rust/alloc/lib.rs rustdoc-core \
+ rustdoc-compiler_builtins FORCE
+ $(call if_changed,rustdoc)
+
+rustdoc-kernel: private rustc_target_flags = --extern alloc \
+ --extern build_error \
+ --extern macros=$(objtree)/rust/libmacros.so
+rustdoc-kernel: $(srctree)/rust/kernel/lib.rs rustdoc-core \
+ rustdoc-macros rustdoc-compiler_builtins rustdoc-alloc \
+ $(objtree)/rust/libmacros.so $(objtree)/rust/bindings_generated.rs FORCE
+ $(call if_changed,rustdoc)
+
+quiet_cmd_rustc_test_library = RUSTC TL $<
+ cmd_rustc_test_library = \
+ RUST_BINDINGS_FILE=$(abspath $(objtree)/rust/bindings_generated.rs) \
+ $(RUSTC) $(filter-out --sysroot=%, $(filter-out -Cpanic=abort, $(filter-out --emit=%, $(rust_flags)))) \
+ $(rustc_target_flags) --crate-type $(if $(rustc_test_library_proc),proc-macro,rlib) \
+ --out-dir $(objtree)/rust/test/ --cfg testlib \
+ --sysroot $(objtree)/rust/test/sysroot \
+ -L $(objtree)/rust/test/ --crate-name $(subst rusttest-,,$(subst rusttestlib-,,$@)) $<
+
+rusttestlib-build_error: $(srctree)/rust/build_error.rs rusttest-prepare FORCE
+ $(call if_changed,rustc_test_library)
+
+rusttestlib-macros: private rustc_target_flags = --extern proc_macro
+rusttestlib-macros: private rustc_test_library_proc = yes
+rusttestlib-macros: $(srctree)/rust/macros/lib.rs rusttest-prepare FORCE
+ $(call if_changed,rustc_test_library)
+
+quiet_cmd_rustdoc_test = RUSTDOC T $<
+ cmd_rustdoc_test = \
+ RUST_BINDINGS_FILE=$(abspath $(objtree)/rust/bindings_generated.rs) \
+ $(RUSTDOC) --test $(filter-out --sysroot=%, $(filter-out -Cpanic=abort, $(filter-out --emit=%, $(rust_flags)))) \
+ $(rustc_target_flags) $(rustdoc_test_target_flags) \
+ --sysroot $(objtree)/rust/test/sysroot $(rustdoc_test_quiet) \
+ -L $(objtree)/rust/test \
+ --output $(objtree)/rust/doc --crate-name $(subst rusttest-,,$@) \
+ @$(objtree)/include/generated/rustc_cfg $<
+
+# We cannot use `-Zpanic-abort-tests` because some tests are dynamic,
+# so for the moment we skip `-Cpanic=abort`.
+quiet_cmd_rustc_test = RUSTC T $<
+ cmd_rustc_test = \
+ RUST_BINDINGS_FILE=$(abspath $(objtree)/rust/bindings_generated.rs) \
+ $(RUSTC) --test $(filter-out --sysroot=%, $(filter-out -Cpanic=abort, $(filter-out --emit=%, $(rust_flags)))) \
+ $(rustc_target_flags) --out-dir $(objtree)/rust/test \
+ --sysroot $(objtree)/rust/test/sysroot \
+ -L $(objtree)/rust/test/ --crate-name $(subst rusttest-,,$@) $<; \
+ $(objtree)/rust/test/$(subst rusttest-,,$@) $(rust_test_quiet) \
+ $(rustc_test_run_flags)
+
+rusttest: rusttest-macros rusttest-kernel
+
+# This prepares a custom sysroot with our custom `alloc` instead of
+# the standard one.
+#
+# This requires several hacks:
+# - Unlike `core` and `alloc`, `std` depends on more than a dozen crates,
+# including third-party crates that need to be downloaded, plus custom
+# `build.rs` steps. Thus hardcoding things here is not maintainable.
+# - `cargo` knows how to build the standard library, but it is an unstable
+# feature so far (`-Zbuild-std`).
+# - `cargo` only considers the use case of building the standard library
+# to use it in a given package. Thus we need to create a dummy package
+# and pick the generated libraries from there.
+# - Since we only keep a subset of upstream `alloc` in-tree, we need
+# to recreate it on the fly by putting our sources on top.
+# - The usual ways of modifying the dependency graph in `cargo` do not seem
+# to apply for the `-Zbuild-std` steps, thus we have to mislead it
+# by modifying the sources in the sysroot.
+# - To avoid messing with the user's Rust installation, we create a clone
+# of the sysroot. However, `cargo` ignores `RUSTFLAGS` in the `-Zbuild-std`
+# steps, thus we use a wrapper binary passed via `RUSTC` to pass the flag.
+#
+# In the future, we hope to avoid the whole ordeal by either:
+# - Making the `test` crate not depend on `std` (either improving upstream
+# or having our own custom crate).
+# - Making the tests run in kernel space (requires the previous point).
+# - Making `std` and friends be more like a "normal" crate, so that
+# `-Zbuild-std` and related hacks are not needed.
+quiet_cmd_rustsysroot = RUSTSYSROOT
+ cmd_rustsysroot = \
+ rm -rf $(objtree)/rust/test; \
+ mkdir -p $(objtree)/rust/test; \
+ cp -a $(rustc_sysroot) $(objtree)/rust/test/sysroot; \
+ cp -r $(srctree)/rust/alloc/* \
+ $(objtree)/rust/test/sysroot/lib/rustlib/src/rust/library/alloc/src; \
+ echo '\#!/bin/sh' > $(objtree)/rust/test/rustc_sysroot; \
+ echo "$(RUSTC) --sysroot=$(abspath $(objtree)/rust/test/sysroot) \"\$$@\"" \
+ >> $(objtree)/rust/test/rustc_sysroot; \
+ chmod u+x $(objtree)/rust/test/rustc_sysroot; \
+ $(CARGO) -q new $(objtree)/rust/test/dummy; \
+ RUSTC=$(objtree)/rust/test/rustc_sysroot $(CARGO) $(cargo_quiet) \
+ test -Zbuild-std --target $(rustc_host_target) \
+ --manifest-path $(objtree)/rust/test/dummy/Cargo.toml; \
+ rm $(objtree)/rust/test/sysroot/lib/rustlib/$(rustc_host_target)/lib/*; \
+ cp $(objtree)/rust/test/dummy/target/$(rustc_host_target)/debug/deps/* \
+ $(objtree)/rust/test/sysroot/lib/rustlib/$(rustc_host_target)/lib
+
+rusttest-prepare: FORCE
+ $(call if_changed,rustsysroot)
+
+rusttest-macros: private rustc_target_flags = --extern proc_macro
+rusttest-macros: private rustdoc_test_target_flags = --crate-type proc-macro
+rusttest-macros: $(srctree)/rust/macros/lib.rs rusttest-prepare FORCE
+ $(call if_changed,rustc_test)
+ $(call if_changed,rustdoc_test)
+
+rusttest-kernel: private rustc_target_flags = --extern alloc \
+ --extern build_error --extern macros
+rusttest-kernel: private rustc_test_run_flags = \
+ --skip bindgen_test_layout_
+rusttest-kernel: $(srctree)/rust/kernel/lib.rs rusttest-prepare \
+ rusttestlib-build_error rusttestlib-macros FORCE
+ $(call if_changed,rustc_test)
+ $(call if_changed,rustc_test_library)
+ $(call if_changed,rustdoc_test)
+
+ifdef CONFIG_CC_IS_CLANG
+bindgen_c_flags = $(c_flags)
+else
+# bindgen relies on libclang to parse C. Ideally, bindgen would support a GCC
+# plugin backend and/or the Clang driver would be perfectly compatible with GCC.
+#
+# For the moment, here we are tweaking the flags on the fly. Some config
+# options may not work (e.g. `GCC_PLUGIN_RANDSTRUCT` if we end up using one
+# of those structs). We might want to redo how Clang flags are kept track of
+# in the general `Makefile` even for GCC builds, similar to what we did with
+# `TENTATIVE_CLANG_FLAGS`.
+bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
+ -mskip-rax-setup -mgeneral-regs-only -msign-return-address=% \
+ -mindirect-branch=thunk-extern -mindirect-branch-register -mrecord-mcount \
+ -mabi=lp64 -mstack-protector-guard% -fconserve-stack -falign-jumps=% \
+ -falign-loops=% -fno-ipa-cp-clone -fno-partial-inlining \
+ -fno-reorder-blocks -fno-allow-store-data-races -fasan-shadow-offset=% \
+ -Wno-packed-not-aligned -Wno-format-truncation -Wno-format-overflow \
+ -Wno-stringop-truncation -Wno-unused-but-set-variable \
+ -Wno-stringop-overflow -Wno-restrict -Wno-maybe-uninitialized \
+ -Werror=designated-init -Wno-zero-length-bounds \
+ --param=% --param asan-%
+
+# PowerPC
+bindgen_skip_c_flags += -mtraceback=no -mno-pointers-to-nested-functions \
+ -mno-string -mno-strict-align
+
+bindgen_extra_c_flags = $(TENTATIVE_CLANG_FLAGS) -Wno-address-of-packed-member
+bindgen_c_flags = $(filter-out $(bindgen_skip_c_flags), $(c_flags)) \
+ $(bindgen_extra_c_flags)
+endif
+
+# To avoid several recompilations on PowerPC, which inserts `-D_TASK_CPU`
+bindgen_c_flags_final = $(filter-out -D_TASK_CPU=%, $(bindgen_c_flags))
+
+quiet_cmd_bindgen = BINDGEN $@
+ cmd_bindgen = \
+ $(BINDGEN) $< $(shell grep -v '^\#\|^$$' $(srctree)/rust/bindgen_parameters) \
+ --use-core --with-derive-default --ctypes-prefix c_types \
+ --no-debug '.*' \
+ --size_t-is-usize -o $@ -- $(bindgen_c_flags_final) -DMODULE
+
+$(objtree)/rust/bindings_generated.rs: $(srctree)/rust/kernel/bindings_helper.h \
+ $(srctree)/rust/bindgen_parameters FORCE
+ $(call if_changed_dep,bindgen)
+
+quiet_cmd_exports = EXPORTS $@
+ cmd_exports = \
+ $(NM) -p --defined-only $< \
+ | grep -E ' (T|R|D) ' | cut -d ' ' -f 3 \
+ | xargs -Isymbol \
+ echo 'EXPORT_SYMBOL_RUST_GPL(symbol);' > $@
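+
+# For example, assuming `rust_fmt_argument` shows up as a text symbol in
+# kernel.o, the generated header would contain the line:
+#
+#   EXPORT_SYMBOL_RUST_GPL(rust_fmt_argument);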
+
+$(objtree)/rust/exports_core_generated.h: $(objtree)/rust/core.o FORCE
+ $(call if_changed,exports)
+
+$(objtree)/rust/exports_alloc_generated.h: $(objtree)/rust/alloc.o FORCE
+ $(call if_changed,exports)
+
+$(objtree)/rust/exports_kernel_generated.h: $(objtree)/rust/kernel.o FORCE
+ $(call if_changed,exports)
+
+# `-Cpanic=unwind -Cforce-unwind-tables=y` overrides `rust_flags` in order to
+# avoid the https://github.com/rust-lang/rust/issues/82320 rustc crash.
+quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@
+ cmd_rustc_procmacro = \
+ $(RUSTC_OR_CLIPPY) $(rust_flags) \
+ --emit=dep-info,link --extern proc_macro \
+ -Cpanic=unwind -Cforce-unwind-tables=y \
+ --crate-type proc-macro --out-dir $(objtree)/rust/ \
+ --crate-name $(patsubst lib%.so,%,$(notdir $@)) $<; \
+ mv $(objtree)/rust/$(patsubst lib%.so,%,$(notdir $@)).d $(depfile); \
+ sed -i '/^\#/d' $(depfile)
+
+# Procedural macros can only be used with the `rustc` that compiled them.
+# Therefore, to get `libmacros.so` automatically recompiled when the compiler
+# version changes, we add `core.o` as a dependency (even if it is not needed).
+$(objtree)/rust/libmacros.so: $(srctree)/rust/macros/lib.rs \
+ $(objtree)/rust/core.o FORCE
+ $(call if_changed_dep,rustc_procmacro)
+
+quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L $@
+ cmd_rustc_library = \
+ RUST_BINDINGS_FILE=$(abspath $(objtree)/rust/bindings_generated.rs) \
+ $(if $(skip_clippy),$(RUSTC),$(RUSTC_OR_CLIPPY)) \
+ $(rust_flags) $(rust_cross_flags) $(rustc_target_flags) \
+ --crate-type rlib --out-dir $(objtree)/rust/ -L $(objtree)/rust/ \
+ --crate-name $(patsubst %.o,%,$(notdir $@)) $<; \
+ mv $(objtree)/rust/$(patsubst %.o,%,$(notdir $@)).d $(depfile); \
+ sed -i '/^\#/d' $(depfile) \
+ $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@)
+
+# `$(rust_flags)` is passed in case the user added `--sysroot`.
+rustc_sysroot = $(shell $(RUSTC) $(rust_flags) --print sysroot)
+rustc_host_target = $(shell $(RUSTC) --version --verbose | grep -F 'host: ' | cut -d' ' -f2)
+RUST_LIB_SRC ?= $(rustc_sysroot)/lib/rustlib/src/rust/library
+
+rust-analyzer:
+ $(Q)$(srctree)/scripts/generate_rust_analyzer.py $(srctree) $(objtree) $(RUST_LIB_SRC) $(objtree)/rust/bindings_generated.rs > $(objtree)/rust-project.json
+
+$(objtree)/rust/compiler_builtins.o: private rustc_objcopy = -w -W '__*'
+$(objtree)/rust/compiler_builtins.o: $(srctree)/rust/compiler_builtins.rs \
+ $(objtree)/rust/core.o FORCE
+ $(call if_changed_dep,rustc_library)
+
+$(objtree)/rust/alloc.o: private skip_clippy = 1
+$(objtree)/rust/alloc.o: private rustc_target_flags = --cfg no_global_oom_handling
+$(objtree)/rust/alloc.o: $(srctree)/rust/alloc/lib.rs \
+ $(objtree)/rust/compiler_builtins.o FORCE
+ $(call if_changed_dep,rustc_library)
+
+$(objtree)/rust/build_error.o: $(srctree)/rust/build_error.rs \
+ $(objtree)/rust/compiler_builtins.o FORCE
+ $(call if_changed_dep,rustc_library)
+
+# ICE on `--extern macros`: https://github.com/rust-lang/rust/issues/56935
+$(objtree)/rust/kernel.o: private rustc_target_flags = --extern alloc \
+ --extern build_error \
+ --extern macros=$(objtree)/rust/libmacros.so
+$(objtree)/rust/kernel.o: $(srctree)/rust/kernel/lib.rs $(objtree)/rust/alloc.o \
+ $(objtree)/rust/build_error.o \
+ $(objtree)/rust/libmacros.so $(objtree)/rust/bindings_generated.rs FORCE
+ $(call if_changed_dep,rustc_library)
+
+# Targets that need to expand twice
+.SECONDEXPANSION:
+$(objtree)/rust/core.o: private skip_clippy = 1
+$(objtree)/rust/core.o: $$(RUST_LIB_SRC)/core/src/lib.rs FORCE
+ $(call if_changed_dep,rustc_library)
+
+rustdoc-core: $$(RUST_LIB_SRC)/core/src/lib.rs FORCE
+ $(call if_changed,rustdoc)
diff --git a/rust/alloc/README.md b/rust/alloc/README.md
new file mode 100644
index 000000000000..a1bcc2cef0e6
--- /dev/null
+++ b/rust/alloc/README.md
@@ -0,0 +1,32 @@
+# `alloc`
+
+These source files come from the Rust standard library, hosted in
+the https://github.com/rust-lang/rust repository. For copyright
+details, see https://github.com/rust-lang/rust/blob/master/COPYRIGHT.
+
+Please note that these files should be kept as close as possible to
+upstream. In general, only additions should be performed (e.g. new
+methods). Eventually, changes should make it into upstream so that,
+at some point, this fork can be dropped from the kernel tree.
+
+
+## Rationale
+
+On one hand, kernel folks wanted to keep `alloc` in-tree to have more
+freedom in both workflow and actual features, should they be needed
+(e.g. receiver types if we end up using them), which is reasonable.
+
+On the other hand, Rust folks wanted to keep `alloc` as close to
+upstream as possible, avoiding divergence as much as possible, which
+is also reasonable.
+
+We agreed on a middle ground: we would keep a subset of `alloc`
+in-tree that would be as small and as close to upstream as possible.
+Then, upstream can start adding the functions that we add to `alloc`,
+etc. Once the kernel knows exactly what it needs in `alloc` and all
+the new methods are merged into upstream, we can drop `alloc` from
+the kernel tree and go back to using the upstream one.
+
+By doing this, the kernel can go a bit faster now, and Rust can
+slowly incorporate and discuss the changes as needed.
diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs
new file mode 100644
index 000000000000..a59d64ffb36d
--- /dev/null
+++ b/rust/alloc/alloc.rs
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Memory allocation APIs
+
+#![stable(feature = "alloc_module", since = "1.28.0")]
+
+#[cfg(not(test))]
+use core::intrinsics;
+use core::intrinsics::{min_align_of_val, size_of_val};
+
+use core::ptr::Unique;
+#[cfg(not(test))]
+use core::ptr::{self, NonNull};
+
+#[stable(feature = "alloc_module", since = "1.28.0")]
+#[doc(inline)]
+pub use core::alloc::*;
+
+#[cfg(test)]
+mod tests;
+
+extern "Rust" {
+ // These are the magic symbols to call the global allocator. rustc generates
+ // them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
+ // (the code expanding that attribute macro generates those functions), or to call
+ // the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
+ // otherwise.
+ // The rustc fork of LLVM also special-cases these function names to be able to optimize them
+ // like `malloc`, `realloc`, and `free`, respectively.
+ #[rustc_allocator]
+ #[rustc_allocator_nounwind]
+ fn __rust_alloc(size: usize, align: usize) -> *mut u8;
+ #[rustc_allocator_nounwind]
+ fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
+ #[rustc_allocator_nounwind]
+ fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
+ #[rustc_allocator_nounwind]
+ fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
+}
+
+/// The global memory allocator.
+///
+/// This type implements the [`Allocator`] trait by forwarding calls
+/// to the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// Note: while this type is unstable, the functionality it provides can be
+/// accessed through the [free functions in `alloc`](self#functions).
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[derive(Copy, Clone, Default, Debug)]
+#[cfg(not(test))]
+pub struct Global;
+
+#[cfg(test)]
+pub use std::alloc::Global;
+
+/// Allocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::alloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `alloc` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::alloc`].
+///
+/// # Examples
+///
+/// ```
+/// use std::alloc::{alloc, dealloc, Layout};
+///
+/// unsafe {
+/// let layout = Layout::new::<u16>();
+/// let ptr = alloc(layout);
+///
+/// *(ptr as *mut u16) = 42;
+/// assert_eq!(*(ptr as *mut u16), 42);
+///
+/// dealloc(ptr, layout);
+/// }
+/// ```
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[inline]
+pub unsafe fn alloc(layout: Layout) -> *mut u8 {
+ unsafe { __rust_alloc(layout.size(), layout.align()) }
+}
+
+/// Deallocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::dealloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `dealloc` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::dealloc`].
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[inline]
+pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
+ unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
+}
+
+/// Reallocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::realloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `realloc` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::realloc`].
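+///
+/// # Examples
+///
+/// A minimal sketch of growing an allocation in place (assuming the
+/// allocations succeed; real code must check for null returns):
+///
+/// ```
+/// use std::alloc::{alloc, dealloc, realloc, Layout};
+///
+/// unsafe {
+///     let layout = Layout::new::<u16>();
+///     let ptr = alloc(layout);
+///     *(ptr as *mut u16) = 42;
+///
+///     // Grow the allocation from 2 to 4 bytes, keeping the contents.
+///     let new_ptr = realloc(ptr, layout, 4);
+///     assert_eq!(*(new_ptr as *mut u16), 42);
+///
+///     dealloc(new_ptr, Layout::from_size_align(4, layout.align()).unwrap());
+/// }
+/// ```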
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[inline]
+pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ unsafe { __rust_realloc(ptr, layout.size(), layout.align(), new_size) }
+}
+
+/// Allocate zero-initialized memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `alloc_zeroed` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::alloc_zeroed`].
+///
+/// # Examples
+///
+/// ```
+/// use std::alloc::{alloc_zeroed, dealloc, Layout};
+///
+/// unsafe {
+/// let layout = Layout::new::<u16>();
+/// let ptr = alloc_zeroed(layout);
+///
+/// assert_eq!(*(ptr as *mut u16), 0);
+///
+/// dealloc(ptr, layout);
+/// }
+/// ```
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[inline]
+pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
+ unsafe { __rust_alloc_zeroed(layout.size(), layout.align()) }
+}
+
+#[cfg(not(test))]
+impl Global {
+ #[inline]
+ fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
+ match layout.size() {
+ 0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
+            // SAFETY: `layout` is non-zero in size.
+ size => unsafe {
+ let raw_ptr = if zeroed { alloc_zeroed(layout) } else { alloc(layout) };
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::slice_from_raw_parts(ptr, size))
+ },
+ }
+ }
+
+ // SAFETY: Same as `Allocator::grow`
+ #[inline]
+ unsafe fn grow_impl(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ zeroed: bool,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ match old_layout.size() {
+ 0 => self.alloc_impl(new_layout, zeroed),
+
+            // SAFETY: `new_size` is non-zero, since it is greater than or equal to `old_size`
+            // (as required by the safety conditions) and the `old_size == 0` case was handled
+            // above. Other conditions must be upheld by the caller.
+ old_size if old_layout.align() == new_layout.align() => unsafe {
+ let new_size = new_layout.size();
+
+                // `realloc` likely checks for `new_size >= old_layout.size()` internally;
+                // the `assume` below lets the optimizer elide that check.
+ intrinsics::assume(new_size >= old_layout.size());
+
+ let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ if zeroed {
+ raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
+ }
+ Ok(NonNull::slice_from_raw_parts(ptr, new_size))
+ },
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
+ // both the old and new memory allocation are valid for reads and writes for `old_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ old_size => unsafe {
+ let new_ptr = self.alloc_impl(new_layout, zeroed)?;
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
+ self.deallocate(ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+ }
+}
+
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[cfg(not(test))]
+unsafe impl Allocator for Global {
+ #[inline]
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, false)
+ }
+
+ #[inline]
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, true)
+ }
+
+ #[inline]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ if layout.size() != 0 {
+ // SAFETY: `layout` is non-zero in size,
+ // other conditions must be upheld by the caller
+ unsafe { dealloc(ptr.as_ptr(), layout) }
+ }
+ }
+
+ #[inline]
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
+ }
+
+ #[inline]
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
+ }
+
+ #[inline]
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() <= old_layout.size(),
+ "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
+ );
+
+ match new_layout.size() {
+ // SAFETY: conditions must be upheld by the caller
+ 0 => unsafe {
+ self.deallocate(ptr, old_layout);
+ Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
+ },
+
+ // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
+ new_size if old_layout.align() == new_layout.align() => unsafe {
+                // `realloc` likely checks for `new_size <= old_layout.size()` internally;
+                // the `assume` below lets the optimizer elide that check.
+ intrinsics::assume(new_size <= old_layout.size());
+
+ let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::slice_from_raw_parts(ptr, new_size))
+ },
+
+ // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
+ // both the old and new memory allocation are valid for reads and writes for `new_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ new_size => unsafe {
+ let new_ptr = self.allocate(new_layout)?;
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
+ self.deallocate(ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+ }
+}
+
+/// The allocator for unique pointers.
+// This function must not unwind. If it does, MIR codegen will fail.
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[lang = "exchange_malloc"]
+#[inline]
+unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
+ let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
+ match Global.allocate(layout) {
+ Ok(ptr) => ptr.as_mut_ptr(),
+ Err(_) => handle_alloc_error(layout),
+ }
+}
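+
+// For example, an expression like `box 5i32` (and thus `Box::new(5i32)`,
+// which is implemented as `box x`) lowers to a call of this lang item
+// with `size == 4` and `align == 4`.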
+
+#[cfg_attr(not(test), lang = "box_free")]
+#[inline]
+// This signature has to be the same as `Box`, otherwise an ICE will happen.
+// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
+// well.
+// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
+// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
+pub(crate) unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: Unique<T>, alloc: A) {
+ unsafe {
+ let size = size_of_val(ptr.as_ref());
+ let align = min_align_of_val(ptr.as_ref());
+ let layout = Layout::from_size_align_unchecked(size, align);
+ alloc.deallocate(ptr.cast().into(), layout)
+ }
+}
+
+// # Allocation error handler
+
+#[cfg(not(no_global_oom_handling))]
+extern "Rust" {
+ // This is the magic symbol to call the global alloc error handler. rustc generates
+ // it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
+ // default implementations below (`__rdl_oom`) otherwise.
+ #[rustc_allocator_nounwind]
+ fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
+}
+
+/// Abort on memory allocation error or failure.
+///
+/// Callers of memory allocation APIs wishing to abort computation
+/// in response to an allocation error are encouraged to call this function,
+/// rather than directly invoking `panic!` or similar.
+///
+/// The default behavior of this function is to print a message to standard error
+/// and abort the process.
+/// It can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
+///
+/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html
+/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html
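+///
+/// # Examples
+///
+/// A minimal sketch (marked `no_run`, since calling this function aborts):
+///
+/// ```no_run
+/// use std::alloc::{handle_alloc_error, Layout};
+///
+/// let layout = Layout::new::<u32>();
+/// handle_alloc_error(layout);
+/// ```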
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[rustc_allocator_nounwind]
+#[cold]
+pub fn handle_alloc_error(layout: Layout) -> ! {
+ unsafe {
+ __rust_alloc_error_handler(layout.size(), layout.align());
+ }
+}
+
+// For alloc test `std::alloc::handle_alloc_error` can be used directly.
+#[cfg(all(not(no_global_oom_handling), test))]
+pub use std::alloc::handle_alloc_error;
+
+#[cfg(all(not(no_global_oom_handling), not(any(target_os = "hermit", test))))]
+#[doc(hidden)]
+#[allow(unused_attributes)]
+#[unstable(feature = "alloc_internals", issue = "none")]
+pub mod __alloc_error_handler {
+ use crate::alloc::Layout;
+
+ // called via generated `__rust_alloc_error_handler`
+
+ // if there is no `#[alloc_error_handler]`
+ #[rustc_std_internal_symbol]
+ pub unsafe extern "C" fn __rdl_oom(size: usize, _align: usize) -> ! {
+ panic!("memory allocation of {} bytes failed", size)
+ }
+
+ // if there is a `#[alloc_error_handler]`
+ #[rustc_std_internal_symbol]
+ pub unsafe extern "C" fn __rg_oom(size: usize, align: usize) -> ! {
+ let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
+ extern "Rust" {
+ #[lang = "oom"]
+ fn oom_impl(layout: Layout) -> !;
+ }
+ unsafe { oom_impl(layout) }
+ }
+}
+
+/// Specialize clones into pre-allocated, uninitialized memory.
+/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
+pub(crate) trait WriteCloneIntoRaw: Sized {
+ unsafe fn write_clone_into_raw(&self, target: *mut Self);
+}
+
+impl<T: Clone> WriteCloneIntoRaw for T {
+ #[inline]
+ default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
+ // Having allocated *first* may allow the optimizer to create
+ // the cloned value in-place, skipping the local and move.
+ unsafe { target.write(self.clone()) };
+ }
+}
+
+impl<T: Copy> WriteCloneIntoRaw for T {
+ #[inline]
+ unsafe fn write_clone_into_raw(&self, target: *mut Self) {
+ // We can always copy in-place, without ever involving a local value.
+ unsafe { target.copy_from_nonoverlapping(self, 1) };
+ }
+}
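+
+// A sketch of how a caller such as `Box::clone` is expected to use this
+// trait (illustrative only; the real implementation lives in `boxed.rs`):
+//
+//     let mut boxed = Self::new_uninit_in(self.1.clone());
+//     unsafe {
+//         (**self).write_clone_into_raw(boxed.as_mut_ptr());
+//         boxed.assume_init()
+//     }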
diff --git a/rust/alloc/borrow.rs b/rust/alloc/borrow.rs
new file mode 100644
index 000000000000..beaf7b330f2b
--- /dev/null
+++ b/rust/alloc/borrow.rs
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! A module for working with borrowed data.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::cmp::Ordering;
+use core::hash::{Hash, Hasher};
+use core::ops::Deref;
+#[cfg(not(no_global_oom_handling))]
+use core::ops::{Add, AddAssign};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::borrow::{Borrow, BorrowMut};
+
+use crate::fmt;
+#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
+
+use Cow::*;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B>
+where
+ B: ToOwned,
+ <B as ToOwned>::Owned: 'a,
+{
+ fn borrow(&self) -> &B {
+ &**self
+ }
+}
+
+/// A generalization of `Clone` to borrowed data.
+///
+/// Some types make it possible to go from borrowed to owned, usually by
+/// implementing the `Clone` trait. But `Clone` works only for going from `&T`
+/// to `T`. The `ToOwned` trait generalizes `Clone` to construct owned data
+/// from any borrow of a given type.
+#[cfg_attr(not(test), rustc_diagnostic_item = "ToOwned")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ToOwned {
+ /// The resulting type after obtaining ownership.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Owned: Borrow<Self>;
+
+ /// Creates owned data from borrowed data, usually by cloning.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "a";
+ /// let ss: String = s.to_owned();
+ ///
+ /// let v: &[i32] = &[1, 2];
+ /// let vv: Vec<i32> = v.to_owned();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "cloning is often expensive and is not expected to have side effects"]
+ fn to_owned(&self) -> Self::Owned;
+
+ /// Uses borrowed data to replace owned data, usually by cloning.
+ ///
+    /// This is a borrow-generalized version of `Clone::clone_from`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(toowned_clone_into)]
+ /// let mut s: String = String::new();
+ /// "hello".clone_into(&mut s);
+ ///
+ /// let mut v: Vec<i32> = Vec::new();
+ /// [1, 2][..].clone_into(&mut v);
+ /// ```
+ #[unstable(feature = "toowned_clone_into", reason = "recently added", issue = "41263")]
+ fn clone_into(&self, target: &mut Self::Owned) {
+ *target = self.to_owned();
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ToOwned for T
+where
+ T: Clone,
+{
+ type Owned = T;
+ fn to_owned(&self) -> T {
+ self.clone()
+ }
+
+ fn clone_into(&self, target: &mut T) {
+ target.clone_from(self);
+ }
+}
+
+/// A clone-on-write smart pointer.
+///
+/// The type `Cow` is a smart pointer providing clone-on-write functionality: it
+/// can enclose and provide immutable access to borrowed data, and clone the
+/// data lazily when mutation or ownership is required. The type is designed to
+/// work with general borrowed data via the `Borrow` trait.
+///
+/// `Cow` implements `Deref`, which means that you can call
+/// non-mutating methods directly on the data it encloses. If mutation
+/// is desired, `to_mut` will obtain a mutable reference to an owned
+/// value, cloning if necessary.
+///
+/// If you need reference-counting pointers, note that
+/// [`Rc::make_mut`][crate::rc::Rc::make_mut] and
+/// [`Arc::make_mut`][crate::sync::Arc::make_mut] can provide clone-on-write
+/// functionality as well.
+///
+/// # Examples
+///
+/// ```
+/// use std::borrow::Cow;
+///
+/// fn abs_all(input: &mut Cow<[i32]>) {
+/// for i in 0..input.len() {
+/// let v = input[i];
+/// if v < 0 {
+/// // Clones into a vector if not already owned.
+/// input.to_mut()[i] = -v;
+/// }
+/// }
+/// }
+///
+/// // No clone occurs because `input` doesn't need to be mutated.
+/// let slice = [0, 1, 2];
+/// let mut input = Cow::from(&slice[..]);
+/// abs_all(&mut input);
+///
+/// // Clone occurs because `input` needs to be mutated.
+/// let slice = [-1, 0, 1];
+/// let mut input = Cow::from(&slice[..]);
+/// abs_all(&mut input);
+///
+/// // No clone occurs because `input` is already owned.
+/// let mut input = Cow::from(vec![-1, 0, 1]);
+/// abs_all(&mut input);
+/// ```
+///
+/// Another example showing how to keep `Cow` in a struct:
+///
+/// ```
+/// use std::borrow::Cow;
+///
+/// struct Items<'a, X: 'a> where [X]: ToOwned<Owned = Vec<X>> {
+/// values: Cow<'a, [X]>,
+/// }
+///
+/// impl<'a, X: Clone + 'a> Items<'a, X> where [X]: ToOwned<Owned = Vec<X>> {
+/// fn new(v: Cow<'a, [X]>) -> Self {
+/// Items { values: v }
+/// }
+/// }
+///
+/// // Creates a container from borrowed values of a slice
+/// let readonly = [1, 2];
+/// let borrowed = Items::new((&readonly[..]).into());
+/// match borrowed {
+/// Items { values: Cow::Borrowed(b) } => println!("borrowed {:?}", b),
+/// _ => panic!("expect borrowed value"),
+/// }
+///
+/// let mut clone_on_write = borrowed;
+/// // Mutates the data from slice into owned vec and pushes a new value on top
+/// clone_on_write.values.to_mut().push(3);
+/// println!("clone_on_write = {:?}", clone_on_write.values);
+///
+/// // The data was mutated. Let's check it out.
+/// match clone_on_write {
+/// Items { values: Cow::Owned(_) } => println!("clone_on_write contains owned data"),
+/// _ => panic!("expect owned data"),
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum Cow<'a, B: ?Sized + 'a>
+where
+ B: ToOwned,
+{
+ /// Borrowed data.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Borrowed(#[stable(feature = "rust1", since = "1.0.0")] &'a B),
+
+ /// Owned data.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Owned(#[stable(feature = "rust1", since = "1.0.0")] <B as ToOwned>::Owned),
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized + ToOwned> Clone for Cow<'_, B> {
+ fn clone(&self) -> Self {
+ match *self {
+ Borrowed(b) => Borrowed(b),
+ Owned(ref o) => {
+ let b: &B = o.borrow();
+ Owned(b.to_owned())
+ }
+ }
+ }
+
+ fn clone_from(&mut self, source: &Self) {
+ match (self, source) {
+ (&mut Owned(ref mut dest), &Owned(ref o)) => o.borrow().clone_into(dest),
+ (t, s) => *t = s.clone(),
+ }
+ }
+}
+
+impl<B: ?Sized + ToOwned> Cow<'_, B> {
+ /// Returns true if the data is borrowed, i.e. if `to_mut` would require additional work.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cow_is_borrowed)]
+ /// use std::borrow::Cow;
+ ///
+ /// let cow = Cow::Borrowed("moo");
+ /// assert!(cow.is_borrowed());
+ ///
+ /// let bull: Cow<'_, str> = Cow::Owned("...moo?".to_string());
+ /// assert!(!bull.is_borrowed());
+ /// ```
+ #[unstable(feature = "cow_is_borrowed", issue = "65143")]
+ #[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
+ pub const fn is_borrowed(&self) -> bool {
+ match *self {
+ Borrowed(_) => true,
+ Owned(_) => false,
+ }
+ }
+
+ /// Returns true if the data is owned, i.e. if `to_mut` would be a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cow_is_borrowed)]
+ /// use std::borrow::Cow;
+ ///
+ /// let cow: Cow<'_, str> = Cow::Owned("moo".to_string());
+ /// assert!(cow.is_owned());
+ ///
+ /// let bull = Cow::Borrowed("...moo?");
+ /// assert!(!bull.is_owned());
+ /// ```
+ #[unstable(feature = "cow_is_borrowed", issue = "65143")]
+ #[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
+ pub const fn is_owned(&self) -> bool {
+ !self.is_borrowed()
+ }
+
+ /// Acquires a mutable reference to the owned form of the data.
+ ///
+ /// Clones the data if it is not already owned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ ///
+ /// let mut cow = Cow::Borrowed("foo");
+ /// cow.to_mut().make_ascii_uppercase();
+ ///
+ /// assert_eq!(
+ /// cow,
+ /// Cow::Owned(String::from("FOO")) as Cow<str>
+ /// );
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn to_mut(&mut self) -> &mut <B as ToOwned>::Owned {
+ match *self {
+ Borrowed(borrowed) => {
+ *self = Owned(borrowed.to_owned());
+ match *self {
+ Borrowed(..) => unreachable!(),
+ Owned(ref mut owned) => owned,
+ }
+ }
+ Owned(ref mut owned) => owned,
+ }
+ }
+
+ /// Extracts the owned data.
+ ///
+ /// Clones the data if it is not already owned.
+ ///
+ /// # Examples
+ ///
+ /// Calling `into_owned` on a `Cow::Borrowed` clones the underlying data
+ /// and becomes a `Cow::Owned`:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ ///
+ /// let s = "Hello world!";
+ /// let cow = Cow::Borrowed(s);
+ ///
+ /// assert_eq!(
+ /// cow.into_owned(),
+ /// String::from(s)
+ /// );
+ /// ```
+ ///
+ /// Calling `into_owned` on a `Cow::Owned` is a no-op:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ ///
+ /// let s = "Hello world!";
+ /// let cow: Cow<str> = Cow::Owned(String::from(s));
+ ///
+ /// assert_eq!(
+ /// cow.into_owned(),
+ /// String::from(s)
+ /// );
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_owned(self) -> <B as ToOwned>::Owned {
+ match self {
+ Borrowed(borrowed) => borrowed.to_owned(),
+ Owned(owned) => owned,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized + ToOwned> Deref for Cow<'_, B> {
+ type Target = B;
+
+ fn deref(&self) -> &B {
+ match *self {
+ Borrowed(borrowed) => borrowed,
+ Owned(ref owned) => owned.borrow(),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> Eq for Cow<'_, B> where B: Eq + ToOwned {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> Ord for Cow<'_, B>
+where
+ B: Ord + ToOwned,
+{
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, 'b, B: ?Sized, C: ?Sized> PartialEq<Cow<'b, C>> for Cow<'a, B>
+where
+ B: PartialEq<C> + ToOwned,
+ C: ToOwned,
+{
+ #[inline]
+ fn eq(&self, other: &Cow<'b, C>) -> bool {
+ PartialEq::eq(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, B: ?Sized> PartialOrd for Cow<'a, B>
+where
+ B: PartialOrd + ToOwned,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &Cow<'a, B>) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> fmt::Debug for Cow<'_, B>
+where
+ B: fmt::Debug + ToOwned<Owned: fmt::Debug>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Borrowed(ref b) => fmt::Debug::fmt(b, f),
+ Owned(ref o) => fmt::Debug::fmt(o, f),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> fmt::Display for Cow<'_, B>
+where
+ B: fmt::Display + ToOwned<Owned: fmt::Display>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Borrowed(ref b) => fmt::Display::fmt(b, f),
+ Owned(ref o) => fmt::Display::fmt(o, f),
+ }
+ }
+}
+
+#[stable(feature = "default", since = "1.11.0")]
+impl<B: ?Sized> Default for Cow<'_, B>
+where
+ B: ToOwned<Owned: Default>,
+{
+    /// Creates an owned `Cow<'a, B>` with the default value for the contained owned value.
+ fn default() -> Self {
+ Owned(<B as ToOwned>::Owned::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> Hash for Cow<'_, B>
+where
+ B: Hash + ToOwned,
+{
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ Hash::hash(&**self, state)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + ToOwned> AsRef<T> for Cow<'_, T> {
+ fn as_ref(&self) -> &T {
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> Add<&'a str> for Cow<'a, str> {
+ type Output = Cow<'a, str>;
+
+ #[inline]
+ fn add(mut self, rhs: &'a str) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> Add<Cow<'a, str>> for Cow<'a, str> {
+ type Output = Cow<'a, str>;
+
+ #[inline]
+ fn add(mut self, rhs: Cow<'a, str>) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> AddAssign<&'a str> for Cow<'a, str> {
+ fn add_assign(&mut self, rhs: &'a str) {
+ if self.is_empty() {
+ *self = Cow::Borrowed(rhs)
+ } else if !rhs.is_empty() {
+ if let Cow::Borrowed(lhs) = *self {
+ let mut s = String::with_capacity(lhs.len() + rhs.len());
+ s.push_str(lhs);
+ *self = Cow::Owned(s);
+ }
+ self.to_mut().push_str(rhs);
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> AddAssign<Cow<'a, str>> for Cow<'a, str> {
+ fn add_assign(&mut self, rhs: Cow<'a, str>) {
+ if self.is_empty() {
+ *self = rhs
+ } else if !rhs.is_empty() {
+ if let Cow::Borrowed(lhs) = *self {
+ let mut s = String::with_capacity(lhs.len() + rhs.len());
+ s.push_str(lhs);
+ *self = Cow::Owned(s);
+ }
+ self.to_mut().push_str(&rhs);
+ }
+ }
+}
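+
+// For example (illustrative):
+//
+//     let mut name: Cow<'_, str> = Cow::Borrowed("hello");
+//     name += " world"; // converts to `Cow::Owned`, allocating once
+//     assert_eq!(name, "hello world");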
diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs
new file mode 100644
index 000000000000..df6a77eea06b
--- /dev/null
+++ b/rust/alloc/boxed.rs
@@ -0,0 +1,1728 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! A pointer type for heap allocation.
+//!
+//! [`Box<T>`], casually referred to as a 'box', provides the simplest form of
+//! heap allocation in Rust. Boxes provide ownership for this allocation, and
+//! drop their contents when they go out of scope. Boxes also ensure that they
+//! never allocate more than `isize::MAX` bytes.
+//!
+//! # Examples
+//!
+//! Move a value from the stack to the heap by creating a [`Box`]:
+//!
+//! ```
+//! let val: u8 = 5;
+//! let boxed: Box<u8> = Box::new(val);
+//! ```
+//!
+//! Move a value from a [`Box`] back to the stack by [dereferencing]:
+//!
+//! ```
+//! let boxed: Box<u8> = Box::new(5);
+//! let val: u8 = *boxed;
+//! ```
+//!
+//! Creating a recursive data structure:
+//!
+//! ```
+//! #[derive(Debug)]
+//! enum List<T> {
+//! Cons(T, Box<List<T>>),
+//! Nil,
+//! }
+//!
+//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
+//! println!("{:?}", list);
+//! ```
+//!
+//! This will print `Cons(1, Cons(2, Nil))`.
+//!
+//! Recursive structures must be boxed, because if the definition of `Cons`
+//! looked like this:
+//!
+//! ```compile_fail,E0072
+//! # enum List<T> {
+//! Cons(T, List<T>),
+//! # }
+//! ```
+//!
+//! It wouldn't work. This is because the size of a `List` depends on how many
+//! elements are in the list, and so we don't know how much memory to allocate
+//! for a `Cons`. By introducing a [`Box<T>`], which has a defined size, we know how
+//! big `Cons` needs to be.
+//!
+//! # Memory layout
+//!
+//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
+//! its allocation. It is valid to convert both ways between a [`Box`] and a
+//! raw pointer allocated with the [`Global`] allocator, given that the
+//! [`Layout`] used with the allocator is correct for the type. More precisely,
+//! a `value: *mut T` that has been allocated with the [`Global`] allocator
+//! with `Layout::for_value(&*value)` may be converted into a box using
+//! [`Box::<T>::from_raw(value)`]. Conversely, the memory backing a `value: *mut
+//! T` obtained from [`Box::<T>::into_raw`] may be deallocated using the
+//! [`Global`] allocator with [`Layout::for_value(&*value)`].
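+//!
+//! For example, a minimal sketch of the round trip just described:
+//!
+//! ```
+//! let boxed = Box::new(7u32);
+//! let raw: *mut u32 = Box::into_raw(boxed);
+//! // SAFETY: `raw` came from `Box::into_raw` and is not used afterwards.
+//! let boxed = unsafe { Box::from_raw(raw) };
+//! assert_eq!(*boxed, 7);
+//! ```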
+//!
+//! For zero-sized values, the `Box` pointer still has to be [valid] for reads
+//! and writes and sufficiently aligned. In particular, casting any aligned
+//! non-zero integer literal to a raw pointer produces a valid pointer, but a
+//! pointer pointing into previously allocated memory that since got freed is
+//! not valid. The recommended way to build a Box to a ZST if `Box::new` cannot
+//! be used is to use [`ptr::NonNull::dangling`].
+//!
+//! So long as `T: Sized`, a `Box<T>` is guaranteed to be represented
+//! as a single pointer and is also ABI-compatible with C pointers
+//! (i.e. the C type `T*`). This means that if you have extern "C"
+//! Rust functions that will be called from C, you can define those
+//! Rust functions using `Box<T>` types, and use `T*` as corresponding
+//! type on the C side. As an example, consider this C header which
+//! declares functions that create and destroy some kind of `Foo`
+//! value:
+//!
+//! ```c
+//! /* C header */
+//!
+//! /* Returns ownership to the caller */
+//! struct Foo* foo_new(void);
+//!
+//! /* Takes ownership from the caller; no-op when invoked with null */
+//! void foo_delete(struct Foo*);
+//! ```
+//!
+//! These two functions might be implemented in Rust as follows. Here, the
+//! `struct Foo*` type from C is translated to `Box<Foo>`, which captures
+//! the ownership constraints. Note also that the nullable argument to
+//! `foo_delete` is represented in Rust as `Option<Box<Foo>>`, since `Box<Foo>`
+//! cannot be null.
+//!
+//! ```
+//! #[repr(C)]
+//! pub struct Foo;
+//!
+//! #[no_mangle]
+//! pub extern "C" fn foo_new() -> Box<Foo> {
+//! Box::new(Foo)
+//! }
+//!
+//! #[no_mangle]
+//! pub extern "C" fn foo_delete(_: Option<Box<Foo>>) {}
+//! ```
+//!
+//! Even though `Box<T>` has the same representation and C ABI as a C pointer,
+//! this does not mean that you can convert an arbitrary `T*` into a `Box<T>`
+//! and expect things to work. `Box<T>` values will always be fully aligned,
+//! non-null pointers. Moreover, the destructor for `Box<T>` will attempt to
+//! free the value with the global allocator. In general, the best practice
+//! is to only use `Box<T>` for pointers that originated from the global
+//! allocator.
+//!
+//! **Important.** At least at present, you should avoid using
+//! `Box<T>` types for functions that are defined in C but invoked
+//! from Rust. In those cases, you should directly mirror the C types
+//! as closely as possible. Using types like `Box<T>` where the C
+//! definition is just using `T*` can lead to undefined behavior, as
+//! described in [rust-lang/unsafe-code-guidelines#198][ucg#198].
+//!
+//! [ucg#198]: https://github.com/rust-lang/unsafe-code-guidelines/issues/198
+//! [dereferencing]: core::ops::Deref
+//! [`Box::<T>::from_raw(value)`]: Box::from_raw
+//! [`Global`]: crate::alloc::Global
+//! [`Layout`]: crate::alloc::Layout
+//! [`Layout::for_value(&*value)`]: crate::alloc::Layout::for_value
+//! [valid]: ptr#safety
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::any::Any;
+use core::borrow;
+use core::cmp::Ordering;
+use core::convert::{From, TryFrom};
+use core::fmt;
+use core::future::Future;
+use core::hash::{Hash, Hasher};
+#[cfg(not(no_global_oom_handling))]
+use core::iter::FromIterator;
+use core::iter::{FusedIterator, Iterator};
+use core::marker::{Unpin, Unsize};
+use core::mem;
+use core::ops::{
+ CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
+};
+use core::pin::Pin;
+use core::ptr::{self, Unique};
+use core::stream::Stream;
+use core::task::{Context, Poll};
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw};
+use crate::alloc::{AllocError, Allocator, Global, Layout};
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::Cow;
+#[cfg(not(no_global_oom_handling))]
+use crate::raw_vec::RawVec;
+#[cfg(not(no_global_oom_handling))]
+use crate::str::from_boxed_utf8_unchecked;
+#[cfg(not(no_global_oom_handling))]
+use crate::vec::Vec;
+
+/// A pointer type for heap allocation.
+///
+/// See the [module-level documentation](../../std/boxed/index.html) for more.
+#[lang = "owned_box"]
+#[fundamental]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Box<
+ T: ?Sized,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+>(Unique<T>, A);
+
+impl<T> Box<T> {
+ /// Allocates memory on the heap and then places `x` into it.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let five = Box::new(5);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ #[doc(alias = "alloc")]
+ #[doc(alias = "malloc")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new(x: T) -> Self {
+ box x
+ }
+
+ /// Constructs a new box with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::new_uninit();
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn new_uninit() -> Box<mem::MaybeUninit<T>> {
+ Self::new_uninit_in(Global)
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let zero = Box::<u32>::new_zeroed();
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[doc(alias = "calloc")]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_zeroed() -> Box<mem::MaybeUninit<T>> {
+ Self::new_zeroed_in(Global)
+ }
+
+ /// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
+ /// `x` will be pinned in memory and unable to be moved.
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[inline(always)]
+ pub fn pin(x: T) -> Pin<Box<T>> {
+ (box x).into()
+ }
+
+ /// Allocates memory on the heap then places `x` into it,
+    /// returning an error if the allocation fails.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// let five = Box::try_new(5)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new(x: T) -> Result<Self, AllocError> {
+ Self::try_new_in(x, Global)
+ }
+
+ /// Constructs a new box with uninitialized contents on the heap,
+    /// returning an error if the allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::try_new_uninit()?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn try_new_uninit() -> Result<Box<mem::MaybeUninit<T>>, AllocError> {
+ Box::try_new_uninit_in(Global)
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+    /// being filled with `0` bytes on the heap.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let zero = Box::<u32>::try_new_zeroed()?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn try_new_zeroed() -> Result<Box<mem::MaybeUninit<T>>, AllocError> {
+ Box::try_new_zeroed_in(Global)
+ }
+}
+
+impl<T, A: Allocator> Box<T, A> {
+ /// Allocates memory in the given allocator then places `x` into it.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let five = Box::new_in(5, System);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn new_in(x: T, alloc: A) -> Self {
+ let mut boxed = Self::new_uninit_in(alloc);
+ unsafe {
+ boxed.as_mut_ptr().write(x);
+ boxed.assume_init()
+ }
+ }
+
+ /// Allocates memory in the given allocator then places `x` into it,
+ /// returning an error if the allocation fails.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let five = Box::try_new_in(5, System)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError> {
+ let mut boxed = Self::try_new_uninit_in(alloc)?;
+ unsafe {
+ boxed.as_mut_ptr().write(x);
+ Ok(boxed.assume_init())
+ }
+ }
+
+ /// Constructs a new box with uninitialized contents in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Box::<u32, _>::new_uninit_in(System);
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[cfg(not(no_global_oom_handling))]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A> {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ // NOTE: Prefer `match` over `unwrap_or_else` since the closure is
+ // sometimes not inlineable, which would increase code size.
+ match Box::try_new_uninit_in(alloc) {
+ Ok(m) => m,
+ Err(_) => handle_alloc_error(layout),
+ }
+ }
+
+ /// Constructs a new box with uninitialized contents in the provided allocator,
+ /// returning an error if the allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Box::<u32, _>::try_new_uninit_in(System)?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError> {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ let ptr = alloc.allocate(layout)?.cast();
+ unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes in the provided allocator.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let zero = Box::<u32, _>::new_zeroed_in(System);
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[cfg(not(no_global_oom_handling))]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A> {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ // NOTE: Prefer `match` over `unwrap_or_else` since the closure is
+ // sometimes not inlineable, which would increase code size.
+ match Box::try_new_zeroed_in(alloc) {
+ Ok(m) => m,
+ Err(_) => handle_alloc_error(layout),
+ }
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes in the provided allocator,
+ /// returning an error if the allocation fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let zero = Box::<u32, _>::try_new_zeroed_in(System)?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError> {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ let ptr = alloc.allocate_zeroed(layout)?.cast();
+ unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
+ }
+
+ /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement `Unpin`, then
+ /// `x` will be pinned in memory and unable to be moved.
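+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the unstable `allocator_api` feature and the
+ /// `std` `System` allocator:
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let pinned = Box::pin_in(5, System);
+ /// assert_eq!(*pinned, 5);
+ /// ```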
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline(always)]
+ pub fn pin_in(x: T, alloc: A) -> Pin<Self>
+ where
+ A: 'static,
+ {
+ Self::new_in(x, alloc).into()
+ }
+
+ /// Converts a `Box<T>` into a `Box<[T]>`
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
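+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the unstable `box_into_boxed_slice` feature:
+ ///
+ /// ```
+ /// #![feature(box_into_boxed_slice)]
+ ///
+ /// let boxed: Box<u32> = Box::new(5);
+ /// let slice: Box<[u32]> = Box::into_boxed_slice(boxed);
+ /// assert_eq!(&*slice, &[5]);
+ /// ```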
+ #[unstable(feature = "box_into_boxed_slice", issue = "71582")]
+ pub fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(boxed);
+ unsafe { Box::from_raw_in(raw as *mut [T; 1], alloc) }
+ }
+
+ /// Consumes the `Box`, returning the wrapped value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(box_into_inner)]
+ ///
+ /// let c = Box::new(5);
+ ///
+ /// assert_eq!(Box::into_inner(c), 5);
+ /// ```
+ #[unstable(feature = "box_into_inner", issue = "80437")]
+ #[inline]
+ pub fn into_inner(boxed: Self) -> T {
+ *boxed
+ }
+}
+
+impl<T> Box<[T]> {
+ /// Constructs a new boxed slice with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut values = Box::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
+ unsafe { RawVec::with_capacity(len).into_box(len) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let values = Box::<[u32]>::new_zeroed_slice(3);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
+ unsafe { RawVec::with_capacity_zeroed(len).into_box(len) }
+ }
+}
+
+impl<T, A: Allocator> Box<[T], A> {
+ /// Constructs a new boxed slice with uninitialized contents in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut values = Box::<[u32], _>::new_uninit_slice_in(3, System);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_uninit_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
+ unsafe { RawVec::with_capacity_in(len, alloc).into_box(len) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents in the provided allocator,
+ /// with the memory being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let values = Box::<[u32], _>::new_zeroed_slice_in(3, System);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
+ unsafe { RawVec::with_capacity_zeroed_in(len, alloc).into_box(len) }
+ }
+}
+
+impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
+ /// Converts to `Box<T, A>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::new_uninit();
+ ///
+ /// let five: Box<u32> = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Box<T, A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(self);
+ unsafe { Box::from_raw_in(raw as *mut T, alloc) }
+ }
+}
+
+impl<T, A: Allocator> Box<[mem::MaybeUninit<T>], A> {
+ /// Converts to `Box<[T], A>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the values
+ /// really are in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut values = Box::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Box<[T], A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(self);
+ unsafe { Box::from_raw_in(raw as *mut [T], alloc) }
+ }
+}
+
+impl<T: ?Sized> Box<T> {
+ /// Constructs a box from a raw pointer.
+ ///
+ /// After calling this function, the raw pointer is owned by the
+ /// resulting `Box`. Specifically, the `Box` destructor will call
+ /// the destructor of `T` and free the allocated memory. For this
+ /// to be safe, the memory must have been allocated in accordance
+ /// with the [memory layout] used by `Box`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because improper use may lead to
+ /// memory problems. For example, a double-free may occur if the
+ /// function is called twice on the same raw pointer.
+ ///
+ /// The safety conditions are described in the [memory layout] section.
+ ///
+ /// # Examples
+ ///
+ /// Recreate a `Box` which was previously converted to a raw pointer
+ /// using [`Box::into_raw`]:
+ /// ```
+ /// let x = Box::new(5);
+ /// let ptr = Box::into_raw(x);
+ /// let x = unsafe { Box::from_raw(ptr) };
+ /// ```
+ /// Manually create a `Box` from scratch by using the global allocator:
+ /// ```
+ /// use std::alloc::{alloc, Layout};
+ ///
+ /// unsafe {
+ /// let ptr = alloc(Layout::new::<i32>()) as *mut i32;
+ /// // In general .write is required to avoid attempting to destruct
+ /// // the (uninitialized) previous contents of `ptr`, though for this
+ /// // simple example `*ptr = 5` would have worked as well.
+ /// ptr.write(5);
+ /// let x = Box::from_raw(ptr);
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ /// [`Layout`]: crate::Layout
+ #[stable(feature = "box_raw", since = "1.4.0")]
+ #[inline]
+ pub unsafe fn from_raw(raw: *mut T) -> Self {
+ unsafe { Self::from_raw_in(raw, Global) }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Box<T, A> {
+ /// Constructs a box from a raw pointer in the given allocator.
+ ///
+ /// After calling this function, the raw pointer is owned by the
+ /// resulting `Box`. Specifically, the `Box` destructor will call
+ /// the destructor of `T` and free the allocated memory. For this
+ /// to be safe, the memory must have been allocated in accordance
+ /// with the [memory layout] used by `Box`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because improper use may lead to
+ /// memory problems. For example, a double-free may occur if the
+ /// function is called twice on the same raw pointer.
+ ///
+ /// # Examples
+ ///
+ /// Recreate a `Box` which was previously converted to a raw pointer
+ /// using [`Box::into_raw_with_allocator`]:
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let x = Box::new_in(5, System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// let x = unsafe { Box::from_raw_in(ptr, alloc) };
+ /// ```
+ /// Manually create a `Box` from scratch by using the system allocator:
+ /// ```
+ /// #![feature(allocator_api, slice_ptr_get)]
+ ///
+ /// use std::alloc::{Allocator, Layout, System};
+ ///
+ /// unsafe {
+ /// let ptr = System.allocate(Layout::new::<i32>())?.as_mut_ptr() as *mut i32;
+ /// // In general .write is required to avoid attempting to destruct
+ /// // the (uninitialized) previous contents of `ptr`, though for this
+ /// // simple example `*ptr = 5` would have worked as well.
+ /// ptr.write(5);
+ /// let x = Box::from_raw_in(ptr, System);
+ /// }
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ /// [`Layout`]: crate::Layout
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub unsafe fn from_raw_in(raw: *mut T, alloc: A) -> Self {
+ Box(unsafe { Unique::new_unchecked(raw) }, alloc)
+ }
+
+ /// Consumes the `Box`, returning a wrapped raw pointer.
+ ///
+ /// The pointer will be properly aligned and non-null.
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Box`. In particular, the
+ /// caller should properly destroy `T` and release the memory, taking
+ /// into account the [memory layout] used by `Box`. The easiest way to
+ /// do this is to convert the raw pointer back into a `Box` with the
+ /// [`Box::from_raw`] function, allowing the `Box` destructor to perform
+ /// the cleanup.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ /// Converting the raw pointer back into a `Box` with [`Box::from_raw`]
+ /// for automatic cleanup:
+ /// ```
+ /// let x = Box::new(String::from("Hello"));
+ /// let ptr = Box::into_raw(x);
+ /// let x = unsafe { Box::from_raw(ptr) };
+ /// ```
+ /// Manual cleanup by explicitly running the destructor and deallocating
+ /// the memory:
+ /// ```
+ /// use std::alloc::{dealloc, Layout};
+ /// use std::ptr;
+ ///
+ /// let x = Box::new(String::from("Hello"));
+ /// let p = Box::into_raw(x);
+ /// unsafe {
+ /// ptr::drop_in_place(p);
+ /// dealloc(p as *mut u8, Layout::new::<String>());
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ #[stable(feature = "box_raw", since = "1.4.0")]
+ #[inline]
+ pub fn into_raw(b: Self) -> *mut T {
+ Self::into_raw_with_allocator(b).0
+ }
+
+ /// Consumes the `Box`, returning a wrapped raw pointer and the allocator.
+ ///
+ /// The pointer will be properly aligned and non-null.
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Box`. In particular, the
+ /// caller should properly destroy `T` and release the memory, taking
+ /// into account the [memory layout] used by `Box`. The easiest way to
+ /// do this is to convert the raw pointer back into a `Box` with the
+ /// [`Box::from_raw_in`] function, allowing the `Box` destructor to perform
+ /// the cleanup.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::into_raw_with_allocator(b)` instead of `b.into_raw_with_allocator()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ /// Converting the raw pointer back into a `Box` with [`Box::from_raw_in`]
+ /// for automatic cleanup:
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let x = Box::new_in(String::from("Hello"), System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// let x = unsafe { Box::from_raw_in(ptr, alloc) };
+ /// ```
+ /// Manual cleanup by explicitly running the destructor and deallocating
+ /// the memory:
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::{Allocator, Layout, System};
+ /// use std::ptr::{self, NonNull};
+ ///
+ /// let x = Box::new_in(String::from("Hello"), System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// unsafe {
+ /// ptr::drop_in_place(ptr);
+ /// let non_null = NonNull::new_unchecked(ptr);
+ /// alloc.deallocate(non_null.cast(), Layout::new::<String>());
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
+ let (leaked, alloc) = Box::into_unique(b);
+ (leaked.as_ptr(), alloc)
+ }
+
+ #[unstable(
+ feature = "ptr_internals",
+ issue = "none",
+ reason = "use `Box::leak(b).into()` or `Unique::from(Box::leak(b))` instead"
+ )]
+ #[inline]
+ #[doc(hidden)]
+ pub fn into_unique(b: Self) -> (Unique<T>, A) {
+ // Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
+ // raw pointer for the type system. Turning it directly into a raw pointer would not be
+ // recognized as "releasing" the unique pointer to permit aliased raw accesses,
+ // so all raw pointer methods have to go through `Box::leak`. Turning *that* to a raw pointer
+ // behaves correctly.
+ let alloc = unsafe { ptr::read(&b.1) };
+ (Unique::from(Box::leak(b)), alloc)
+ }
+
+ /// Returns a reference to the underlying allocator.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::allocator(&b)` instead of `b.allocator()`. This
+ /// is so that there is no conflict with a method on the inner type.
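+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the unstable `allocator_api` feature and the
+ /// `std` `System` allocator:
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let b = Box::new_in(5, System);
+ /// let _alloc: &System = Box::allocator(&b);
+ /// ```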
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(b: &Self) -> &A {
+ &b.1
+ }
+
+ /// Consumes and leaks the `Box`, returning a mutable reference,
+ /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime
+ /// `'a`. If the type has only static references, or none at all, then this
+ /// may be chosen to be `'static`.
+ ///
+ /// This function is mainly useful for data that lives for the remainder of
+ /// the program's life. Dropping the returned reference will cause a memory
+ /// leak. If this is not acceptable, the reference should first be wrapped
+ /// with the [`Box::from_raw`] function producing a `Box`. This `Box` can
+ /// then be dropped which will properly destroy `T` and release the
+ /// allocated memory.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::leak(b)` instead of `b.leak()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ ///
+ /// Simple usage:
+ ///
+ /// ```
+ /// let x = Box::new(41);
+ /// let static_ref: &'static mut usize = Box::leak(x);
+ /// *static_ref += 1;
+ /// assert_eq!(*static_ref, 42);
+ /// ```
+ ///
+ /// Unsized data:
+ ///
+ /// ```
+ /// let x = vec![1, 2, 3].into_boxed_slice();
+ /// let static_ref = Box::leak(x);
+ /// static_ref[0] = 4;
+ /// assert_eq!(*static_ref, [4, 2, 3]);
+ /// ```
+ #[stable(feature = "box_leak", since = "1.26.0")]
+ #[inline]
+ pub fn leak<'a>(b: Self) -> &'a mut T
+ where
+ A: 'a,
+ {
+ unsafe { &mut *mem::ManuallyDrop::new(b).0.as_ptr() }
+ }
+
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// This is also available via [`From`].
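+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming the unstable `box_into_pin` feature:
+ ///
+ /// ```
+ /// #![feature(box_into_pin)]
+ ///
+ /// let boxed = Box::new(5);
+ /// let pinned = Box::into_pin(boxed);
+ /// assert_eq!(*pinned, 5);
+ /// ```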
+ #[unstable(feature = "box_into_pin", issue = "62370")]
+ pub fn into_pin(boxed: Self) -> Pin<Self>
+ where
+ A: 'static,
+ {
+ // It's not possible to move or replace the insides of a `Pin<Box<T>>`
+ // when `T: !Unpin`, so it's safe to pin it directly without any
+ // additional requirements.
+ unsafe { Pin::new_unchecked(boxed) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
+ fn drop(&mut self) {
+ // FIXME: Do nothing, drop is currently performed by compiler.
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Box<T> {
+ /// Creates a `Box<T>` with the `Default` value for `T`.
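+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of the expected behaviour:
+ ///
+ /// ```
+ /// let b: Box<u32> = Box::default();
+ /// assert_eq!(*b, 0);
+ /// ```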
+ fn default() -> Self {
+ box T::default()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for Box<[T]> {
+ fn default() -> Self {
+ Box::<[T; 0]>::new([])
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "default_box_extra", since = "1.17.0")]
+impl Default for Box<str> {
+ fn default() -> Self {
+ unsafe { from_boxed_utf8_unchecked(Default::default()) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for Box<T, A> {
+ /// Returns a new box with a `clone()` of this box's contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Box::new(5);
+ /// let y = x.clone();
+ ///
+ /// // The value is the same
+ /// assert_eq!(x, y);
+ ///
+ /// // But they are unique objects
+ /// assert_ne!(&*x as *const i32, &*y as *const i32);
+ /// ```
+ #[inline]
+ fn clone(&self) -> Self {
+ // Pre-allocate memory to allow writing the cloned value directly.
+ let mut boxed = Self::new_uninit_in(self.1.clone());
+ unsafe {
+ (**self).write_clone_into_raw(boxed.as_mut_ptr());
+ boxed.assume_init()
+ }
+ }
+
+ /// Copies `source`'s contents into `self` without creating a new allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Box::new(5);
+ /// let mut y = Box::new(10);
+ /// let yp: *const i32 = &*y;
+ ///
+ /// y.clone_from(&x);
+ ///
+ /// // The value is the same
+ /// assert_eq!(x, y);
+ ///
+ /// // And no allocation occurred
+ /// assert_eq!(yp, &*y);
+ /// ```
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ (**self).clone_from(&(**source));
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_slice_clone", since = "1.3.0")]
+impl Clone for Box<str> {
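+ /// Returns a new `Box<str>` with a copy of this box's contents.
+ ///
+ /// A minimal sketch of the expected behaviour:
+ ///
+ /// ```
+ /// let hello: Box<str> = Box::from("hello");
+ /// let copy = hello.clone();
+ /// assert_eq!(copy, hello);
+ /// ```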
+ fn clone(&self) -> Self {
+ // this makes a copy of the data
+ let buf: Box<[u8]> = self.as_bytes().into();
+ unsafe { from_boxed_utf8_unchecked(buf) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Box<T, A> {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ PartialEq::eq(&**self, &**other)
+ }
+ #[inline]
+ fn ne(&self, other: &Self) -> bool {
+ PartialEq::ne(&**self, &**other)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Box<T, A> {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+ #[inline]
+ fn lt(&self, other: &Self) -> bool {
+ PartialOrd::lt(&**self, &**other)
+ }
+ #[inline]
+ fn le(&self, other: &Self) -> bool {
+ PartialOrd::le(&**self, &**other)
+ }
+ #[inline]
+ fn ge(&self, other: &Self) -> bool {
+ PartialOrd::ge(&**self, &**other)
+ }
+ #[inline]
+ fn gt(&self, other: &Self) -> bool {
+ PartialOrd::gt(&**self, &**other)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Ord, A: Allocator> Ord for Box<T, A> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(&**self, &**other)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Eq, A: Allocator> Eq for Box<T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Hash, A: Allocator> Hash for Box<T, A> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state);
+ }
+}
+
+#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
+impl<T: ?Sized + Hasher, A: Allocator> Hasher for Box<T, A> {
+ fn finish(&self) -> u64 {
+ (**self).finish()
+ }
+ fn write(&mut self, bytes: &[u8]) {
+ (**self).write(bytes)
+ }
+ fn write_u8(&mut self, i: u8) {
+ (**self).write_u8(i)
+ }
+ fn write_u16(&mut self, i: u16) {
+ (**self).write_u16(i)
+ }
+ fn write_u32(&mut self, i: u32) {
+ (**self).write_u32(i)
+ }
+ fn write_u64(&mut self, i: u64) {
+ (**self).write_u64(i)
+ }
+ fn write_u128(&mut self, i: u128) {
+ (**self).write_u128(i)
+ }
+ fn write_usize(&mut self, i: usize) {
+ (**self).write_usize(i)
+ }
+ fn write_i8(&mut self, i: i8) {
+ (**self).write_i8(i)
+ }
+ fn write_i16(&mut self, i: i16) {
+ (**self).write_i16(i)
+ }
+ fn write_i32(&mut self, i: i32) {
+ (**self).write_i32(i)
+ }
+ fn write_i64(&mut self, i: i64) {
+ (**self).write_i64(i)
+ }
+ fn write_i128(&mut self, i: i128) {
+ (**self).write_i128(i)
+ }
+ fn write_isize(&mut self, i: isize) {
+ (**self).write_isize(i)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_for_ptrs", since = "1.6.0")]
+impl<T> From<T> for Box<T> {
+ /// Converts a `T` into a `Box<T>`
+ ///
+ /// The conversion allocates on the heap and moves `t`
+ /// from the stack into it.
+ ///
+ /// # Examples
+ /// ```rust
+ /// let x = 5;
+ /// let boxed = Box::new(5);
+ ///
+ /// assert_eq!(Box::from(x), boxed);
+ /// ```
+ fn from(t: T) -> Self {
+ Box::new(t)
+ }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Pin<Box<T, A>>
+where
+ A: 'static,
+{
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
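+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of the expected behaviour:
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let boxed = Box::new(5);
+ /// let pinned: Pin<Box<i32>> = Pin::from(boxed);
+ /// assert_eq!(*pinned, 5);
+ /// ```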
+ fn from(boxed: Box<T, A>) -> Self {
+ Box::into_pin(boxed)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_slice", since = "1.17.0")]
+impl<T: Copy> From<&[T]> for Box<[T]> {
+ /// Converts a `&[T]` into a `Box<[T]>`
+ ///
+ /// This conversion allocates on the heap
+ /// and performs a copy of `slice`.
+ ///
+ /// # Examples
+ /// ```rust
+ /// // create a &[u8] which will be used to create a Box<[u8]>
+ /// let slice: &[u8] = &[104, 101, 108, 108, 111];
+ /// let boxed_slice: Box<[u8]> = Box::from(slice);
+ ///
+ /// println!("{:?}", boxed_slice);
+ /// ```
+ fn from(slice: &[T]) -> Box<[T]> {
+ let len = slice.len();
+ let buf = RawVec::with_capacity(len);
+ unsafe {
+ ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
+ buf.into_box(slice.len()).assume_init()
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl<T: Copy> From<Cow<'_, [T]>> for Box<[T]> {
+ #[inline]
+ fn from(cow: Cow<'_, [T]>) -> Box<[T]> {
+ match cow {
+ Cow::Borrowed(slice) => Box::from(slice),
+ Cow::Owned(slice) => Box::from(slice),
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_slice", since = "1.17.0")]
+impl From<&str> for Box<str> {
+ /// Converts a `&str` into a `Box<str>`
+ ///
+ /// This conversion allocates on the heap
+ /// and performs a copy of `s`.
+ ///
+ /// # Examples
+ /// ```rust
+ /// let boxed: Box<str> = Box::from("hello");
+ /// println!("{}", boxed);
+ /// ```
+ #[inline]
+ fn from(s: &str) -> Box<str> {
+ unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl From<Cow<'_, str>> for Box<str> {
+ #[inline]
+ fn from(cow: Cow<'_, str>) -> Box<str> {
+ match cow {
+ Cow::Borrowed(s) => Box::from(s),
+ Cow::Owned(s) => Box::from(s),
+ }
+ }
+}
+
+#[stable(feature = "boxed_str_conv", since = "1.19.0")]
+impl<A: Allocator> From<Box<str, A>> for Box<[u8], A> {
+ /// Converts a `Box<str>` into a `Box<[u8]>`
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// # Examples
+ /// ```rust
+ /// // create a Box<str> which will be used to create a Box<[u8]>
+ /// let boxed: Box<str> = Box::from("hello");
+ /// let boxed_str: Box<[u8]> = Box::from(boxed);
+ ///
+ /// // create a &[u8] which will be used to create a Box<[u8]>
+ /// let slice: &[u8] = &[104, 101, 108, 108, 111];
+ /// let boxed_slice = Box::from(slice);
+ ///
+ /// assert_eq!(boxed_slice, boxed_str);
+ /// ```
+ #[inline]
+ fn from(s: Box<str, A>) -> Self {
+ let (raw, alloc) = Box::into_raw_with_allocator(s);
+ unsafe { Box::from_raw_in(raw as *mut [u8], alloc) }
+ }
+}
+
+#[stable(feature = "box_from_array", since = "1.45.0")]
+impl<T, const N: usize> From<[T; N]> for Box<[T]> {
+ /// Converts a `[T; N]` into a `Box<[T]>`
+ ///
+ /// This conversion moves the array to newly heap-allocated memory.
+ ///
+ /// # Examples
+ /// ```rust
+ /// let boxed: Box<[u8]> = Box::from([4, 2]);
+ /// println!("{:?}", boxed);
+ /// ```
+ fn from(array: [T; N]) -> Box<[T]> {
+ box array
+ }
+}
+
+#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
+impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
+ type Error = Box<[T]>;
+
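+ /// Attempts to convert a `Box<[T]>` into a `Box<[T; N]>`.
+ ///
+ /// The conversion succeeds only if the slice length matches `N`; otherwise
+ /// the original boxed slice is returned unchanged as the error. A minimal
+ /// sketch of the expected behaviour:
+ ///
+ /// ```
+ /// use std::convert::TryFrom;
+ ///
+ /// let boxed: Box<[u8]> = Box::from([1, 2, 3]);
+ /// let array: Box<[u8; 3]> = Box::try_from(boxed).unwrap();
+ /// assert_eq!(*array, [1, 2, 3]);
+ /// ```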
+ fn try_from(boxed_slice: Box<[T]>) -> Result<Self, Self::Error> {
+ if boxed_slice.len() == N {
+ Ok(unsafe { Box::from_raw(Box::into_raw(boxed_slice) as *mut [T; N]) })
+ } else {
+ Err(boxed_slice)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any, A> {
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() {
+ unsafe {
+ let (raw, alloc): (*mut dyn Any, _) = Box::into_raw_with_allocator(self);
+ Ok(Box::from_raw_in(raw as *mut T, alloc))
+ }
+ } else {
+ Err(self)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any + Send, A> {
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any + Send>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() {
+ unsafe {
+ let (raw, alloc): (*mut (dyn Any + Send), _) = Box::into_raw_with_allocator(self);
+ Ok(Box::from_raw_in(raw as *mut T, alloc))
+ }
+ } else {
+ Err(self)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any + Send + Sync, A> {
+ #[inline]
+ #[stable(feature = "box_send_sync_any_downcast", since = "1.51.0")]
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any + Send + Sync>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() {
+ unsafe {
+ let (raw, alloc): (*mut (dyn Any + Send + Sync), _) =
+ Box::into_raw_with_allocator(self);
+ Ok(Box::from_raw_in(raw as *mut T, alloc))
+ }
+ } else {
+ Err(self)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Display + ?Sized, A: Allocator> fmt::Display for Box<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug + ?Sized, A: Allocator> fmt::Debug for Box<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized, A: Allocator> fmt::Pointer for Box<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // It's not possible to extract the inner Unique directly from the Box,
+ // so instead we cast it to a *const, which aliases the Unique.
+ let ptr: *const T = &**self;
+ fmt::Pointer::fmt(&ptr, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized, A: Allocator> Deref for Box<T, A> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized, A: Allocator> DerefMut for Box<T, A> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<T: ?Sized, A: Allocator> Receiver for Box<T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator + ?Sized, A: Allocator> Iterator for Box<I, A> {
+ type Item = I::Item;
+ fn next(&mut self) -> Option<I::Item> {
+ (**self).next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+ fn nth(&mut self, n: usize) -> Option<I::Item> {
+ (**self).nth(n)
+ }
+ fn last(self) -> Option<I::Item> {
+ BoxIter::last(self)
+ }
+}
+
+trait BoxIter {
+ type Item;
+ fn last(self) -> Option<Self::Item>;
+}
+
+impl<I: Iterator + ?Sized, A: Allocator> BoxIter for Box<I, A> {
+ type Item = I::Item;
+ default fn last(self) -> Option<I::Item> {
+ #[inline]
+ fn some<T>(_: Option<T>, x: T) -> Option<T> {
+ Some(x)
+ }
+
+ self.fold(None, some)
+ }
+}
+
+/// Specialization for sized `I`s that uses `I`'s implementation of `last()`
+/// instead of the default.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator, A: Allocator> BoxIter for Box<I, A> {
+ fn last(self) -> Option<I::Item> {
+ (*self).last()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: DoubleEndedIterator + ?Sized, A: Allocator> DoubleEndedIterator for Box<I, A> {
+ fn next_back(&mut self) -> Option<I::Item> {
+ (**self).next_back()
+ }
+ fn nth_back(&mut self, n: usize) -> Option<I::Item> {
+ (**self).nth_back(n)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A> {
+ fn len(&self) -> usize {
+ (**self).len()
+ }
+ fn is_empty(&self) -> bool {
+ (**self).is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
+
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
+ type Output = <F as FnOnce<Args>>::Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
+ <F as FnOnce<Args>>::call_once(*self, args)
+ }
+}
+
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
+ <F as FnMut<Args>>::call_mut(self, args)
+ }
+}
+
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
+ extern "rust-call" fn call(&self, args: Args) -> Self::Output {
+ <F as Fn<Args>>::call(self, args)
+ }
+}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T, Global> {}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "boxed_slice_from_iter", since = "1.32.0")]
+impl<I> FromIterator<I> for Box<[I]> {
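+ /// Collects the iterator's items into a newly allocated `Box<[I]>`.
+ ///
+ /// A minimal sketch (this goes through an intermediate `Vec`, as the
+ /// implementation below shows):
+ ///
+ /// ```
+ /// let boxed: Box<[u32]> = (1..4).collect();
+ /// assert_eq!(&*boxed, &[1, 2, 3]);
+ /// ```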
+ fn from_iter<T: IntoIterator<Item = I>>(iter: T) -> Self {
+ iter.into_iter().collect::<Vec<_>>().into_boxed_slice()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_slice_clone", since = "1.3.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for Box<[T], A> {
+ fn clone(&self) -> Self {
+ let alloc = Box::allocator(self).clone();
+ self.to_vec_in(alloc).into_boxed_slice()
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ if self.len() == other.len() {
+ self.clone_from_slice(&other);
+ } else {
+ *self = other.clone();
+ }
+ }
+}
+
+#[stable(feature = "box_borrow", since = "1.1.0")]
+impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Box<T, A> {
+ fn borrow(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "box_borrow", since = "1.1.0")]
+impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for Box<T, A> {
+ fn borrow_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized, A: Allocator> AsRef<T> for Box<T, A> {
+ fn as_ref(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
+ fn as_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+/* Nota bene
+ *
+ * We could have chosen not to add this impl, and instead have written a
+ * function of Pin<Box<T>> to Pin<T>. Such a function would not be sound,
+ * because Box<T> implements Unpin even when T does not, as a result of
+ * this impl.
+ *
+ * We chose this API instead of the alternative for a few reasons:
+ * - Logically, it is helpful to understand pinning in regard to the
+ * memory region being pointed to. For this reason none of the
+ * standard library pointer types support projecting through a pin
+ * (Box<T> is the only pointer type in std for which this would be
+ * safe.)
+ * - It is in practice very useful to have Box<T> be unconditionally
+ * Unpin because of trait objects, for which the structural auto
+ * trait functionality does not apply (e.g., Box<dyn Foo> would
+ * otherwise not be Unpin).
+ *
+ * Another type with the same semantics as Box but only a conditional
+ * implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and
+ * could have a method to project a Pin<T> from it.
+ */
+#[stable(feature = "pin", since = "1.33.0")]
+impl<T: ?Sized, A: Allocator> Unpin for Box<T, A> where A: 'static {}
+
+#[unstable(feature = "generator_trait", issue = "43122")]
+impl<G: ?Sized + Generator<R> + Unpin, R, A: Allocator> Generator<R> for Box<G, A>
+where
+ A: 'static,
+{
+ type Yield = G::Yield;
+ type Return = G::Return;
+
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ G::resume(Pin::new(&mut *self), arg)
+ }
+}
+
+#[unstable(feature = "generator_trait", issue = "43122")]
+impl<G: ?Sized + Generator<R>, R, A: Allocator> Generator<R> for Pin<Box<G, A>>
+where
+ A: 'static,
+{
+ type Yield = G::Yield;
+ type Return = G::Return;
+
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ G::resume((*self).as_mut(), arg)
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl<F: ?Sized + Future + Unpin, A: Allocator> Future for Box<F, A>
+where
+ A: 'static,
+{
+ type Output = F::Output;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ F::poll(Pin::new(&mut *self), cx)
+ }
+}
+
+#[unstable(feature = "async_stream", issue = "79024")]
+impl<S: ?Sized + Stream + Unpin> Stream for Box<S> {
+ type Item = S::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Pin::new(&mut **self).poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+}
diff --git a/rust/alloc/collections/mod.rs b/rust/alloc/collections/mod.rs
new file mode 100644
index 000000000000..2970fe44a21b
--- /dev/null
+++ b/rust/alloc/collections/mod.rs
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Collection types.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(no_global_oom_handling))]
+pub mod binary_heap;
+#[cfg(not(no_global_oom_handling))]
+mod btree;
+#[cfg(not(no_global_oom_handling))]
+pub mod linked_list;
+#[cfg(not(no_global_oom_handling))]
+pub mod vec_deque;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod btree_map {
+ //! A map based on a B-Tree.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::btree::map::*;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod btree_set {
+ //! A set based on a B-Tree.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::btree::set::*;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use binary_heap::BinaryHeap;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use btree_map::BTreeMap;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use btree_set::BTreeSet;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use linked_list::LinkedList;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use vec_deque::VecDeque;
+
+use crate::alloc::{Layout, LayoutError};
+use core::fmt::Display;
+
+/// The error type for `try_reserve` methods.
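+///
+/// A minimal sketch of propagating this error, assuming the unstable
+/// `try_reserve` feature and the `std` re-export of this type:
+///
+/// ```
+/// #![feature(try_reserve)]
+///
+/// use std::collections::TryReserveError;
+///
+/// fn grow(v: &mut Vec<u8>) -> Result<(), TryReserveError> {
+/// v.try_reserve(10)?;
+/// Ok(())
+/// }
+/// ```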
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
+pub enum TryReserveError {
+ /// Error due to the computed capacity exceeding the collection's maximum
+ /// (usually `isize::MAX` bytes).
+ CapacityOverflow,
+
+ /// The memory allocator returned an error.
+ AllocError {
+ /// The layout of the allocation request that failed.
+ layout: Layout,
+
+ #[doc(hidden)]
+ #[unstable(
+ feature = "container_error_extra",
+ issue = "none",
+ reason = "\
+ Enable exposing the allocator’s custom error value \
+ if an associated type is added in the future: \
+ https://github.com/rust-lang/wg-allocators/issues/23"
+ )]
+ non_exhaustive: (),
+ },
+}
+
+#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
+impl From<LayoutError> for TryReserveError {
+ #[inline]
+ fn from(_: LayoutError) -> Self {
+ TryReserveError::CapacityOverflow
+ }
+}
+
+#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
+impl Display for TryReserveError {
+ fn fmt(
+ &self,
+ fmt: &mut core::fmt::Formatter<'_>,
+ ) -> core::result::Result<(), core::fmt::Error> {
+ fmt.write_str("memory allocation failed")?;
+ let reason = match &self {
+ TryReserveError::CapacityOverflow => {
+ " because the computed capacity exceeded the collection's maximum"
+ }
+ TryReserveError::AllocError { .. } => " because the memory allocator returned an error",
+ };
+ fmt.write_str(reason)
+ }
+}
+
+/// An intermediate trait for specialization of `Extend`.
+#[doc(hidden)]
+trait SpecExtend<I: IntoIterator> {
+ /// Extends `self` with the contents of the given iterator.
+ fn spec_extend(&mut self, iter: I);
+}
diff --git a/rust/alloc/fmt.rs b/rust/alloc/fmt.rs
new file mode 100644
index 000000000000..9c4e0b2f2111
--- /dev/null
+++ b/rust/alloc/fmt.rs
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Utilities for formatting and printing `String`s.
+//!
+//! This module contains the runtime support for the [`format!`] syntax extension.
+//! This macro is implemented in the compiler to emit calls to this module in
+//! order to format arguments at runtime into strings.
+//!
+//! # Usage
+//!
+//! The [`format!`] macro is intended to be familiar to those coming from C's
+//! `printf`/`fprintf` functions or Python's `str.format` function.
+//!
+//! Some examples of the [`format!`] extension are:
+//!
+//! ```
+//! format!("Hello"); // => "Hello"
+//! format!("Hello, {}!", "world"); // => "Hello, world!"
+//! format!("The number is {}", 1); // => "The number is 1"
+//! format!("{:?}", (3, 4)); // => "(3, 4)"
+//! format!("{value}", value=4); // => "4"
+//! format!("{} {}", 1, 2); // => "1 2"
+//! format!("{:04}", 42); // => "0042" with leading zeros
+//! format!("{:#?}", (100, 200)); // => "(
+//! // 100,
+//! // 200,
+//! // )"
+//! ```
+//!
+//! From these, you can see that the first argument is a format string. It is
+//! required by the compiler for this to be a string literal; it cannot be a
+//! variable passed in (in order to perform validity checking). The compiler
+//! will then parse the format string and determine if the list of arguments
+//! provided is suitable to pass to this format string.
+//!
+//! To convert a single value to a string, use the [`to_string`] method. This
+//! will use the [`Display`] formatting trait.
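+//!
+//! For example:
+//!
+//! ```
+//! assert_eq!(5.to_string(), "5");
+//! ```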
+//!
+//! ## Positional parameters
+//!
+//! Each formatting argument is allowed to specify which value argument it's
+//! referencing, and if omitted it is assumed to be "the next argument". For
+//! example, the format string `{} {} {}` would take three parameters, and they
+//! would be formatted in the same order as they're given. The format string
+//! `{2} {1} {0}`, however, would format arguments in reverse order.
+//!
+//! Things can get a little tricky once you start intermingling the two types of
+//! positional specifiers. The "next argument" specifier can be thought of as an
+//! iterator over the arguments. Each time a "next argument" specifier is seen,
+//! the iterator advances. This leads to behavior like this:
+//!
+//! ```
+//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
+//! ```
+//!
+//! The internal iterator over the arguments has not been advanced by the time
+//! the first `{}` is seen, so it prints the first argument. Then upon reaching
+//! the second `{}`, the iterator has advanced forward to the second argument.
+//! Essentially, parameters that explicitly name their argument do not affect
+//! parameters that do not name an argument in terms of positional specifiers.
+//!
+//! A format string is required to use all of its arguments, otherwise it is a
+//! compile-time error. You may refer to the same argument more than once in the
+//! format string.
+//!
+//! ## Named parameters
+//!
+//! Rust itself does not have a Python-like equivalent of named parameters to a
+//! function, but the [`format!`] macro is a syntax extension that allows it to
+//! leverage named parameters. Named parameters are listed at the end of the
+//! argument list and have the syntax:
+//!
+//! ```text
+//! identifier '=' expression
+//! ```
+//!
+//! For example, the following [`format!`] expressions all use named arguments:
+//!
+//! ```
+//! format!("{argument}", argument = "test"); // => "test"
+//! format!("{name} {}", 1, name = 2); // => "2 1"
+//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
+//! ```
+//!
+//! It is not valid to put positional parameters (those without names) after
+//! arguments that have names. Like with positional parameters, it is not
+//! valid to provide named parameters that are unused by the format string.
+//!
+//! # Formatting Parameters
+//!
+//! Each argument being formatted can be transformed by a number of formatting
+//! parameters (corresponding to `format_spec` in [the syntax](#syntax)). These
+//! parameters affect the string representation of what's being formatted.
+//!
+//! ## Width
+//!
+//! ```
+//! // All of these print "Hello x !"
+//! println!("Hello {:5}!", "x");
+//! println!("Hello {:1$}!", "x", 5);
+//! println!("Hello {1:0$}!", 5, "x");
+//! println!("Hello {:width$}!", "x", width = 5);
+//! ```
+//!
+//! This is a parameter for the "minimum width" that the format should take up.
+//! If the value's string does not fill up this many characters, then the
+//! padding specified by fill/alignment will be used to take up the required
+//! space (see below).
+//!
+//! The value for the width can also be provided as a [`usize`] in the list of
+//! parameters by adding a postfix `$`, indicating that the second argument is
+//! a [`usize`] specifying the width.
+//!
+//! Referring to an argument with the dollar syntax does not affect the "next
+//! argument" counter, so it's usually a good idea to refer to arguments by
+//! position, or use named arguments.
+//!
+//! ## Fill/Alignment
+//!
+//! ```
+//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
+//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
+//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
+//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
+//! ```
+//!
+//! The optional fill character and alignment are normally provided in conjunction with
+//! the [`width`](#width) parameter, and must be defined before `width`, right after the `:`.
+//! This indicates that if the value being formatted is smaller than
+//! `width` some extra characters will be printed around it.
+//! Filling comes in the following variants for different alignments:
+//!
+//! * `[fill]<` - the argument is left-aligned in `width` columns
+//! * `[fill]^` - the argument is center-aligned in `width` columns
+//! * `[fill]>` - the argument is right-aligned in `width` columns
+//!
+//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
+//! left-aligned. The
+//! default for numeric formatters is also a space character but with right-alignment. If
+//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
+//! `0`.
+//!
+//! Note that alignment may not be implemented by some types. In particular, it
+//! is not generally implemented for the `Debug` trait. A good way to ensure
+//! padding is applied is to format your input, then pad this resulting string
+//! to obtain your output:
+//!
+//! ```
+//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
+//! ```
+//!
+//! ## Sign/`#`/`0`
+//!
+//! ```
+//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
+//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
+//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
+//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
+//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
+//! ```
+//!
+//! These are all flags altering the behavior of the formatter.
+//!
+//! * `+` - This is intended for numeric types and indicates that the sign
+//! should always be printed. Positive signs are never printed by
+//! default, and the negative sign is only printed by default for signed values.
+//! This flag indicates that the correct sign (`+` or `-`) should always be printed.
+//! * `-` - Currently not used
+//! * `#` - This flag indicates that the "alternate" form of printing should
+//! be used. The alternate forms are:
+//! * `#?` - pretty-print the [`Debug`] formatting (adds linebreaks and indentation)
+//! * `#x` - precedes the argument with a `0x`
+//! * `#X` - precedes the argument with a `0x`
+//! * `#b` - precedes the argument with a `0b`
+//! * `#o` - precedes the argument with a `0o`
+//! * `0` - This is used to indicate for integer formats that the padding to `width` should
+//! both be done with a `0` character as well as be sign-aware. A format
+//! like `{:08}` would yield `00000001` for the integer `1`, while the
+//! same format would yield `-0000001` for the integer `-1`. Notice that
+//! the negative version has one fewer zero than the positive version.
+//! Note that padding zeros are always placed after the sign (if any)
+//! and before the digits. When used together with the `#` flag, a similar
+//! rule applies: padding zeros are inserted after the prefix but before
+//! the digits. The prefix is included in the total width.
+//!
+//! ## Precision
+//!
+//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
+//! longer than this width, then it is truncated down to this many characters and that truncated
+//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
+//!
+//! For integral types, this is ignored.
+//!
+//! For floating-point types, this indicates how many digits after the decimal point should be
+//! printed.
+//!
+//! There are three possible ways to specify the desired `precision`:
+//!
+//! 1. An integer `.N`:
+//!
+//! the integer `N` itself is the precision.
+//!
+//! 2. An integer or name followed by dollar sign `.N$`:
+//!
+//! use format *argument* `N` (which must be a `usize`) as the precision.
+//!
+//! 3. An asterisk `.*`:
+//!
+//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
+//! first input holds the `usize` precision, and the second holds the value to print. Note that
+//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
+//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
+//!
+//! For example, the following calls all print the same thing `Hello x is 0.01000`:
+//!
+//! ```
+//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
+//! println!("Hello {0} is {1:.5}", "x", 0.01);
+//!
+//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
+//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
+//!
+//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
+//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
+//!
+//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
+//! // specified in first of next two args (5)}
+//! println!("Hello {} is {:.*}", "x", 5, 0.01);
+//!
+//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
+//! // specified in its predecessor (5)}
+//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
+//!
+//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
+//! // in arg "prec" (5)}
+//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
+//! ```
+//!
+//! While these:
+//!
+//! ```
+//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
+//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
+//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
+//! ```
+//!
+//! print three significantly different things:
+//!
+//! ```text
+//! Hello, `1234.560` has 3 fractional digits
+//! Hello, `123` has 3 characters
+//! Hello, ` 123` has 3 right-aligned characters
+//! ```
+//!
+//! ## Localization
+//!
+//! In some programming languages, the behavior of string formatting functions
+//! depends on the operating system's locale setting. The format functions
+//! provided by Rust's standard library do not have any concept of locale and
+//! will produce the same results on all systems regardless of user
+//! configuration.
+//!
+//! For example, the following code will always print `1.5` even if the system
+//! locale uses a decimal separator other than a dot.
+//!
+//! ```
+//! println!("The value is {}", 1.5);
+//! ```
+//!
+//! # Escaping
+//!
+//! The literal characters `{` and `}` may be included in a string by preceding
+//! them with the same character. For example, the `{` character is escaped with
+//! `{{` and the `}` character is escaped with `}}`.
+//!
+//! ```
+//! assert_eq!(format!("Hello {{}}"), "Hello {}");
+//! assert_eq!(format!("{{ Hello"), "{ Hello");
+//! ```
+//!
+//! # Syntax
+//!
+//! To summarize, here you can find the full grammar of format strings.
+//! The syntax for the formatting language used is drawn from other languages,
+//! so it should not be too alien. Arguments are formatted with Python-like
+//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
+//! `%`. The actual grammar for the formatting syntax is:
+//!
+//! ```text
+//! format_string := text [ maybe_format text ] *
+//! maybe_format := '{' '{' | '}' '}' | format
+//! format := '{' [ argument ] [ ':' format_spec ] '}'
+//! argument := integer | identifier
+//!
+//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision]type
+//! fill := character
+//! align := '<' | '^' | '>'
+//! sign := '+' | '-'
+//! width := count
+//! precision := count | '*'
+//! type := '' | '?' | 'x?' | 'X?' | identifier
+//! count := parameter | integer
+//! parameter := argument '$'
+//! ```
+//!
+//! In the above grammar, `text` may not contain any `'{'` or `'}'` characters.
+//!
+//! # Formatting traits
+//!
+//! When requesting that an argument be formatted with a particular type, you
+//! are actually requesting that the argument implement a particular trait.
+//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
+//! well as [`isize`]). The current mapping of types to traits is:
+//!
+//! * *nothing* ⇒ [`Display`]
+//! * `?` ⇒ [`Debug`]
+//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
+//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
+//! * `o` ⇒ [`Octal`]
+//! * `x` ⇒ [`LowerHex`]
+//! * `X` ⇒ [`UpperHex`]
+//! * `p` ⇒ [`Pointer`]
+//! * `b` ⇒ [`Binary`]
+//! * `e` ⇒ [`LowerExp`]
+//! * `E` ⇒ [`UpperExp`]
+//!
+//! What this means is that any type of argument which implements the
+//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
+//! are provided for these traits for a number of primitive types by the
+//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
+//! then the format trait used is the [`Display`] trait.
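+//!
+//! For example:
+//!
+//! ```
+//! assert_eq!(format!("{:b}", 5), "101"); // `Binary`
+//! assert_eq!(format!("{:x?}", 200u8), "c8"); // `Debug` with lower-case hexadecimal
+//! assert_eq!(format!("{:e}", 1500.0), "1.5e3"); // `LowerExp`
+//! ```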
+//!
+//! When implementing a format trait for your own type, you will have to
+//! implement a method of the signature:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! # use std::fmt;
+//! # struct Foo; // our custom type
+//! # impl fmt::Display for Foo {
+//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//! # write!(f, "testing, testing")
+//! # } }
+//! ```
+//!
+//! Your type will be passed as `self` by-reference, and then the function
+//! should emit output into the `f` stream. It is up to each format trait
+//! implementation to correctly adhere to the requested formatting parameters.
+//! The values of these parameters will be listed in the fields of the
+//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
+//! provides some helper methods.
+//!
+//! Additionally, the return value of this function is [`fmt::Result`] which is a
+//! type alias of [`Result`]`<(), `[`std::fmt::Error`]`>`. Formatting implementations
+//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
+//! calling [`write!`]). However, they should never return errors spuriously. That
+//! is, a formatting implementation must and may only return an error if the
+//! passed-in [`Formatter`] returns an error. This is because, contrary to what
+//! the function signature might suggest, string formatting is an infallible
+//! operation. This function only returns a result because writing to the
+//! underlying stream might fail and it must provide a way to propagate the fact
+//! that an error has occurred back up the stack.
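+//!
+//! For example, an implementation can use the `?` operator to forward errors
+//! from the underlying [`Formatter`] (`Brackets` here is an illustrative type,
+//! not part of this module):
+//!
+//! ```
+//! use std::fmt;
+//!
+//! struct Brackets(&'static str);
+//!
+//! impl fmt::Display for Brackets {
+//!     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//!         // `?` propagates an error coming from the underlying writer; this
+//!         // is the only situation in which `Err` should be returned.
+//!         write!(f, "[")?;
+//!         f.write_str(self.0)?;
+//!         write!(f, "]")
+//!     }
+//! }
+//!
+//! assert_eq!(format!("{}", Brackets("hi")), "[hi]");
+//! ```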
+//!
+//! An example of implementing the formatting traits would look
+//! like:
+//!
+//! ```
+//! use std::fmt;
+//!
+//! #[derive(Debug)]
+//! struct Vector2D {
+//! x: isize,
+//! y: isize,
+//! }
+//!
+//! impl fmt::Display for Vector2D {
+//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//! // The `f` value implements the `Write` trait, which is what the
+//! // write! macro is expecting. Note that this formatting ignores the
+//! // various flags provided to format strings.
+//! write!(f, "({}, {})", self.x, self.y)
+//! }
+//! }
+//!
+//! // Different traits allow different forms of output of a type. The meaning
+//! // of this format is to print the magnitude of a vector.
+//! impl fmt::Binary for Vector2D {
+//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
+//! let magnitude = magnitude.sqrt();
+//!
+//! // Respect the formatting flags by using the helper method
+//! // `pad_integral` on the Formatter object. See the method
+//! // documentation for details, and the function `pad` can be used
+//! // to pad strings.
+//! let decimals = f.precision().unwrap_or(3);
+//! let string = format!("{:.*}", decimals, magnitude);
+//! f.pad_integral(true, "", &string)
+//! }
+//! }
+//!
+//! fn main() {
+//! let myvector = Vector2D { x: 3, y: 4 };
+//!
+//! println!("{}", myvector); // => "(3, 4)"
+//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
+//! println!("{:10.3b}", myvector); // => " 5.000"
+//! }
+//! ```
+//!
+//! ### `fmt::Display` vs `fmt::Debug`
+//!
+//! These two formatting traits have distinct purposes:
+//!
+//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
+//! represented as a UTF-8 string at all times. It is **not** expected that
+//! all types implement the [`Display`] trait.
+//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
+//! Output will typically represent the internal state as faithfully as possible.
+//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
+//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
+//!
+//! Some examples of the output from both traits:
+//!
+//! ```
+//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
+//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
+//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
+//! ```
+//!
+//! # Related macros
+//!
+//! There are a number of related macros in the [`format!`] family. The ones that
+//! are currently implemented are:
+//!
+//! ```ignore (only-for-syntax-highlight)
+//! format! // described above
+//! write! // first argument is a &mut io::Write, the destination
+//! writeln! // same as write but appends a newline
+//! print! // the format string is printed to the standard output
+//! println! // same as print but appends a newline
+//! eprint! // the format string is printed to the standard error
+//! eprintln! // same as eprint but appends a newline
+//! format_args! // described below.
+//! ```
+//!
+//! ### `write!`
+//!
+//! This and [`writeln!`] are two macros which are used to emit the format string
+//! to a specified stream. This is used to prevent intermediate allocations of
+//! format strings and instead directly write the output. Under the hood, this
+//! function is actually invoking the [`write_fmt`] function defined on the
+//! [`std::io::Write`] trait. Example usage is:
+//!
+//! ```
+//! # #![allow(unused_must_use)]
+//! use std::io::Write;
+//! let mut w = Vec::new();
+//! write!(&mut w, "Hello {}!", "world");
+//! ```
+//!
+//! ### `print!`
+//!
+//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
+//! macro, the goal of these macros is to avoid intermediate allocations when
+//! printing output. Example usage is:
+//!
+//! ```
+//! print!("Hello {}!", "world");
+//! println!("I have a newline {}", "character at the end");
+//! ```
+//!
+//! ### `eprint!`
+//!
+//! The [`eprint!`] and [`eprintln!`] macros are identical to
+//! [`print!`] and [`println!`], respectively, except they emit their
+//! output to stderr.
+//!
+//! ### `format_args!`
+//!
+//! This is a curious macro used to safely pass around
+//! an opaque object describing the format string. This object
+//! does not require any heap allocations to create, and it only
+//! references information on the stack. Under the hood, all of
+//! the related macros are implemented in terms of this. First
+//! off, some example usage is:
+//!
+//! ```
+//! # #![allow(unused_must_use)]
+//! use std::fmt;
+//! use std::io::{self, Write};
+//!
+//! let mut some_writer = io::stdout();
+//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
+//!
+//! fn my_fmt_fn(args: fmt::Arguments) {
+//! write!(&mut io::stdout(), "{}", args);
+//! }
+//! my_fmt_fn(format_args!(", or a {} too", "function"));
+//! ```
+//!
+//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
+//! This structure can then be passed to the [`write`] and [`format`] functions
+//! inside this module in order to process the format string.
+//! The goal of this macro is to go even further in avoiding intermediate
+//! allocations when dealing with format strings.
+//!
+//! For example, a logging library could use the standard formatting syntax, but
+//! it would internally pass around this structure until it has been determined
+//! where output should go to.
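+//!
+//! A minimal sketch of such a function (not taken from any real logging
+//! crate) could look like this:
+//!
+//! ```
+//! use std::fmt;
+//! use std::io::{self, Write};
+//!
+//! fn log(target: &str, args: fmt::Arguments) {
+//!     // Nothing has been allocated for the message at this point; the
+//!     // `Arguments` value is only rendered once the destination is known.
+//!     let _ = writeln!(io::stderr(), "[{}] {}", target, args);
+//! }
+//!
+//! log("demo", format_args!("x = {}", 42));
+//! ```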
+//!
+//! [`fmt::Result`]: Result
+//! [`Result`]: core::result::Result
+//! [`std::fmt::Error`]: Error
+//! [`write!`]: core::write
+//! [`write`]: core::write
+//! [`format!`]: crate::format
+//! [`to_string`]: crate::string::ToString
+//! [`writeln!`]: core::writeln
+//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
+//! [`std::io::Write`]: ../../std/io/trait.Write.html
+//! [`print!`]: ../../std/macro.print.html
+//! [`println!`]: ../../std/macro.println.html
+//! [`eprint!`]: ../../std/macro.eprint.html
+//! [`eprintln!`]: ../../std/macro.eprintln.html
+//! [`format_args!`]: core::format_args
+//! [`fmt::Arguments`]: Arguments
+//! [`format`]: crate::format
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[unstable(feature = "fmt_internals", issue = "none")]
+pub use core::fmt::rt;
+#[stable(feature = "fmt_flags_align", since = "1.28.0")]
+pub use core::fmt::Alignment;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::Error;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{write, ArgumentV1, Arguments};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{Binary, Octal};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{Debug, Display};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{Formatter, Result, Write};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{LowerExp, UpperExp};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{LowerHex, Pointer, UpperHex};
+
+#[cfg(not(no_global_oom_handling))]
+use crate::string;
+
+/// The `format` function takes an [`Arguments`] struct and returns the resulting
+/// formatted string.
+///
+/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::fmt;
+///
+/// let s = fmt::format(format_args!("Hello, {}!", "world"));
+/// assert_eq!(s, "Hello, world!");
+/// ```
+///
+/// Please note that using [`format!`] might be preferable.
+/// Example:
+///
+/// ```
+/// let s = format!("Hello, {}!", "world");
+/// assert_eq!(s, "Hello, world!");
+/// ```
+///
+/// [`format_args!`]: core::format_args
+/// [`format!`]: crate::format
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn format(args: Arguments<'_>) -> string::String {
+ let capacity = args.estimated_capacity();
+ let mut output = string::String::with_capacity(capacity);
+ output.write_fmt(args).expect("a formatting trait implementation returned an error");
+ output
+}
diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs
new file mode 100644
index 000000000000..f109e7902b20
--- /dev/null
+++ b/rust/alloc/lib.rs
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! # The Rust core allocation and collections library
+//!
+//! This library provides smart pointers and collections for managing
+//! heap-allocated values.
+//!
+//! This library, like libcore, normally doesn’t need to be used directly
+//! since its contents are re-exported in the [`std` crate](../std/index.html).
+//! Crates that use the `#![no_std]` attribute, however, will typically
+//! not depend on `std`, so they’d use this crate instead.
+//!
+//! ## Boxed values
+//!
+//! The [`Box`] type is a smart pointer type. There can only be one owner of a
+//! [`Box`], and the owner can decide to mutate the contents, which live on the
+//! heap.
+//!
+//! This type can be sent among threads efficiently as the size of a `Box` value
+//! is the same as that of a pointer. Tree-like data structures are often built
+//! with boxes because each node often has only one owner, the parent.
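+//!
+//! For example:
+//!
+//! ```
+//! let boxed: Box<i32> = Box::new(5);
+//! assert_eq!(*boxed, 5); // the value lives on the heap, owned by `boxed`
+//! ```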
+//!
+//! ## Reference counted pointers
+//!
+//! The [`Rc`] type is a non-threadsafe reference-counted pointer type intended
+//! for sharing memory within a thread. An [`Rc`] pointer wraps a type, `T`, and
+//! only allows access to `&T`, a shared reference.
+//!
+//! This type is useful when inherited mutability (such as using [`Box`]) is too
+//! constraining for an application, and is often paired with the [`Cell`] or
+//! [`RefCell`] types in order to allow mutation.
+//!
+//! ## Atomically reference counted pointers
+//!
+//! The [`Arc`] type is the threadsafe equivalent of the [`Rc`] type. It
+//! provides all the functionality of [`Rc`], except it requires that the
+//! contained type `T` is shareable. Additionally, [`Arc<T>`][`Arc`] is itself
+//! sendable while [`Rc<T>`][`Rc`] is not.
+//!
+//! This type allows for shared access to the contained data, and is often
+//! paired with synchronization primitives such as mutexes to allow mutation of
+//! shared resources.
+//!
+//! ## Collections
+//!
+//! Implementations of the most common general purpose data structures are
+//! defined in this library. They are re-exported through the
+//! [standard collections library](../std/collections/index.html).
+//!
+//! ## Heap interfaces
+//!
+//! The [`alloc`](alloc/index.html) module defines the low-level interface to the
+//! default global allocator. It is not compatible with the libc allocator API.
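+//!
+//! For example, the global allocator can be used directly (this is a
+//! low-level, `unsafe` illustration, mirroring the usual `std::alloc` usage):
+//!
+//! ```
+//! extern crate alloc;
+//!
+//! use alloc::alloc::{alloc, dealloc, Layout};
+//!
+//! unsafe {
+//!     let layout = Layout::new::<u16>();
+//!     let ptr = alloc(layout);
+//!     assert!(!ptr.is_null()); // a null pointer signals allocation failure
+//!
+//!     *(ptr as *mut u16) = 42;
+//!     assert_eq!(*(ptr as *mut u16), 42);
+//!
+//!     dealloc(ptr, layout);
+//! }
+//! ```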
+//!
+//! [`Arc`]: sync
+//! [`Box`]: boxed
+//! [`Cell`]: core::cell
+//! [`Rc`]: rc
+//! [`RefCell`]: core::cell
+
+#![allow(unused_attributes)]
+#![stable(feature = "alloc", since = "1.36.0")]
+#![doc(
+ html_playground_url = "https://play.rust-lang.org/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
+ test(no_crate_inject, attr(allow(unused_variables), deny(warnings)))
+)]
+#![no_std]
+#![needs_allocator]
+#![warn(deprecated_in_future)]
+#![warn(missing_docs)]
+#![warn(missing_debug_implementations)]
+#![allow(explicit_outlives_requirements)]
+#![deny(unsafe_op_in_unsafe_fn)]
+#![feature(rustc_allow_const_fn_unstable)]
+#![cfg_attr(not(test), feature(generator_trait))]
+#![cfg_attr(test, feature(test))]
+#![cfg_attr(test, feature(new_uninit))]
+#![feature(allocator_api)]
+#![feature(array_chunks)]
+#![feature(array_methods)]
+#![feature(array_windows)]
+#![feature(allow_internal_unstable)]
+#![feature(arbitrary_self_types)]
+#![feature(async_stream)]
+#![feature(box_patterns)]
+#![feature(box_syntax)]
+#![feature(cfg_sanitize)]
+#![feature(cfg_target_has_atomic)]
+#![feature(coerce_unsized)]
+#![cfg_attr(not(no_global_oom_handling), feature(const_btree_new))]
+#![feature(const_fn_trait_bound)]
+#![feature(cow_is_borrowed)]
+#![feature(const_cow_is_borrowed)]
+#![feature(destructuring_assignment)]
+#![feature(dispatch_from_dyn)]
+#![feature(core_intrinsics)]
+#![feature(dropck_eyepatch)]
+#![feature(exact_size_is_empty)]
+#![feature(exclusive_range_pattern)]
+#![feature(extend_one)]
+#![feature(fmt_internals)]
+#![feature(fn_traits)]
+#![feature(fundamental)]
+#![feature(inplace_iteration)]
+// Technically, this is a bug in rustdoc: rustdoc sees that the documentation on `#[lang = slice_alloc]`
+// blocks is for `&[T]`, which also has documentation using this feature in `core`, and gets mad
+// that the feature-gate isn't enabled. Ideally, it wouldn't check for the feature gate for docs
+// from other crates, but since this can only appear for lang items, it doesn't seem worth fixing.
+#![feature(intra_doc_pointers)]
+#![feature(iter_zip)]
+#![feature(lang_items)]
+#![feature(layout_for_ptr)]
+#![feature(maybe_uninit_ref)]
+#![feature(negative_impls)]
+#![feature(never_type)]
+#![feature(nll)]
+#![feature(nonnull_slice_from_raw_parts)]
+#![feature(auto_traits)]
+#![feature(option_result_unwrap_unchecked)]
+#![feature(pattern)]
+#![feature(ptr_internals)]
+#![feature(rustc_attrs)]
+#![feature(receiver_trait)]
+#![feature(min_specialization)]
+#![feature(set_ptr_value)]
+#![feature(slice_ptr_get)]
+#![feature(slice_ptr_len)]
+#![feature(slice_range)]
+#![feature(staged_api)]
+#![feature(str_internals)]
+#![feature(trusted_len)]
+#![feature(unboxed_closures)]
+#![feature(unicode_internals)]
+#![feature(unsize)]
+#![feature(unsized_fn_params)]
+#![feature(allocator_internals)]
+#![feature(slice_partition_dedup)]
+#![feature(maybe_uninit_extra, maybe_uninit_slice, maybe_uninit_uninit_array)]
+#![feature(alloc_layout_extra)]
+#![feature(trusted_random_access)]
+#![cfg_attr(bootstrap, feature(try_trait))]
+#![cfg_attr(not(bootstrap), feature(try_trait_v2))]
+#![feature(min_type_alias_impl_trait)]
+#![feature(associated_type_bounds)]
+#![feature(slice_group_by)]
+#![feature(decl_macro)]
+#![feature(bindings_after_at)]
+// Allow testing this library
+
+#[cfg(test)]
+#[macro_use]
+extern crate std;
+#[cfg(test)]
+extern crate test;
+
+// Module with internal macros used by other modules (needs to be included before other modules).
+#[macro_use]
+mod macros;
+
+// Heaps provided for low-level allocation strategies
+
+pub mod alloc;
+
+// Primitive types using the heaps above
+
+// Need to conditionally define the mod from `boxed.rs` to avoid
+// duplicating the lang-items when building in test cfg; but also need
+// to allow code to have `use boxed::Box;` declarations.
+#[cfg(not(test))]
+pub mod boxed;
+#[cfg(test)]
+mod boxed {
+ pub use std::boxed::Box;
+}
+pub mod borrow;
+pub mod collections;
+pub mod fmt;
+pub mod prelude;
+pub mod raw_vec;
+pub mod rc;
+pub mod slice;
+pub mod str;
+pub mod string;
+#[cfg(target_has_atomic = "ptr")]
+pub mod sync;
+#[cfg(all(not(no_global_oom_handling), target_has_atomic = "ptr"))]
+pub mod task;
+#[cfg(test)]
+mod tests;
+pub mod vec;
+
+#[doc(hidden)]
+#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
+pub mod __export {
+ pub use core::format_args;
+}
diff --git a/rust/alloc/macros.rs b/rust/alloc/macros.rs
new file mode 100644
index 000000000000..1dea4ec36c3e
--- /dev/null
+++ b/rust/alloc/macros.rs
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+/// Creates a [`Vec`] containing the arguments.
+///
+/// `vec!` allows `Vec`s to be defined with the same syntax as array expressions.
+/// There are two forms of this macro:
+///
+/// - Create a [`Vec`] containing a given list of elements:
+///
+/// ```
+/// let v = vec![1, 2, 3];
+/// assert_eq!(v[0], 1);
+/// assert_eq!(v[1], 2);
+/// assert_eq!(v[2], 3);
+/// ```
+///
+/// - Create a [`Vec`] from a given element and size:
+///
+/// ```
+/// let v = vec![1; 3];
+/// assert_eq!(v, [1, 1, 1]);
+/// ```
+///
+/// Note that unlike array expressions this syntax supports all elements
+/// which implement [`Clone`] and the number of elements doesn't have to be
+/// a constant.
+///
+/// This will use `clone` to duplicate an expression, so one should be careful
+/// using this with types having a nonstandard `Clone` implementation. For
+/// example, `vec![Rc::new(1); 5]` will create a vector of five references
+/// to the same boxed integer value, not five references pointing to independently
+/// boxed integers.
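+///
+/// This can be observed through the reference count:
+///
+/// ```
+/// use std::rc::Rc;
+///
+/// let v = vec![Rc::new(1); 3];
+/// assert_eq!(Rc::strong_count(&v[0]), 3); // all three elements share one allocation
+/// ```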
+///
+/// Also, note that `vec![expr; 0]` is allowed, and produces an empty vector.
+/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
+/// be mindful of side effects.
+///
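+/// For example:
+///
+/// ```
+/// let v = vec![String::from("hello"); 0]; // the `String` is created, then dropped
+/// assert!(v.is_empty());
+/// ```
+///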
+/// [`Vec`]: crate::vec::Vec
+#[cfg(not(test))]
+#[doc(alias = "alloc")]
+#[doc(alias = "malloc")]
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow_internal_unstable(box_syntax, liballoc_internals)]
+macro_rules! vec {
+ () => (
+ $crate::__rust_force_expr!($crate::vec::Vec::new())
+ );
+ ($elem:expr; $n:expr) => (
+ $crate::__rust_force_expr!($crate::vec::from_elem($elem, $n))
+ );
+ ($($x:expr),+ $(,)?) => (
+ $crate::__rust_force_expr!(<[_]>::into_vec(box [$($x),+]))
+ );
+}
+
+// HACK(japaric): with cfg(test) the inherent `[T]::into_vec` method, which is
+// required for this macro definition, is not available. Instead use the
+// `slice::into_vec` function which is only available with cfg(test)
+// NB see the slice::hack module in slice.rs for more information
+#[cfg(test)]
+macro_rules! vec {
+ () => (
+ $crate::vec::Vec::new()
+ );
+ ($elem:expr; $n:expr) => (
+ $crate::vec::from_elem($elem, $n)
+ );
+ ($($x:expr),*) => (
+ $crate::slice::into_vec(box [$($x),*])
+ );
+ ($($x:expr,)*) => (vec![$($x),*])
+}
+
+/// Creates a `String` using interpolation of runtime expressions.
+///
+/// The first argument `format!` receives is a format string. This must be a string
+/// literal. The power of the formatting string is in the `{}`s contained.
+///
+/// Additional parameters passed to `format!` replace the `{}`s within the
+/// formatting string in the order given unless named or positional parameters
+/// are used; see [`std::fmt`] for more information.
+///
+/// A common use for `format!` is concatenation and interpolation of strings.
+/// The same convention is used with [`print!`] and [`write!`] macros,
+/// depending on the intended destination of the string.
+///
+/// To convert a single value to a string, use the [`to_string`] method. This
+/// will use the [`Display`] formatting trait.
+///
+/// [`std::fmt`]: ../std/fmt/index.html
+/// [`print!`]: ../std/macro.print.html
+/// [`write!`]: core::write
+/// [`to_string`]: crate::string::ToString
+/// [`Display`]: core::fmt::Display
+///
+/// # Panics
+///
+/// `format!` panics if a formatting trait implementation returns an error.
+/// This indicates an incorrect implementation
+/// since `fmt::Write for String` never returns an error itself.
+///
+/// # Examples
+///
+/// ```
+/// format!("test");
+/// format!("hello {}", "world!");
+/// format!("x = {}, y = {y}", 10, y = 30);
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "format_macro")]
+macro_rules! format {
+ ($($arg:tt)*) => {{
+ let res = $crate::fmt::format($crate::__export::format_args!($($arg)*));
+ res
+ }}
+}
+
+/// Force AST node to an expression to improve diagnostics in pattern position.
+#[doc(hidden)]
+#[macro_export]
+#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
+macro_rules! __rust_force_expr {
+ ($e:expr) => {
+ $e
+ };
+}
diff --git a/rust/alloc/prelude/mod.rs b/rust/alloc/prelude/mod.rs
new file mode 100644
index 000000000000..a64a1843760e
--- /dev/null
+++ b/rust/alloc/prelude/mod.rs
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! The alloc Prelude
+//!
+//! The purpose of this module is to alleviate imports of commonly-used
+//! items of the `alloc` crate by adding a glob import to the top of modules:
+//!
+//! ```
+//! # #![allow(unused_imports)]
+//! #![feature(alloc_prelude)]
+//! extern crate alloc;
+//! use alloc::prelude::v1::*;
+//! ```
+
+#![unstable(feature = "alloc_prelude", issue = "58935")]
+
+pub mod v1;
diff --git a/rust/alloc/prelude/v1.rs b/rust/alloc/prelude/v1.rs
new file mode 100644
index 000000000000..48d75431c0d1
--- /dev/null
+++ b/rust/alloc/prelude/v1.rs
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! The first version of the prelude of the `alloc` crate.
+//!
+//! See the [module-level documentation](../index.html) for more.
+
+#![unstable(feature = "alloc_prelude", issue = "58935")]
+
+#[unstable(feature = "alloc_prelude", issue = "58935")]
+pub use crate::borrow::ToOwned;
+#[unstable(feature = "alloc_prelude", issue = "58935")]
+pub use crate::boxed::Box;
+#[unstable(feature = "alloc_prelude", issue = "58935")]
+pub use crate::string::{String, ToString};
+#[unstable(feature = "alloc_prelude", issue = "58935")]
+pub use crate::vec::Vec;
diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs
new file mode 100644
index 000000000000..629dbd3927d1
--- /dev/null
+++ b/rust/alloc/raw_vec.rs
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")]
+#![doc(hidden)]
+
+use core::alloc::LayoutError;
+use core::cmp;
+use core::intrinsics;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::Drop;
+use core::ptr::{self, NonNull, Unique};
+use core::slice;
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::handle_alloc_error;
+use crate::alloc::{Allocator, Global, Layout};
+use crate::boxed::Box;
+use crate::collections::TryReserveError::{self, *};
+
+#[cfg(test)]
+mod tests;
+
+#[allow(dead_code)]
+enum AllocInit {
+ /// The contents of the new memory are uninitialized.
+ Uninitialized,
+ /// The new memory is guaranteed to be zeroed.
+ Zeroed,
+}
+
+/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
+/// a buffer of memory on the heap without having to worry about all the corner cases
+/// involved. This type is excellent for building your own data structures like `Vec` and `VecDeque`.
+/// In particular:
+///
+/// * Produces `Unique::dangling()` on zero-sized types.
+/// * Produces `Unique::dangling()` on zero-length allocations.
+/// * Avoids freeing `Unique::dangling()`.
+/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
+/// * Guards against 32-bit systems allocating more than `isize::MAX` bytes.
+/// * Guards against overflowing your length.
+/// * Calls `handle_alloc_error` for fallible allocations.
+/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
+/// * Uses the excess returned from the allocator to use the largest available capacity.
+///
+/// This type does not in any way inspect the memory that it manages. When dropped it *will*
+/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
+/// to handle the actual things *stored* inside of a `RawVec`.
+///
+/// Note that the excess of zero-sized types is always infinite, so `capacity()` always returns
+/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
+/// `Box<[T]>`, since `capacity()` won't yield the length.
+#[allow(missing_debug_implementations)]
+pub struct RawVec<T, A: Allocator = Global> {
+ ptr: Unique<T>,
+ cap: usize,
+ alloc: A,
+}
+
+impl<T> RawVec<T, Global> {
+ /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
+ /// they cannot call `Self::new()`.
+ ///
+ /// If you change `RawVec<T>::new` or dependencies, please take care to not introduce anything
+ /// that would truly const-call something unstable.
+ pub const NEW: Self = Self::new();
+
+ /// Creates the biggest possible `RawVec` (on the system heap)
+ /// without allocating. If `T` has positive size, then this makes a
+ /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
+ /// `RawVec` with capacity `usize::MAX`. Useful for implementing
+ /// delayed allocation.
+ pub const fn new() -> Self {
+ Self::new_in(Global)
+ }
+
+ /// Creates a `RawVec` (on the system heap) with exactly the
+ /// capacity and alignment requirements for a `[T; capacity]`. This is
+ /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
+ /// zero-sized. Note that if `T` is zero-sized this means you will
+ /// *not* get a `RawVec` with the requested capacity.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the requested capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self::with_capacity_in(capacity, Global)
+ }
+
+ /// Tries to create a `RawVec` (on the system heap) with exactly the
+ /// capacity and alignment requirements for a `[T; capacity]`. This is
+ /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
+ /// zero-sized. Note that if `T` is zero-sized this means you will
+ /// *not* get a `RawVec` with the requested capacity.
+ #[inline]
+ pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
+ Self::try_with_capacity_in(capacity, Global)
+ }
+
+ /// Like `with_capacity`, but guarantees the buffer is zeroed.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn with_capacity_zeroed(capacity: usize) -> Self {
+ Self::with_capacity_zeroed_in(capacity, Global)
+ }
+
+ /// Reconstitutes a `RawVec` from a pointer and capacity.
+ ///
+ /// # Safety
+ ///
+ /// The `ptr` must be allocated (on the system heap), and with the given `capacity`.
+ /// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
+ /// systems). ZST vectors may have a capacity up to `usize::MAX`.
+ /// If the `ptr` and `capacity` come from a `RawVec`, then this is guaranteed.
+ #[inline]
+ pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self {
+ unsafe { Self::from_raw_parts_in(ptr, capacity, Global) }
+ }
+}
+
+impl<T, A: Allocator> RawVec<T, A> {
+ // Tiny Vecs are dumb. Skip to:
+ // - 8 if the element size is 1, because any heap allocator is likely
+ // to round up a request of less than 8 bytes to at least 8 bytes.
+ // - 4 if elements are moderate-sized (<= 1 KiB).
+ // - 1 otherwise, to avoid wasting too much space for very short Vecs.
+ const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
+ 8
+ } else if mem::size_of::<T>() <= 1024 {
+ 4
+ } else {
+ 1
+ };
+
+ /// Like `new`, but parameterized over the choice of allocator for
+ /// the returned `RawVec`.
+ #[rustc_allow_const_fn_unstable(const_fn)]
+ pub const fn new_in(alloc: A) -> Self {
+ // `cap: 0` means "unallocated". zero-sized types are ignored.
+ Self { ptr: Unique::dangling(), cap: 0, alloc }
+ }
+
+ /// Like `with_capacity`, but parameterized over the choice of
+ /// allocator for the returned `RawVec`.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+ Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
+ }
+
+ /// Like `try_with_capacity`, but parameterized over the choice of
+ /// allocator for the returned `RawVec`.
+ #[inline]
+ pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
+ Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc)
+ }
+
+ /// Like `with_capacity_zeroed`, but parameterized over the choice
+ /// of allocator for the returned `RawVec`.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
+ Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
+ }
+
+ /// Converts a `Box<[T]>` into a `RawVec<T>`.
+ pub fn from_box(slice: Box<[T], A>) -> Self {
+ unsafe {
+ let (slice, alloc) = Box::into_raw_with_allocator(slice);
+ RawVec::from_raw_parts_in(slice.as_mut_ptr(), slice.len(), alloc)
+ }
+ }
+
+ /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
+ ///
+ /// Note that this will correctly reconstitute any `cap` changes
+ /// that may have been performed. (See description of type for details.)
+ ///
+ /// # Safety
+ ///
+ /// * `len` must be greater than or equal to the most recently requested capacity, and
+ /// * `len` must be less than or equal to `self.capacity()`.
+ ///
+ /// Note that the requested capacity and `self.capacity()` could differ, as
+ /// an allocator could overallocate and return a greater memory block than requested.
+ pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
+ // Sanity-check one half of the safety requirement (we cannot check the other half).
+ debug_assert!(
+ len <= self.capacity(),
+ "`len` must be smaller than or equal to `self.capacity()`"
+ );
+
+ let me = ManuallyDrop::new(self);
+ unsafe {
+ let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
+ Box::from_raw_in(slice, ptr::read(&me.alloc))
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
+ if mem::size_of::<T>() == 0 {
+ Self::new_in(alloc)
+ } else {
+ // We avoid `unwrap_or_else` here because it bloats the amount of
+ // LLVM IR generated.
+ let layout = match Layout::array::<T>(capacity) {
+ Ok(layout) => layout,
+ Err(_) => capacity_overflow(),
+ };
+ match alloc_guard(layout.size()) {
+ Ok(_) => {}
+ Err(_) => capacity_overflow(),
+ }
+ let result = match init {
+ AllocInit::Uninitialized => alloc.allocate(layout),
+ AllocInit::Zeroed => alloc.allocate_zeroed(layout),
+ };
+ let ptr = match result {
+ Ok(ptr) => ptr,
+ Err(_) => handle_alloc_error(layout),
+ };
+
+ Self {
+ ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
+ cap: Self::capacity_from_bytes(ptr.len()),
+ alloc,
+ }
+ }
+ }
+
+ fn try_allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result<Self, TryReserveError> {
+ if mem::size_of::<T>() == 0 {
+ return Ok(Self::new_in(alloc));
+ }
+
+ let layout = Layout::array::<T>(capacity)?;
+ alloc_guard(layout.size())?;
+ let result = match init {
+ AllocInit::Uninitialized => alloc.allocate(layout),
+ AllocInit::Zeroed => alloc.allocate_zeroed(layout),
+ };
+ let ptr = match result {
+ Ok(ptr) => ptr,
+ Err(_) => return Err(TryReserveError::AllocError { layout, non_exhaustive: () }),
+ };
+
+ Ok(Self {
+ ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
+ cap: Self::capacity_from_bytes(ptr.len()),
+ alloc,
+ })
+ }
+
+ /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
+ ///
+ /// # Safety
+ ///
+ /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
+ /// `capacity`.
+ /// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
+ /// systems). ZST vectors may have a capacity up to `usize::MAX`.
+ /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
+ /// guaranteed.
+ #[inline]
+ pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
+ Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
+ }
+
+ /// Gets a raw pointer to the start of the allocation. Note that this is
+ /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
+ /// be careful.
+ #[inline]
+ pub fn ptr(&self) -> *mut T {
+ self.ptr.as_ptr()
+ }
+
+ /// Gets the capacity of the allocation.
+ ///
+ /// This will always be `usize::MAX` if `T` is zero-sized.
+ #[inline(always)]
+ pub fn capacity(&self) -> usize {
+ if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
+ }
+
+ /// Returns a shared reference to the allocator backing this `RawVec`.
+ pub fn allocator(&self) -> &A {
+ &self.alloc
+ }
+
+ fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
+ if mem::size_of::<T>() == 0 || self.cap == 0 {
+ None
+ } else {
+ // We have an allocated chunk of memory, so we can bypass runtime
+ // checks to get our current layout.
+ unsafe {
+ let align = mem::align_of::<T>();
+ let size = mem::size_of::<T>() * self.cap;
+ let layout = Layout::from_size_align_unchecked(size, align);
+ Some((self.ptr.cast().into(), layout))
+ }
+ }
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold `len +
+ /// additional` elements. If it doesn't already have enough capacity, will
+ /// reallocate enough space plus comfortable slack space to get amortized
+ /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
+ /// itself to panic.
+ ///
+ /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe
+ /// code *you* write that relies on the behavior of this function may break.
+ ///
+ /// This is ideal for implementing a bulk-push operation like `extend`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(raw_vec_internals)]
+ /// # extern crate alloc;
+ /// # use std::ptr;
+ /// # use alloc::raw_vec::RawVec;
+ /// struct MyVec<T> {
+ /// buf: RawVec<T>,
+ /// len: usize,
+ /// }
+ ///
+ /// impl<T: Clone> MyVec<T> {
+ /// pub fn push_all(&mut self, elems: &[T]) {
+ /// self.buf.reserve(self.len, elems.len());
+ /// // reserve would have aborted or panicked if the len exceeded
+ /// // `isize::MAX` so this is safe to do unchecked now.
+ /// for x in elems {
+ /// unsafe {
+ /// ptr::write(self.buf.ptr().add(self.len), x.clone());
+ /// }
+ /// self.len += 1;
+ /// }
+ /// }
+ /// }
+ /// # fn main() {
+ /// # let mut vector = MyVec { buf: RawVec::new(), len: 0 };
+ /// # vector.push_all(&[1, 3, 5, 7, 9]);
+ /// # }
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn reserve(&mut self, len: usize, additional: usize) {
+ // Callers expect this function to be very cheap when there is already sufficient capacity.
+ // Therefore, we move all the resizing and error-handling logic from grow_amortized and
+ // handle_reserve behind a call, while making sure that this function is likely to be
+ // inlined as just a comparison and a call if the comparison fails.
+ #[cold]
+ fn do_reserve_and_handle<T, A: Allocator>(
+ slf: &mut RawVec<T, A>,
+ len: usize,
+ additional: usize,
+ ) {
+ handle_reserve(slf.grow_amortized(len, additional));
+ }
+
+ if self.needs_to_grow(len, additional) {
+ do_reserve_and_handle(self, len, additional);
+ }
+ }
+
+ /// The same as `reserve`, but returns on errors instead of panicking or aborting.
+ pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ if self.needs_to_grow(len, additional) {
+ self.grow_amortized(len, additional)
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold `len +
+ /// additional` elements. If it doesn't already, will reallocate the
+ /// minimum possible amount of memory necessary. Generally this will be
+ /// exactly the amount of memory necessary, but in principle the allocator
+ /// is free to give back more than we asked for.
+ ///
+ /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe code
+ /// *you* write that relies on the behavior of this function may break.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ pub fn reserve_exact(&mut self, len: usize, additional: usize) {
+ handle_reserve(self.try_reserve_exact(len, additional));
+ }
+
+ /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
+ pub fn try_reserve_exact(
+ &mut self,
+ len: usize,
+ additional: usize,
+ ) -> Result<(), TryReserveError> {
+ if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
+ }
+
+ /// Shrinks the allocation down to the specified amount. If the given amount
+ /// is 0, actually completely deallocates.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the given amount is *larger* than the current capacity.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ pub fn shrink_to_fit(&mut self, amount: usize) {
+ handle_reserve(self.shrink(amount));
+ }
+
+ /// Tries to shrink the allocation down to the specified amount. If the given amount
+ /// is 0, actually completely deallocates.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the given amount is *larger* than the current capacity.
+ pub fn try_shrink_to_fit(&mut self, amount: usize) -> Result<(), TryReserveError> {
+ self.shrink(amount)
+ }
+}
+
+impl<T, A: Allocator> RawVec<T, A> {
+ /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
+ /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
+ fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
+ additional > self.capacity().wrapping_sub(len)
+ }
+
+ fn capacity_from_bytes(excess: usize) -> usize {
+ debug_assert_ne!(mem::size_of::<T>(), 0);
+ excess / mem::size_of::<T>()
+ }
+
+ fn set_ptr(&mut self, ptr: NonNull<[u8]>) {
+ self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
+ self.cap = Self::capacity_from_bytes(ptr.len());
+ }
+
+ // This method is usually instantiated many times. So we want it to be as
+ // small as possible, to improve compile times. But we also want as much of
+ // its contents to be statically computable as possible, to make the
+ // generated code run faster. Therefore, this method is carefully written
+ // so that all of the code that depends on `T` is within it, while as much
+ // of the code that doesn't depend on `T` as possible is in functions that
+ // are non-generic over `T`.
+ fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ // This is ensured by the calling contexts.
+ debug_assert!(additional > 0);
+
+ if mem::size_of::<T>() == 0 {
+ // Since we return a capacity of `usize::MAX` when the element size is
+ // 0, getting to here necessarily means the `RawVec` is overfull.
+ return Err(CapacityOverflow);
+ }
+
+ // Nothing we can really do about these checks, sadly.
+ let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+
+ // This guarantees exponential growth. The doubling cannot overflow
+ // because `cap <= isize::MAX` and the type of `cap` is `usize`.
+ let cap = cmp::max(self.cap * 2, required_cap);
+ let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
+
+ let new_layout = Layout::array::<T>(cap);
+
+ // `finish_grow` is non-generic over `T`.
+ let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
+ self.set_ptr(ptr);
+ Ok(())
+ }
+
+ // The constraints on this method are much the same as those on
+ // `grow_amortized`, but this method is usually instantiated less often so
+ // it's less critical.
+ fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ if mem::size_of::<T>() == 0 {
+ // Since we return a capacity of `usize::MAX` when the type size is
+ // 0, getting to here necessarily means the `RawVec` is overfull.
+ return Err(CapacityOverflow);
+ }
+
+ let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+ let new_layout = Layout::array::<T>(cap);
+
+ // `finish_grow` is non-generic over `T`.
+ let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
+ self.set_ptr(ptr);
+ Ok(())
+ }
+
+ fn shrink(&mut self, amount: usize) -> Result<(), TryReserveError> {
+ assert!(amount <= self.capacity(), "Tried to shrink to a larger capacity");
+
+ let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
+ let new_size = amount * mem::size_of::<T>();
+
+ let ptr = unsafe {
+ let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+ self.alloc.shrink(ptr, layout, new_layout).map_err(|_| TryReserveError::AllocError {
+ layout: new_layout,
+ non_exhaustive: (),
+ })?
+ };
+ self.set_ptr(ptr);
+ Ok(())
+ }
+}
+
+// This function is outside `RawVec` to minimize compile times. See the comment
+// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
+// significant, because the number of different `A` types seen in practice is
+// much smaller than the number of `T` types.)
+#[inline(never)]
+fn finish_grow<A>(
+ new_layout: Result<Layout, LayoutError>,
+ current_memory: Option<(NonNull<u8>, Layout)>,
+ alloc: &mut A,
+) -> Result<NonNull<[u8]>, TryReserveError>
+where
+ A: Allocator,
+{
+ // Check for the error here to minimize the size of `RawVec::grow_*`.
+ let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
+
+ alloc_guard(new_layout.size())?;
+
+ let memory = if let Some((ptr, old_layout)) = current_memory {
+ debug_assert_eq!(old_layout.align(), new_layout.align());
+ unsafe {
+ // The allocator checks for alignment equality
+ intrinsics::assume(old_layout.align() == new_layout.align());
+ alloc.grow(ptr, old_layout, new_layout)
+ }
+ } else {
+ alloc.allocate(new_layout)
+ };
+
+ memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })
+}
+
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
+ /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
+ fn drop(&mut self) {
+ if let Some((ptr, layout)) = self.current_memory() {
+ unsafe { self.alloc.deallocate(ptr, layout) }
+ }
+ }
+}
+
+// Central function for reserve error handling.
+#[cfg(not(no_global_oom_handling))]
+#[inline]
+fn handle_reserve(result: Result<(), TryReserveError>) {
+ match result {
+ Err(CapacityOverflow) => capacity_overflow(),
+ Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+ Ok(()) => { /* yay */ }
+ }
+}
+
+// We need to guarantee the following:
+// * We don't ever allocate `> isize::MAX` byte-size objects.
+// * We don't overflow `usize::MAX` and actually allocate too little.
+//
+// On 64-bit we just need to check for overflow since trying to allocate
+// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
+// an extra guard for this in case we're running on a platform which can use
+// all 4GB in user-space, e.g., PAE or x32.
+
+#[inline]
+fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
+ if usize::BITS < 64 && alloc_size > isize::MAX as usize {
+ Err(CapacityOverflow)
+ } else {
+ Ok(())
+ }
+}
+
+// One central function responsible for reporting capacity overflows. This'll
+// ensure that the code generation related to these panics is minimal as there's
+// only one location which panics rather than a bunch throughout the module.
+#[cfg(not(no_global_oom_handling))]
+fn capacity_overflow() -> ! {
+ panic!("capacity overflow");
+}
diff --git a/rust/alloc/rc.rs b/rust/alloc/rc.rs
new file mode 100644
index 000000000000..7344cd9a449e
--- /dev/null
+++ b/rust/alloc/rc.rs
@@ -0,0 +1,2539 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Single-threaded reference-counting pointers. 'Rc' stands for 'Reference
+//! Counted'.
+//!
+//! The type [`Rc<T>`][`Rc`] provides shared ownership of a value of type `T`,
+//! allocated in the heap. Invoking [`clone`][clone] on [`Rc`] produces a new
+//! pointer to the same allocation in the heap. When the last [`Rc`] pointer to a
+//! given allocation is destroyed, the value stored in that allocation (often
+//! referred to as "inner value") is also dropped.
+//!
+//! Shared references in Rust disallow mutation by default, and [`Rc`]
+//! is no exception: you cannot generally obtain a mutable reference to
+//! something inside an [`Rc`]. If you need mutability, put a [`Cell`]
+//! or [`RefCell`] inside the [`Rc`]; see [an example of mutability
+//! inside an `Rc`][mutability].
+//!
+//! [`Rc`] uses non-atomic reference counting. This means that overhead is very
+//! low, but an [`Rc`] cannot be sent between threads, and consequently [`Rc`]
+//! does not implement [`Send`][send]. As a result, the Rust compiler
+//! will check *at compile time* that you are not sending [`Rc`]s between
+//! threads. If you need multi-threaded, atomic reference counting, use
+//! [`sync::Arc`][arc].
+//!
+//! The [`downgrade`][downgrade] method can be used to create a non-owning
+//! [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
+//! to an [`Rc`], but this will return [`None`] if the value stored in the allocation has
+//! already been dropped. In other words, `Weak` pointers do not keep the value
+//! inside the allocation alive; however, they *do* keep the allocation
+//! (the backing store for the inner value) alive.
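+//!
+//! For example:
+//!
+//! ```
+//! use std::rc::Rc;
+//!
+//! let strong = Rc::new(5);
+//! let weak = Rc::downgrade(&strong);
+//! assert!(weak.upgrade().is_some()); // the value is still alive
+//! drop(strong);
+//! assert!(weak.upgrade().is_none()); // the value has been dropped
+//! ```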
+//!
+//! A cycle between [`Rc`] pointers will never be deallocated. For this reason,
+//! [`Weak`] is used to break cycles. For example, a tree could have strong
+//! [`Rc`] pointers from parent nodes to children, and [`Weak`] pointers from
+//! children back to their parents.
+//!
+//! `Rc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
+//! so you can call `T`'s methods on a value of type [`Rc<T>`][`Rc`]. To avoid name
+//! clashes with `T`'s methods, the methods of [`Rc<T>`][`Rc`] itself are associated
+//! functions, called using [fully qualified syntax]:
+//!
+//! ```
+//! use std::rc::Rc;
+//!
+//! let my_rc = Rc::new(());
+//! Rc::downgrade(&my_rc);
+//! ```
+//!
+//! `Rc<T>`'s implementations of traits like `Clone` may also be called using
+//! fully qualified syntax. Some people prefer to use fully qualified syntax,
+//! while others prefer using method-call syntax.
+//!
+//! ```
+//! use std::rc::Rc;
+//!
+//! let rc = Rc::new(());
+//! // Method-call syntax
+//! let rc2 = rc.clone();
+//! // Fully qualified syntax
+//! let rc3 = Rc::clone(&rc);
+//! ```
+//!
+//! [`Weak<T>`][`Weak`] does not auto-dereference to `T`, because the inner value may have
+//! already been dropped.
+//!
+//! # Cloning references
+//!
+//! Creating a new reference to the same allocation as an existing reference counted pointer
+//! is done using the `Clone` trait implemented for [`Rc<T>`][`Rc`] and [`Weak<T>`][`Weak`].
+//!
+//! ```
+//! use std::rc::Rc;
+//!
+//! let foo = Rc::new(vec![1.0, 2.0, 3.0]);
+//! // The two syntaxes below are equivalent.
+//! let a = foo.clone();
+//! let b = Rc::clone(&foo);
+//! // a and b both point to the same memory location as foo.
+//! ```
+//!
+//! The `Rc::clone(&from)` syntax is the most idiomatic because it conveys more explicitly
+//! the meaning of the code. In the example above, this syntax makes it easier to see that
+//! this code is creating a new reference rather than copying the whole content of `foo`.
+//!
+//! # Examples
+//!
+//! Consider a scenario where a set of `Gadget`s are owned by a given `Owner`.
+//! We want to have our `Gadget`s point to their `Owner`. We can't do this with
+//! unique ownership, because more than one gadget may belong to the same
+//! `Owner`. [`Rc`] allows us to share an `Owner` between multiple `Gadget`s,
+//! and have the `Owner` remain allocated as long as any `Gadget` points at it.
+//!
+//! ```
+//! use std::rc::Rc;
+//!
+//! struct Owner {
+//! name: String,
+//! // ...other fields
+//! }
+//!
+//! struct Gadget {
+//! id: i32,
+//! owner: Rc<Owner>,
+//! // ...other fields
+//! }
+//!
+//! fn main() {
+//! // Create a reference-counted `Owner`.
+//! let gadget_owner: Rc<Owner> = Rc::new(
+//! Owner {
+//! name: "Gadget Man".to_string(),
+//! }
+//! );
+//!
+//! // Create `Gadget`s belonging to `gadget_owner`. Cloning the `Rc<Owner>`
+//! // gives us a new pointer to the same `Owner` allocation, incrementing
+//! // the reference count in the process.
+//! let gadget1 = Gadget {
+//! id: 1,
+//! owner: Rc::clone(&gadget_owner),
+//! };
+//! let gadget2 = Gadget {
+//! id: 2,
+//! owner: Rc::clone(&gadget_owner),
+//! };
+//!
+//! // Dispose of our local variable `gadget_owner`.
+//! drop(gadget_owner);
+//!
+//! // Despite dropping `gadget_owner`, we're still able to print out the name
+//! // of the `Owner` of the `Gadget`s. This is because we've only dropped a
+//! // single `Rc<Owner>`, not the `Owner` it points to. As long as there are
+//! // other `Rc<Owner>` pointing at the same `Owner` allocation, it will remain
+//! // live. The field projection `gadget1.owner.name` works because
+//! // `Rc<Owner>` automatically dereferences to `Owner`.
+//! println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name);
+//! println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name);
+//!
+//! // At the end of the function, `gadget1` and `gadget2` are destroyed, and
+//! // with them the last counted references to our `Owner`. Gadget Man now
+//! // gets destroyed as well.
+//! }
+//! ```
+//!
+//! If our requirements change, and we also need to be able to traverse from
+//! `Owner` to `Gadget`, we will run into problems. An [`Rc`] pointer from `Owner`
+//! to `Gadget` introduces a cycle. This means that their
+//! reference counts can never reach 0, and the allocation will never be destroyed:
+//! a memory leak. In order to get around this, we can use [`Weak`]
+//! pointers.
+//!
+//! Rust actually makes it somewhat difficult to produce this loop in the first
+//! place. In order to end up with two values that point at each other, one of
+//! them needs to be mutable. This is difficult because [`Rc`] enforces
+//! memory safety by only giving out shared references to the value it wraps,
+//! and these don't allow direct mutation. We need to wrap the part of the
+//! value we wish to mutate in a [`RefCell`], which provides *interior
+//! mutability*: a method to achieve mutability through a shared reference.
+//! [`RefCell`] enforces Rust's borrowing rules at runtime.
+//!
+//! ```
+//! use std::rc::Rc;
+//! use std::rc::Weak;
+//! use std::cell::RefCell;
+//!
+//! struct Owner {
+//! name: String,
+//! gadgets: RefCell<Vec<Weak<Gadget>>>,
+//! // ...other fields
+//! }
+//!
+//! struct Gadget {
+//! id: i32,
+//! owner: Rc<Owner>,
+//! // ...other fields
+//! }
+//!
+//! fn main() {
+//! // Create a reference-counted `Owner`. Note that we've put the `Owner`'s
+//! // vector of `Gadget`s inside a `RefCell` so that we can mutate it through
+//! // a shared reference.
+//! let gadget_owner: Rc<Owner> = Rc::new(
+//! Owner {
+//! name: "Gadget Man".to_string(),
+//! gadgets: RefCell::new(vec![]),
+//! }
+//! );
+//!
+//! // Create `Gadget`s belonging to `gadget_owner`, as before.
+//! let gadget1 = Rc::new(
+//! Gadget {
+//! id: 1,
+//! owner: Rc::clone(&gadget_owner),
+//! }
+//! );
+//! let gadget2 = Rc::new(
+//! Gadget {
+//! id: 2,
+//! owner: Rc::clone(&gadget_owner),
+//! }
+//! );
+//!
+//! // Add the `Gadget`s to their `Owner`.
+//! {
+//! let mut gadgets = gadget_owner.gadgets.borrow_mut();
+//! gadgets.push(Rc::downgrade(&gadget1));
+//! gadgets.push(Rc::downgrade(&gadget2));
+//!
+//! // `RefCell` dynamic borrow ends here.
+//! }
+//!
+//! // Iterate over our `Gadget`s, printing their details out.
+//! for gadget_weak in gadget_owner.gadgets.borrow().iter() {
+//!
+//! // `gadget_weak` is a `Weak<Gadget>`. Since `Weak` pointers can't
+//! // guarantee the allocation still exists, we need to call
+//! // `upgrade`, which returns an `Option<Rc<Gadget>>`.
+//! //
+//! // In this case we know the allocation still exists, so we simply
+//! // `unwrap` the `Option`. In a more complicated program, you might
+//! // need graceful error handling for a `None` result.
+//!
+//! let gadget = gadget_weak.upgrade().unwrap();
+//! println!("Gadget {} owned by {}", gadget.id, gadget.owner.name);
+//! }
+//!
+//! // At the end of the function, `gadget_owner`, `gadget1`, and `gadget2`
+//! // are destroyed. There are now no strong (`Rc`) pointers to the
+//! // gadgets, so they are destroyed. This zeroes the reference count on
+//! // Gadget Man, so he gets destroyed as well.
+//! }
+//! ```
+//!
+//! [clone]: Clone::clone
+//! [`Cell`]: core::cell::Cell
+//! [`RefCell`]: core::cell::RefCell
+//! [send]: core::marker::Send
+//! [arc]: crate::sync::Arc
+//! [`Deref`]: core::ops::Deref
+//! [downgrade]: Rc::downgrade
+//! [upgrade]: Weak::upgrade
+//! [mutability]: core::cell#introducing-mutability-inside-of-something-immutable
+//! [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(test))]
+use crate::boxed::Box;
+#[cfg(test)]
+use std::boxed::Box;
+
+use core::any::Any;
+use core::borrow;
+use core::cell::Cell;
+use core::cmp::Ordering;
+use core::convert::{From, TryFrom};
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::intrinsics::abort;
+#[cfg(not(no_global_oom_handling))]
+use core::iter;
+use core::marker::{self, PhantomData, Unpin, Unsize};
+#[cfg(not(no_global_oom_handling))]
+use core::mem::size_of_val;
+use core::mem::{self, align_of_val_raw, forget};
+use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
+#[cfg(not(no_global_oom_handling))]
+use core::pin::Pin;
+use core::ptr::{self, NonNull};
+#[cfg(not(no_global_oom_handling))]
+use core::slice::from_raw_parts_mut;
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::handle_alloc_error;
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::{box_free, WriteCloneIntoRaw};
+use crate::alloc::{AllocError, Allocator, Global, Layout};
+use crate::borrow::{Cow, ToOwned};
+#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
+#[cfg(not(no_global_oom_handling))]
+use crate::vec::Vec;
+
+#[cfg(test)]
+mod tests;
+
+// This is repr(C) to future-proof against possible field-reordering, which
+// would interfere with otherwise safe [into|from]_raw() of transmutable
+// inner types.
+#[repr(C)]
+struct RcBox<T: ?Sized> {
+ strong: Cell<usize>,
+ weak: Cell<usize>,
+ value: T,
+}
+
+/// A single-threaded reference-counting pointer. 'Rc' stands for 'Reference
+/// Counted'.
+///
+/// See the [module-level documentation](./index.html) for more details.
+///
+/// The inherent methods of `Rc` are all associated functions, which means
+/// that you have to call them as e.g., [`Rc::get_mut(&mut value)`][get_mut] instead of
+/// `value.get_mut()`. This avoids conflicts with methods of the inner type `T`.
+///
+/// [get_mut]: Rc::get_mut
+#[cfg_attr(not(test), rustc_diagnostic_item = "Rc")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Rc<T: ?Sized> {
+ ptr: NonNull<RcBox<T>>,
+ phantom: PhantomData<RcBox<T>>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !marker::Send for Rc<T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !marker::Sync for Rc<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Rc<U>> for Rc<T> {}
+
+impl<T: ?Sized> Rc<T> {
+ #[inline(always)]
+ fn inner(&self) -> &RcBox<T> {
+ // This unsafety is ok because while this Rc is alive we're guaranteed
+ // that the inner pointer is valid.
+ unsafe { self.ptr.as_ref() }
+ }
+
+ fn from_inner(ptr: NonNull<RcBox<T>>) -> Self {
+ Self { ptr, phantom: PhantomData }
+ }
+
+ unsafe fn from_ptr(ptr: *mut RcBox<T>) -> Self {
+ Self::from_inner(unsafe { NonNull::new_unchecked(ptr) })
+ }
+}
+
+impl<T> Rc<T> {
+ /// Constructs a new `Rc<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new(value: T) -> Rc<T> {
+ // There is an implicit weak pointer owned by all the strong
+ // pointers, which ensures that the weak destructor never frees
+ // the allocation while the strong destructor is running, even
+ // if the weak pointer is stored inside the strong one.
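+ // Hence a freshly constructed `Rc` starts with `strong == 1` and
+ // `weak == 1` (the implicit weak reference just described).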
+ Self::from_inner(
+ Box::leak(box RcBox { strong: Cell::new(1), weak: Cell::new(1), value }).into(),
+ )
+ }
+
+ /// Constructs a new `Rc<T>` using a weak reference to itself. Attempting
+ /// to upgrade the weak reference before this function returns will result
+ /// in a `None` value. However, the weak reference may be cloned freely and
+ /// stored for use at a later time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(arc_new_cyclic)]
+ /// #![allow(dead_code)]
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// struct Gadget {
+ /// self_weak: Weak<Self>,
+ /// // ... more fields
+ /// }
+ /// impl Gadget {
+ /// pub fn new() -> Rc<Self> {
+ /// Rc::new_cyclic(|self_weak| {
+ /// Gadget { self_weak: self_weak.clone(), /* ... */ }
+ /// })
+ /// }
+ /// }
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "arc_new_cyclic", issue = "75861")]
+ pub fn new_cyclic(data_fn: impl FnOnce(&Weak<T>) -> T) -> Rc<T> {
+ // Construct the inner in the "uninitialized" state with a single
+ // weak reference.
+ let uninit_ptr: NonNull<_> = Box::leak(box RcBox {
+ strong: Cell::new(0),
+ weak: Cell::new(1),
+ value: mem::MaybeUninit::<T>::uninit(),
+ })
+ .into();
+
+ let init_ptr: NonNull<RcBox<T>> = uninit_ptr.cast();
+
+ let weak = Weak { ptr: init_ptr };
+
+ // It's important we don't give up ownership of the weak pointer, or
+ // else the memory might be freed by the time `data_fn` returns. If
+ // we really wanted to pass ownership, we could create an additional
+ // weak pointer for ourselves, but this would result in additional
+ // updates to the weak reference count which might not be necessary
+ // otherwise.
+ let data = data_fn(&weak);
+
+ unsafe {
+ let inner = init_ptr.as_ptr();
+ ptr::write(ptr::addr_of_mut!((*inner).value), data);
+
+ let prev_value = (*inner).strong.get();
+ debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
+ (*inner).strong.set(1);
+ }
+
+ let strong = Rc::from_inner(init_ptr);
+
+ // Strong references should collectively own a shared weak reference,
+ // so don't run the destructor for our old weak reference.
+ mem::forget(weak);
+ strong
+ }
+
+ /// Constructs a new `Rc` with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut five = Rc::<u32>::new_uninit();
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// Rc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_uninit() -> Rc<mem::MaybeUninit<T>> {
+ unsafe {
+ Rc::from_ptr(Rc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate(layout),
+ |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ ))
+ }
+ }
+
+ /// Constructs a new `Rc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let zero = Rc::<u32>::new_zeroed();
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_zeroed() -> Rc<mem::MaybeUninit<T>> {
+ unsafe {
+ Rc::from_ptr(Rc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ ))
+ }
+ }
+
+ /// Constructs a new `Rc<T>`, returning an error if the allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::try_new(5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn try_new(value: T) -> Result<Rc<T>, AllocError> {
+ // There is an implicit weak pointer owned by all the strong
+ // pointers, which ensures that the weak destructor never frees
+ // the allocation while the strong destructor is running, even
+ // if the weak pointer is stored inside the strong one.
+ Ok(Self::from_inner(
+ Box::leak(Box::try_new(RcBox { strong: Cell::new(1), weak: Cell::new(1), value })?)
+ .into(),
+ ))
+ }
+
+ /// Constructs a new `Rc` with uninitialized contents, returning an error if the allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut five = Rc::<u32>::try_new_uninit()?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// Rc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_uninit() -> Result<Rc<mem::MaybeUninit<T>>, AllocError> {
+ unsafe {
+ Ok(Rc::from_ptr(Rc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate(layout),
+ |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ )?))
+ }
+ }
+
+ /// Constructs a new `Rc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes, returning an error if the allocation fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let zero = Rc::<u32>::try_new_zeroed()?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_zeroed() -> Result<Rc<mem::MaybeUninit<T>>, AllocError> {
+ unsafe {
+ Ok(Rc::from_ptr(Rc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ )?))
+ }
+ }
+
+ /// Constructs a new `Pin<Rc<T>>`. If `T` does not implement `Unpin`, then
+ /// `value` will be pinned in memory and unable to be moved.
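+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; the pinned value can still be read through the pointer:
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let pinned = Rc::pin(5);
+ /// assert_eq!(*pinned, 5);
+ /// ```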
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "pin", since = "1.33.0")]
+ pub fn pin(value: T) -> Pin<Rc<T>> {
+ unsafe { Pin::new_unchecked(Rc::new(value)) }
+ }
+
+ /// Returns the inner value, if the `Rc` has exactly one strong reference.
+ ///
+ /// Otherwise, an [`Err`] is returned with the same `Rc` that was
+ /// passed in.
+ ///
+ /// This will succeed even if there are outstanding weak references.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let x = Rc::new(3);
+ /// assert_eq!(Rc::try_unwrap(x), Ok(3));
+ ///
+ /// let x = Rc::new(4);
+ /// let _y = Rc::clone(&x);
+ /// assert_eq!(*Rc::try_unwrap(x).unwrap_err(), 4);
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_unique", since = "1.4.0")]
+ pub fn try_unwrap(this: Self) -> Result<T, Self> {
+ if Rc::strong_count(&this) == 1 {
+ unsafe {
+ let val = ptr::read(&*this); // copy the contained object
+
+ // Indicate to Weaks that they can't be promoted by decrementing
+ // the strong count, and then remove the implicit "strong weak"
+ // pointer while also handling drop logic by just crafting a
+ // fake Weak.
+ this.inner().dec_strong();
+ let _weak = Weak { ptr: this.ptr };
+ forget(this);
+ Ok(val)
+ }
+ } else {
+ Err(this)
+ }
+ }
+}
+
+impl<T> Rc<[T]> {
+ /// Constructs a new reference-counted slice with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut values = Rc::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// Rc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
+ /// Rc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
+ /// Rc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_uninit_slice(len: usize) -> Rc<[mem::MaybeUninit<T>]> {
+ unsafe { Rc::from_ptr(Rc::allocate_for_slice(len)) }
+ }
+
+ /// Constructs a new reference-counted slice with uninitialized contents, with the memory being
+ /// filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let values = Rc::<[u32]>::new_zeroed_slice(3);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_zeroed_slice(len: usize) -> Rc<[mem::MaybeUninit<T>]> {
+ unsafe {
+ Rc::from_ptr(Rc::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| {
+ ptr::slice_from_raw_parts_mut(mem as *mut T, len)
+ as *mut RcBox<[mem::MaybeUninit<T>]>
+ },
+ ))
+ }
+ }
+}
+
+impl<T> Rc<mem::MaybeUninit<T>> {
+ /// Converts to `Rc<T>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the inner value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut five = Rc::<u32>::new_uninit();
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// Rc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Rc<T> {
+ Rc::from_inner(mem::ManuallyDrop::new(self).ptr.cast())
+ }
+}
+
+impl<T> Rc<[mem::MaybeUninit<T>]> {
+ /// Converts to `Rc<[T]>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the inner value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut values = Rc::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// Rc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
+ /// Rc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
+ /// Rc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Rc<[T]> {
+ unsafe { Rc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) }
+ }
+}
+
+impl<T: ?Sized> Rc<T> {
+ /// Consumes the `Rc`, returning the wrapped pointer.
+ ///
+ /// To avoid a memory leak the pointer must be converted back to an `Rc` using
+ /// [`Rc::from_raw`][from_raw].
+ ///
+ /// [from_raw]: Rc::from_raw
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let x = Rc::new("hello".to_owned());
+ /// let x_ptr = Rc::into_raw(x);
+ /// assert_eq!(unsafe { &*x_ptr }, "hello");
+ /// ```
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub fn into_raw(this: Self) -> *const T {
+ let ptr = Self::as_ptr(&this);
+ mem::forget(this);
+ ptr
+ }
+
+ /// Provides a raw pointer to the data.
+ ///
+ /// The counts are not affected in any way and the `Rc` is not consumed. The pointer is valid
+ /// for as long as there are strong counts in the `Rc`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let x = Rc::new("hello".to_owned());
+ /// let y = Rc::clone(&x);
+ /// let x_ptr = Rc::as_ptr(&x);
+ /// assert_eq!(x_ptr, Rc::as_ptr(&y));
+ /// assert_eq!(unsafe { &*x_ptr }, "hello");
+ /// ```
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub fn as_ptr(this: &Self) -> *const T {
+ let ptr: *mut RcBox<T> = NonNull::as_ptr(this.ptr);
+
+ // SAFETY: This cannot go through Deref::deref or Rc::inner because
+ // this is required to retain raw/mut provenance such that e.g. `get_mut` can
+ // write through the pointer after the Rc is recovered through `from_raw`.
+ unsafe { ptr::addr_of_mut!((*ptr).value) }
+ }
+
+ /// Constructs an `Rc<T>` from a raw pointer.
+ ///
+ /// The raw pointer must have been previously returned by a call to
+ /// [`Rc<U>::into_raw`][into_raw] where `U` must have the same size
+ /// and alignment as `T`. This is trivially true if `U` is `T`.
+ /// Note that if `U` is not `T` but has the same size and alignment, this is
+ /// basically like transmuting references of different types. See
+ /// [`mem::transmute`][transmute] for more information on what
+ /// restrictions apply in this case.
+ ///
+ /// The user of `from_raw` has to make sure a specific value of `T` is only
+ /// dropped once.
+ ///
+ /// This function is unsafe because improper use may lead to memory unsafety,
+ /// even if the returned `Rc<T>` is never accessed.
+ ///
+ /// [into_raw]: Rc::into_raw
+ /// [transmute]: core::mem::transmute
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let x = Rc::new("hello".to_owned());
+ /// let x_ptr = Rc::into_raw(x);
+ ///
+ /// unsafe {
+ /// // Convert back to an `Rc` to prevent leak.
+ /// let x = Rc::from_raw(x_ptr);
+ /// assert_eq!(&*x, "hello");
+ ///
+ /// // Further calls to `Rc::from_raw(x_ptr)` would be memory-unsafe.
+ /// }
+ ///
+ /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
+ /// ```
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ let offset = unsafe { data_offset(ptr) };
+
+ // Reverse the offset to find the original RcBox.
+ let rc_ptr =
+ unsafe { (ptr as *mut RcBox<T>).set_ptr_value((ptr as *mut u8).offset(-offset)) };
+
+ unsafe { Self::from_ptr(rc_ptr) }
+ }
+
+ /// Creates a new [`Weak`] pointer to this allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// let weak_five = Rc::downgrade(&five);
+ /// ```
+ #[stable(feature = "rc_weak", since = "1.4.0")]
+ pub fn downgrade(this: &Self) -> Weak<T> {
+ this.inner().inc_weak();
+ // Make sure we do not create a dangling Weak
+ debug_assert!(!is_dangling(this.ptr.as_ptr()));
+ Weak { ptr: this.ptr }
+ }
+
+ /// Gets the number of [`Weak`] pointers to this allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ /// let _weak_five = Rc::downgrade(&five);
+ ///
+ /// assert_eq!(1, Rc::weak_count(&five));
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_counts", since = "1.15.0")]
+ pub fn weak_count(this: &Self) -> usize {
+ this.inner().weak() - 1
+ }
+
+ /// Gets the number of strong (`Rc`) pointers to this allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ /// let _also_five = Rc::clone(&five);
+ ///
+ /// assert_eq!(2, Rc::strong_count(&five));
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_counts", since = "1.15.0")]
+ pub fn strong_count(this: &Self) -> usize {
+ this.inner().strong()
+ }
+
+ /// Increments the strong reference count on the `Rc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Rc::into_raw`, and the
+ /// associated `Rc` instance must be valid (i.e. the strong count must be at
+ /// least 1) for the duration of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Rc::into_raw(five);
+ /// Rc::increment_strong_count(ptr);
+ ///
+ /// let five = Rc::from_raw(ptr);
+ /// assert_eq!(2, Rc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_mutate_strong_count", since = "1.53.0")]
+ pub unsafe fn increment_strong_count(ptr: *const T) {
+ // Retain Rc, but don't touch refcount by wrapping in ManuallyDrop
+ let rc = unsafe { mem::ManuallyDrop::new(Rc::<T>::from_raw(ptr)) };
+ // Now increase refcount, but don't drop new refcount either
+ let _rc_clone: mem::ManuallyDrop<_> = rc.clone();
+ }
+
+ /// Decrements the strong reference count on the `Rc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Rc::into_raw`, and the
+ /// associated `Rc` instance must be valid (i.e. the strong count must be at
+ /// least 1) when invoking this method. This method can be used to release
+ /// the final `Rc` and backing storage, but **should not** be called after
+ /// the final `Rc` has been released.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Rc::into_raw(five);
+ /// Rc::increment_strong_count(ptr);
+ ///
+ /// let five = Rc::from_raw(ptr);
+ /// assert_eq!(2, Rc::strong_count(&five));
+ /// Rc::decrement_strong_count(ptr);
+ /// assert_eq!(1, Rc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_mutate_strong_count", since = "1.53.0")]
+ pub unsafe fn decrement_strong_count(ptr: *const T) {
+ unsafe { mem::drop(Rc::from_raw(ptr)) };
+ }
+
+ /// Returns `true` if there are no other `Rc` or [`Weak`] pointers to
+ /// this allocation.
+ #[inline]
+ fn is_unique(this: &Self) -> bool {
+ Rc::weak_count(this) == 0 && Rc::strong_count(this) == 1
+ }
+
+ /// Returns a mutable reference into the given `Rc`, if there are
+ /// no other `Rc` or [`Weak`] pointers to the same allocation.
+ ///
+ /// Returns [`None`] otherwise, because it is not safe to
+ /// mutate a shared value.
+ ///
+ /// See also [`make_mut`][make_mut], which will [`clone`][clone]
+ /// the inner value when there are other pointers.
+ ///
+ /// [make_mut]: Rc::make_mut
+ /// [clone]: Clone::clone
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let mut x = Rc::new(3);
+ /// *Rc::get_mut(&mut x).unwrap() = 4;
+ /// assert_eq!(*x, 4);
+ ///
+ /// let _y = Rc::clone(&x);
+ /// assert!(Rc::get_mut(&mut x).is_none());
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_unique", since = "1.4.0")]
+ pub fn get_mut(this: &mut Self) -> Option<&mut T> {
+ if Rc::is_unique(this) { unsafe { Some(Rc::get_mut_unchecked(this)) } } else { None }
+ }
+
+ /// Returns a mutable reference into the given `Rc`,
+ /// without any check.
+ ///
+ /// See also [`get_mut`], which is safe and does appropriate checks.
+ ///
+ /// [`get_mut`]: Rc::get_mut
+ ///
+ /// # Safety
+ ///
+ /// Any other `Rc` or [`Weak`] pointers to the same allocation must not be dereferenced
+ /// for the duration of the returned borrow.
+ /// This is trivially the case if no such pointers exist,
+ /// for example immediately after `Rc::new`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut x = Rc::new(String::new());
+ /// unsafe {
+ /// Rc::get_mut_unchecked(&mut x).push_str("foo")
+ /// }
+ /// assert_eq!(*x, "foo");
+ /// ```
+ #[inline]
+ #[unstable(feature = "get_mut_unchecked", issue = "63292")]
+ pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
+ // We are careful to *not* create a reference covering the "count" fields, as
+ // this would conflict with accesses to the reference counts (e.g. by `Weak`).
+ unsafe { &mut (*this.ptr.as_ptr()).value }
+ }
+
+ #[inline]
+ #[stable(feature = "ptr_eq", since = "1.17.0")]
+ /// Returns `true` if the two `Rc`s point to the same allocation
+ /// (in a vein similar to [`ptr::eq`]).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ /// let same_five = Rc::clone(&five);
+ /// let other_five = Rc::new(5);
+ ///
+ /// assert!(Rc::ptr_eq(&five, &same_five));
+ /// assert!(!Rc::ptr_eq(&five, &other_five));
+ /// ```
+ ///
+ /// [`ptr::eq`]: core::ptr::eq
+ pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+ this.ptr.as_ptr() == other.ptr.as_ptr()
+ }
+}
+
+impl<T: Clone> Rc<T> {
+ /// Makes a mutable reference into the given `Rc`.
+ ///
+ /// If there are other `Rc` pointers to the same allocation, then `make_mut` will
+ /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also
+ /// referred to as clone-on-write.
+ ///
+ /// If there are no other `Rc` pointers to this allocation, then [`Weak`]
+ /// pointers to this allocation will be disassociated.
+ ///
+ /// See also [`get_mut`], which will fail rather than cloning.
+ ///
+ /// [`clone`]: Clone::clone
+ /// [`get_mut`]: Rc::get_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let mut data = Rc::new(5);
+ ///
+ /// *Rc::make_mut(&mut data) += 1; // Won't clone anything
+ /// let mut other_data = Rc::clone(&data); // Won't clone inner data
+ /// *Rc::make_mut(&mut data) += 1; // Clones inner data
+ /// *Rc::make_mut(&mut data) += 1; // Won't clone anything
+ /// *Rc::make_mut(&mut other_data) *= 2; // Won't clone anything
+ ///
+ /// // Now `data` and `other_data` point to different allocations.
+ /// assert_eq!(*data, 8);
+ /// assert_eq!(*other_data, 12);
+ /// ```
+ ///
+ /// [`Weak`] pointers will be disassociated:
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let mut data = Rc::new(75);
+ /// let weak = Rc::downgrade(&data);
+ ///
+ /// assert!(75 == *data);
+ /// assert!(75 == *weak.upgrade().unwrap());
+ ///
+ /// *Rc::make_mut(&mut data) += 1;
+ ///
+ /// assert!(76 == *data);
+ /// assert!(weak.upgrade().is_none());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rc_unique", since = "1.4.0")]
+ pub fn make_mut(this: &mut Self) -> &mut T {
+ if Rc::strong_count(this) != 1 {
+ // Gotta clone the data, there are other Rcs.
+ // Pre-allocate memory to allow writing the cloned value directly.
+ let mut rc = Self::new_uninit();
+ unsafe {
+ let data = Rc::get_mut_unchecked(&mut rc);
+ (**this).write_clone_into_raw(data.as_mut_ptr());
+ *this = rc.assume_init();
+ }
+ } else if Rc::weak_count(this) != 0 {
+ // Can just steal the data, all that's left is Weaks
+ let mut rc = Self::new_uninit();
+ unsafe {
+ let data = Rc::get_mut_unchecked(&mut rc);
+ data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1);
+
+ this.inner().dec_strong();
+ // Remove implicit strong-weak ref (no need to craft a fake
+ // Weak here -- we know other Weaks can clean up for us)
+ this.inner().dec_weak();
+ ptr::write(this, rc.assume_init());
+ }
+ }
+ // This unsafety is ok because we're guaranteed that the pointer
+ // returned is the *only* pointer that will ever be returned to T. Our
+ // reference count is guaranteed to be 1 at this point, and we required
+ // the `Rc<T>` itself to be `mut`, so we're returning the only possible
+ // reference to the allocation.
+ unsafe { &mut this.ptr.as_mut().value }
+ }
+}
+
+impl Rc<dyn Any> {
+ #[inline]
+ #[stable(feature = "rc_downcast", since = "1.29.0")]
+ /// Attempt to downcast the `Rc<dyn Any>` to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ /// use std::rc::Rc;
+ ///
+ /// fn print_if_string(value: Rc<dyn Any>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Rc::new(my_string));
+ /// print_if_string(Rc::new(0i8));
+ /// ```
+ pub fn downcast<T: Any>(self) -> Result<Rc<T>, Rc<dyn Any>> {
+ if (*self).is::<T>() {
+ let ptr = self.ptr.cast::<RcBox<T>>();
+ forget(self);
+ Ok(Rc::from_inner(ptr))
+ } else {
+ Err(self)
+ }
+ }
+}
+
+impl<T: ?Sized> Rc<T> {
+ /// Allocates an `RcBox<T>` with sufficient space for
+ /// a possibly-unsized inner value where the value has the layout provided.
+ ///
+ /// The function `mem_to_rcbox` is called with the data pointer
+ /// and must return a (potentially fat) pointer for the `RcBox<T>`.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_layout(
+ value_layout: Layout,
+ allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
+ mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>,
+ ) -> *mut RcBox<T> {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
+ // reference (see #54908).
+ let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ unsafe {
+ Rc::try_allocate_for_layout(value_layout, allocate, mem_to_rcbox)
+ .unwrap_or_else(|_| handle_alloc_error(layout))
+ }
+ }
+
+ /// Allocates an `RcBox<T>` with sufficient space for
+ /// a possibly-unsized inner value where the value has the layout provided,
+ /// returning an error if allocation fails.
+ ///
+ /// The function `mem_to_rcbox` is called with the data pointer
+ /// and must return a (potentially fat) pointer for the `RcBox<T>`.
+ #[inline]
+ unsafe fn try_allocate_for_layout(
+ value_layout: Layout,
+ allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
+ mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>,
+ ) -> Result<*mut RcBox<T>, AllocError> {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
+ // reference (see #54908).
+ let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
+
+ // Allocate for the layout.
+ let ptr = allocate(layout)?;
+
+ // Initialize the RcBox
+ let inner = mem_to_rcbox(ptr.as_non_null_ptr().as_ptr());
+ unsafe {
+ debug_assert_eq!(Layout::for_value(&*inner), layout);
+
+ ptr::write(&mut (*inner).strong, Cell::new(1));
+ ptr::write(&mut (*inner).weak, Cell::new(1));
+ }
+
+ Ok(inner)
+ }
+
+ /// Allocates an `RcBox<T>` with sufficient space for an unsized inner value
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox<T> {
+ // Allocate for the `RcBox<T>` using the given value.
+ unsafe {
+ Self::allocate_for_layout(
+ Layout::for_value(&*ptr),
+ |layout| Global.allocate(layout),
+ |mem| (ptr as *mut RcBox<T>).set_ptr_value(mem),
+ )
+ }
+ }
+
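+ /// Moves the value out of the given box into a new reference-counted
+ /// allocation, then frees the box's allocation without dropping the value
+ /// (which now lives in the `RcBox`).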
+ #[cfg(not(no_global_oom_handling))]
+ fn from_box(v: Box<T>) -> Rc<T> {
+ unsafe {
+ let (box_unique, alloc) = Box::into_unique(v);
+ let bptr = box_unique.as_ptr();
+
+ let value_size = size_of_val(&*bptr);
+ let ptr = Self::allocate_for_ptr(bptr);
+
+ // Copy value as bytes
+ ptr::copy_nonoverlapping(
+ bptr as *const T as *const u8,
+ &mut (*ptr).value as *mut _ as *mut u8,
+ value_size,
+ );
+
+ // Free the allocation without dropping its contents
+ box_free(box_unique, alloc);
+
+ Self::from_ptr(ptr)
+ }
+ }
+}
+
+impl<T> Rc<[T]> {
+ /// Allocates an `RcBox<[T]>` with the given length.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_slice(len: usize) -> *mut RcBox<[T]> {
+ unsafe {
+ Self::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| Global.allocate(layout),
+ |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut RcBox<[T]>,
+ )
+ }
+ }
+
+ /// Copy elements from a slice into a newly allocated `Rc<[T]>`.
+ ///
+ /// Unsafe because the caller must either take ownership or bind `T: Copy`.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn copy_from_slice(v: &[T]) -> Rc<[T]> {
+ unsafe {
+ let ptr = Self::allocate_for_slice(v.len());
+ ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).value as *mut [T] as *mut T, v.len());
+ Self::from_ptr(ptr)
+ }
+ }
+
+ /// Constructs an `Rc<[T]>` from an iterator known to be of a certain size.
+ ///
+ /// Behavior is undefined should the size be wrong.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Rc<[T]> {
+ // Panic guard while cloning T elements.
+ // In the event of a panic, elements that have been written
+ // into the new RcBox will be dropped, then the memory freed.
+ struct Guard<T> {
+ mem: NonNull<u8>,
+ elems: *mut T,
+ layout: Layout,
+ n_elems: usize,
+ }
+
+ impl<T> Drop for Guard<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let slice = from_raw_parts_mut(self.elems, self.n_elems);
+ ptr::drop_in_place(slice);
+
+ Global.deallocate(self.mem, self.layout);
+ }
+ }
+ }
+
+ unsafe {
+ let ptr = Self::allocate_for_slice(len);
+
+ let mem = ptr as *mut _ as *mut u8;
+ let layout = Layout::for_value(&*ptr);
+
+ // Pointer to first element
+ let elems = &mut (*ptr).value as *mut [T] as *mut T;
+
+ let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
+
+ for (i, item) in iter.enumerate() {
+ ptr::write(elems.add(i), item);
+ guard.n_elems += 1;
+ }
+
+ // All clear. Forget the guard so it doesn't free the new RcBox.
+ forget(guard);
+
+ Self::from_ptr(ptr)
+ }
+ }
+}
+
+/// Specialization trait used for `From<&[T]>`.
+trait RcFromSlice<T> {
+ fn from_slice(slice: &[T]) -> Self;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone> RcFromSlice<T> for Rc<[T]> {
+ #[inline]
+ default fn from_slice(v: &[T]) -> Self {
+ unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
+ }
+}
+
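+// For `T: Copy`, duplicating an element is a plain byte copy, so the slice
+// can be copied wholesale instead of cloning element by element.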
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy> RcFromSlice<T> for Rc<[T]> {
+ #[inline]
+ fn from_slice(v: &[T]) -> Self {
+ unsafe { Rc::copy_from_slice(v) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for Rc<T> {
+ type Target = T;
+
+ #[inline(always)]
+ fn deref(&self) -> &T {
+ &self.inner().value
+ }
+}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<T: ?Sized> Receiver for Rc<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc<T> {
+ /// Drops the `Rc`.
+ ///
+ /// This will decrement the strong reference count. If the strong reference
+ /// count reaches zero then the only other references (if any) are
+ /// [`Weak`], so we `drop` the inner value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// struct Foo;
+ ///
+ /// impl Drop for Foo {
+ /// fn drop(&mut self) {
+ /// println!("dropped!");
+ /// }
+ /// }
+ ///
+ /// let foo = Rc::new(Foo);
+ /// let foo2 = Rc::clone(&foo);
+ ///
+ /// drop(foo); // Doesn't print anything
+ /// drop(foo2); // Prints "dropped!"
+ /// ```
+ fn drop(&mut self) {
+ unsafe {
+ self.inner().dec_strong();
+ if self.inner().strong() == 0 {
+ // destroy the contained object
+ ptr::drop_in_place(Self::get_mut_unchecked(self));
+
+ // remove the implicit "strong weak" pointer now that we've
+ // destroyed the contents.
+ self.inner().dec_weak();
+
+ if self.inner().weak() == 0 {
+ Global.deallocate(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()));
+ }
+ }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Clone for Rc<T> {
+ /// Makes a clone of the `Rc` pointer.
+ ///
+ /// This creates another pointer to the same allocation, increasing the
+ /// strong reference count.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// let _ = Rc::clone(&five);
+ /// ```
+ #[inline]
+ fn clone(&self) -> Rc<T> {
+ self.inner().inc_strong();
+ Self::from_inner(self.ptr)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Rc<T> {
+ /// Creates a new `Rc<T>`, with the `Default` value for `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let x: Rc<i32> = Default::default();
+ /// assert_eq!(*x, 0);
+ /// ```
+ #[inline]
+ fn default() -> Rc<T> {
+ Rc::new(Default::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+trait RcEqIdent<T: ?Sized + PartialEq> {
+ fn eq(&self, other: &Rc<T>) -> bool;
+ fn ne(&self, other: &Rc<T>) -> bool;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
+ #[inline]
+ default fn eq(&self, other: &Rc<T>) -> bool {
+ **self == **other
+ }
+
+ #[inline]
+ default fn ne(&self, other: &Rc<T>) -> bool {
+ **self != **other
+ }
+}
+
+// Hack to allow specializing on `Eq` even though `Eq` has a method.
+#[rustc_unsafe_specialization_marker]
+pub(crate) trait MarkerEq: PartialEq<Self> {}
+
+impl<T: Eq> MarkerEq for T {}
+
+/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
+/// would otherwise add a cost to all equality checks on refs. We assume that `Rc`s are used to
+/// store large values that are slow to clone but also expensive to check for equality, causing this
+/// cost to pay off more easily. It's also more likely for two `Rc` clones to point to
+/// the same value than for two `&T`s.
+///
+/// We can only do this when `T: Eq`, as a `PartialEq` might be deliberately irreflexive.
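+///
+/// For instance, `f32` is `PartialEq` but not `Eq`, since `NaN != NaN`; a
+/// pointer-equality shortcut would change observable behavior there. A minimal
+/// illustration:
+///
+/// ```
+/// use std::rc::Rc;
+///
+/// let nan = Rc::new(f32::NAN);
+/// // Same allocation, but the values must still compare unequal.
+/// assert!(nan != nan.clone());
+/// ```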
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + MarkerEq> RcEqIdent<T> for Rc<T> {
+ #[inline]
+ fn eq(&self, other: &Rc<T>) -> bool {
+ Rc::ptr_eq(self, other) || **self == **other
+ }
+
+ #[inline]
+ fn ne(&self, other: &Rc<T>) -> bool {
+ !Rc::ptr_eq(self, other) && **self != **other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
+ /// Equality for two `Rc`s.
+ ///
+ /// Two `Rc`s are equal if their inner values are equal, even if they are
+ /// stored in different allocations.
+ ///
+ /// If `T` also implements `Eq` (implying reflexivity of equality),
+ /// two `Rc`s that point to the same allocation are
+ /// always equal.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five == Rc::new(5));
+ /// ```
+ #[inline]
+ fn eq(&self, other: &Rc<T>) -> bool {
+ RcEqIdent::eq(self, other)
+ }
+
+ /// Inequality for two `Rc`s.
+ ///
+ /// Two `Rc`s are unequal if their inner values are unequal.
+ ///
+ /// If `T` also implements `Eq` (implying reflexivity of equality),
+ /// two `Rc`s that point to the same allocation are
+ /// never unequal.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five != Rc::new(6));
+ /// ```
+ #[inline]
+ fn ne(&self, other: &Rc<T>) -> bool {
+ RcEqIdent::ne(self, other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Eq> Eq for Rc<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> {
+ /// Partial comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `partial_cmp()` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ /// use std::cmp::Ordering;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Rc::new(6)));
+ /// ```
+ #[inline(always)]
+ fn partial_cmp(&self, other: &Rc<T>) -> Option<Ordering> {
+ (**self).partial_cmp(&**other)
+ }
+
+ /// Less-than comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `<` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five < Rc::new(6));
+ /// ```
+ #[inline(always)]
+ fn lt(&self, other: &Rc<T>) -> bool {
+ **self < **other
+ }
+
+ /// 'Less than or equal to' comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `<=` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five <= Rc::new(5));
+ /// ```
+ #[inline(always)]
+ fn le(&self, other: &Rc<T>) -> bool {
+ **self <= **other
+ }
+
+ /// Greater-than comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `>` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five > Rc::new(4));
+ /// ```
+ #[inline(always)]
+ fn gt(&self, other: &Rc<T>) -> bool {
+ **self > **other
+ }
+
+ /// 'Greater than or equal to' comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `>=` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five >= Rc::new(5));
+ /// ```
+ #[inline(always)]
+ fn ge(&self, other: &Rc<T>) -> bool {
+ **self >= **other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Ord> Ord for Rc<T> {
+ /// Comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `cmp()` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ /// use std::cmp::Ordering;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert_eq!(Ordering::Less, five.cmp(&Rc::new(6)));
+ /// ```
+ #[inline]
+ fn cmp(&self, other: &Rc<T>) -> Ordering {
+ (**self).cmp(&**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Hash> Hash for Rc<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for Rc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Rc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> fmt::Pointer for Rc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&(&**self as *const T), f)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_for_ptrs", since = "1.6.0")]
+impl<T> From<T> for Rc<T> {
+ /// Converts a generic type `T` into an `Rc<T>`.
+ ///
+ /// The conversion allocates on the heap and moves `t`
+ /// from the stack into it.
+ ///
+ /// # Example
+ /// ```rust
+ /// # use std::rc::Rc;
+ /// let x = 5;
+ /// let rc = Rc::new(5);
+ ///
+ /// assert_eq!(Rc::from(x), rc);
+ /// ```
+ fn from(t: T) -> Self {
+ Rc::new(t)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T: Clone> From<&[T]> for Rc<[T]> {
+ /// Allocate a reference-counted slice and fill it by cloning `v`'s items.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+ /// let original: &[i32] = &[1, 2, 3];
+ /// let shared: Rc<[i32]> = Rc::from(original);
+ /// assert_eq!(&[1, 2, 3], &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: &[T]) -> Rc<[T]> {
+ <Self as RcFromSlice<T>>::from_slice(v)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl From<&str> for Rc<str> {
+ /// Allocate a reference-counted string slice and copy `v` into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+ /// let shared: Rc<str> = Rc::from("statue");
+ /// assert_eq!("statue", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: &str) -> Rc<str> {
+ let rc = Rc::<[u8]>::from(v.as_bytes());
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const str) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl From<String> for Rc<str> {
+ /// Allocate a reference-counted string slice and copy `v` into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+ /// let original: String = "statue".to_owned();
+ /// let shared: Rc<str> = Rc::from(original);
+ /// assert_eq!("statue", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: String) -> Rc<str> {
+ Rc::from(&v[..])
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T: ?Sized> From<Box<T>> for Rc<T> {
+ /// Move a boxed object to a new, reference counted, allocation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+ /// let original: Box<i32> = Box::new(1);
+ /// let shared: Rc<i32> = Rc::from(original);
+ /// assert_eq!(1, *shared);
+ /// ```
+ #[inline]
+ fn from(v: Box<T>) -> Rc<T> {
+ Rc::from_box(v)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T> From<Vec<T>> for Rc<[T]> {
+ /// Allocate a reference-counted slice and move `v`'s items into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+ /// let unique: Vec<i32> = vec![1, 2, 3];
+ /// let shared: Rc<[i32]> = Rc::from(unique);
+ /// assert_eq!(&[1, 2, 3], &shared[..]);
+ /// ```
+ #[inline]
+ fn from(mut v: Vec<T>) -> Rc<[T]> {
+ unsafe {
+ let rc = Rc::copy_from_slice(&v);
+
+ // Allow the Vec to free its memory, but not destroy its contents
+ v.set_len(0);
+
+ rc
+ }
+ }
+}
+
+#[stable(feature = "shared_from_cow", since = "1.45.0")]
+impl<'a, B> From<Cow<'a, B>> for Rc<B>
+where
+ B: ToOwned + ?Sized,
+ Rc<B>: From<&'a B> + From<B::Owned>,
+{
+ /// Create a reference-counted pointer from
+ /// a clone-on-write pointer by copying its content.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// # use std::rc::Rc;
+ /// # use std::borrow::Cow;
+ /// let cow: Cow<str> = Cow::Borrowed("eggplant");
+ /// let shared: Rc<str> = Rc::from(cow);
+ /// assert_eq!("eggplant", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(cow: Cow<'a, B>) -> Rc<B> {
+ match cow {
+ Cow::Borrowed(s) => Rc::from(s),
+ Cow::Owned(s) => Rc::from(s),
+ }
+ }
+}
+
+#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
+impl<T, const N: usize> TryFrom<Rc<[T]>> for Rc<[T; N]> {
+ type Error = Rc<[T]>;
+
+ fn try_from(boxed_slice: Rc<[T]>) -> Result<Self, Self::Error> {
+ if boxed_slice.len() == N {
+ Ok(unsafe { Rc::from_raw(Rc::into_raw(boxed_slice) as *mut [T; N]) })
+ } else {
+ Err(boxed_slice)
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_iter", since = "1.37.0")]
+impl<T> iter::FromIterator<T> for Rc<[T]> {
+ /// Takes each element in the `Iterator` and collects it into an `Rc<[T]>`.
+ ///
+ /// # Performance characteristics
+ ///
+ /// ## The general case
+ ///
+ /// In the general case, collecting into `Rc<[T]>` is done by first
+ /// collecting into a `Vec<T>`. That is, when writing the following:
+ ///
+ /// ```rust
+ /// # use std::rc::Rc;
+ /// let evens: Rc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
+ /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+ /// ```
+ ///
+ /// this behaves as if we wrote:
+ ///
+ /// ```rust
+ /// # use std::rc::Rc;
+ /// let evens: Rc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
+ /// .collect::<Vec<_>>() // The first set of allocations happens here.
+ /// .into(); // A second allocation for `Rc<[T]>` happens here.
+ /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+ /// ```
+ ///
+ /// This will allocate as many times as needed for constructing the `Vec<T>`
+ /// and then it will allocate once for turning the `Vec<T>` into the `Rc<[T]>`.
+ ///
+ /// ## Iterators of known length
+ ///
+ /// When your `Iterator` implements `TrustedLen` and is of an exact size,
+ /// a single allocation will be made for the `Rc<[T]>`. For example:
+ ///
+ /// ```rust
+ /// # use std::rc::Rc;
+ /// let evens: Rc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
+ /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
+ /// ```
+ fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+ ToRcSlice::to_rc_slice(iter.into_iter())
+ }
+}
+
+/// Specialization trait used for collecting into `Rc<[T]>`.
+#[cfg(not(no_global_oom_handling))]
+trait ToRcSlice<T>: Iterator<Item = T> + Sized {
+ fn to_rc_slice(self) -> Rc<[T]>;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I: Iterator<Item = T>> ToRcSlice<T> for I {
+ default fn to_rc_slice(self) -> Rc<[T]> {
+ self.collect::<Vec<T>>().into()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I {
+ fn to_rc_slice(self) -> Rc<[T]> {
+ // This is the case for a `TrustedLen` iterator.
+ let (low, high) = self.size_hint();
+ if let Some(high) = high {
+ debug_assert_eq!(
+ low,
+ high,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+
+ unsafe {
+ // SAFETY: We need to ensure that the iterator has an exact length, and we have just checked that `low == high`.
+ Rc::from_iter_exact(self, low)
+ }
+ } else {
+ // The `TrustedLen` contract guarantees that `upper_bound == None` implies an
+ // iterator length exceeding `usize::MAX`.
+ // The default implementation would collect into a vec which would panic.
+ // Thus we panic here immediately without invoking `Vec` code.
+ panic!("capacity overflow");
+ }
+ }
+}
+
+/// `Weak` is a version of [`Rc`] that holds a non-owning reference to the
+/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
+/// pointer, which returns an [`Option`]`<`[`Rc`]`<T>>`.
+///
+/// Since a `Weak` reference does not count towards ownership, it will not
+/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
+/// guarantees about the value still being present. Thus it may return [`None`]
+/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
+/// itself (the backing store) from being deallocated.
+///
+/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
+/// managed by [`Rc`] without preventing its inner value from being dropped. It is also used to
+/// prevent circular references between [`Rc`] pointers, since mutual owning references
+/// would never allow either [`Rc`] to be dropped. For example, a tree could
+/// have strong [`Rc`] pointers from parent nodes to children, and `Weak`
+/// pointers from children back to their parents.
+///
+/// The typical way to obtain a `Weak` pointer is to call [`Rc::downgrade`].
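+///
+/// A minimal sketch of that parent/child layout (the `Node` type here is
+/// purely illustrative, not part of this module):
+///
+/// ```
+/// use std::cell::RefCell;
+/// use std::rc::{Rc, Weak};
+///
+/// struct Node {
+///     parent: RefCell<Weak<Node>>,
+///     children: RefCell<Vec<Rc<Node>>>,
+/// }
+///
+/// let parent = Rc::new(Node {
+///     parent: RefCell::new(Weak::new()),
+///     children: RefCell::new(vec![]),
+/// });
+/// let child = Rc::new(Node {
+///     parent: RefCell::new(Rc::downgrade(&parent)),
+///     children: RefCell::new(vec![]),
+/// });
+/// parent.children.borrow_mut().push(Rc::clone(&child));
+///
+/// // The child can reach its parent without keeping it alive.
+/// assert!(child.parent.borrow().upgrade().is_some());
+/// ```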
+///
+/// [`upgrade`]: Weak::upgrade
+#[stable(feature = "rc_weak", since = "1.4.0")]
+pub struct Weak<T: ?Sized> {
+ // This is a `NonNull` to allow optimizing the size of this type in enums,
+ // but it is not necessarily a valid pointer.
+ // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
+ // to allocate space on the heap. That's not a value a real pointer
+ // will ever have because RcBox has alignment at least 2.
+ // This is only possible when `T: Sized`; an unsized `T` never dangles.
+ ptr: NonNull<RcBox<T>>,
+}
+
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized> !marker::Send for Weak<T> {}
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized> !marker::Sync for Weak<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
+
+impl<T> Weak<T> {
+ /// Constructs a new `Weak<T>`, without allocating any memory.
+ /// Calling [`upgrade`] on the return value always gives [`None`].
+ ///
+ /// [`upgrade`]: Weak::upgrade
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Weak;
+ ///
+ /// let empty: Weak<i64> = Weak::new();
+ /// assert!(empty.upgrade().is_none());
+ /// ```
+ #[stable(feature = "downgraded_weak", since = "1.10.0")]
+ pub fn new() -> Weak<T> {
+ Weak { ptr: NonNull::new(usize::MAX as *mut RcBox<T>).expect("MAX is not 0") }
+ }
+}
+
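+// `Weak::new` stores the sentinel `usize::MAX` instead of allocating, so this
+// helper recognizes that sentinel (see the comment on the `ptr` field of
+// `Weak` above).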
+pub(crate) fn is_dangling<T: ?Sized>(ptr: *mut T) -> bool {
+ let address = ptr as *mut () as usize;
+ address == usize::MAX
+}
+
+/// Helper type to allow accessing the reference counts without
+/// making any assertions about the data field.
+struct WeakInner<'a> {
+ weak: &'a Cell<usize>,
+ strong: &'a Cell<usize>,
+}
+
+impl<T: ?Sized> Weak<T> {
+ /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
+ ///
+ /// The pointer is valid only if there are some strong references. The pointer may be dangling,
+ /// unaligned or even [`null`] otherwise.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ /// use std::ptr;
+ ///
+ /// let strong = Rc::new("hello".to_owned());
+ /// let weak = Rc::downgrade(&strong);
+ /// // Both point to the same object
+ /// assert!(ptr::eq(&*strong, weak.as_ptr()));
+ /// // The strong here keeps it alive, so we can still access the object.
+ /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
+ ///
+ /// drop(strong);
+ /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
+ /// // undefined behaviour.
+ /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
+ /// ```
+ ///
+ /// [`null`]: core::ptr::null
+ #[stable(feature = "rc_as_ptr", since = "1.45.0")]
+ pub fn as_ptr(&self) -> *const T {
+ let ptr: *mut RcBox<T> = NonNull::as_ptr(self.ptr);
+
+ if is_dangling(ptr) {
+ // If the pointer is dangling, we return the sentinel directly. This cannot be
+ // a valid payload address, as the payload is at least as aligned as RcBox (usize).
+ ptr as *const T
+ } else {
+ // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
+ // The payload may be dropped at this point, and we have to maintain provenance,
+ // so use raw pointer manipulation.
+ unsafe { ptr::addr_of_mut!((*ptr).value) }
+ }
+ }
+
+ /// Consumes the `Weak<T>` and turns it into a raw pointer.
+ ///
+ /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
+ /// one weak reference (the weak count is not modified by this operation). It can be turned
+ /// back into the `Weak<T>` with [`from_raw`].
+ ///
+ /// The same restrictions of accessing the target of the pointer as with
+ /// [`as_ptr`] apply.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// let strong = Rc::new("hello".to_owned());
+ /// let weak = Rc::downgrade(&strong);
+ /// let raw = weak.into_raw();
+ ///
+ /// assert_eq!(1, Rc::weak_count(&strong));
+ /// assert_eq!("hello", unsafe { &*raw });
+ ///
+ /// drop(unsafe { Weak::from_raw(raw) });
+ /// assert_eq!(0, Rc::weak_count(&strong));
+ /// ```
+ ///
+ /// [`from_raw`]: Weak::from_raw
+ /// [`as_ptr`]: Weak::as_ptr
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub fn into_raw(self) -> *const T {
+ let result = self.as_ptr();
+ mem::forget(self);
+ result
+ }
+
+ /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
+ ///
+ /// This can be used to safely get a strong reference (by calling [`upgrade`]
+ /// later) or to deallocate the weak count by dropping the `Weak<T>`.
+ ///
+ /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
+ /// as these don't own anything; the method still works on them).
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have originated from [`into_raw`] and must still own its potential
+ /// weak reference.
+ ///
+ /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
+ /// takes ownership of one weak reference currently represented as a raw pointer (the weak
+ /// count is not modified by this operation) and therefore it must be paired with a previous
+ /// call to [`into_raw`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// let strong = Rc::new("hello".to_owned());
+ ///
+ /// let raw_1 = Rc::downgrade(&strong).into_raw();
+ /// let raw_2 = Rc::downgrade(&strong).into_raw();
+ ///
+ /// assert_eq!(2, Rc::weak_count(&strong));
+ ///
+ /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
+ /// assert_eq!(1, Rc::weak_count(&strong));
+ ///
+ /// drop(strong);
+ ///
+ /// // Decrement the last weak count.
+ /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
+ /// ```
+ ///
+ /// [`into_raw`]: Weak::into_raw
+ /// [`upgrade`]: Weak::upgrade
+ /// [`new`]: Weak::new
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // See Weak::as_ptr for context on how the input pointer is derived.
+
+ let ptr = if is_dangling(ptr as *mut T) {
+ // This is a dangling Weak.
+ ptr as *mut RcBox<T>
+ } else {
+ // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
+ // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
+ let offset = unsafe { data_offset(ptr) };
+ // Thus, we reverse the offset to get the whole RcBox.
+ // SAFETY: the pointer originated from a Weak, so this offset is safe.
+ unsafe { (ptr as *mut RcBox<T>).set_ptr_value((ptr as *mut u8).offset(-offset)) }
+ };
+
+ // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
+ Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } }
+ }
+
+ /// Attempts to upgrade the `Weak` pointer to an [`Rc`], delaying
+ /// dropping of the inner value if successful.
+ ///
+ /// Returns [`None`] if the inner value has since been dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// let weak_five = Rc::downgrade(&five);
+ ///
+ /// let strong_five: Option<Rc<_>> = weak_five.upgrade();
+ /// assert!(strong_five.is_some());
+ ///
+ /// // Destroy all strong pointers.
+ /// drop(strong_five);
+ /// drop(five);
+ ///
+ /// assert!(weak_five.upgrade().is_none());
+ /// ```
+ #[stable(feature = "rc_weak", since = "1.4.0")]
+ pub fn upgrade(&self) -> Option<Rc<T>> {
+ let inner = self.inner()?;
+ if inner.strong() == 0 {
+ None
+ } else {
+ inner.inc_strong();
+ Some(Rc::from_inner(self.ptr))
+ }
+ }
+
+ /// Gets the number of strong (`Rc`) pointers pointing to this allocation.
+ ///
+ /// If `self` was created using [`Weak::new`], this will return 0.
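+ ///
+ /// # Examples
+ ///
+ /// A small illustration of both cases:
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// let empty: Weak<i32> = Weak::new();
+ /// assert_eq!(0, empty.strong_count());
+ ///
+ /// let five = Rc::new(5);
+ /// let weak_five = Rc::downgrade(&five);
+ /// assert_eq!(1, weak_five.strong_count());
+ ///
+ /// drop(five);
+ /// assert_eq!(0, weak_five.strong_count());
+ /// ```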
+ #[stable(feature = "weak_counts", since = "1.41.0")]
+ pub fn strong_count(&self) -> usize {
+ if let Some(inner) = self.inner() { inner.strong() } else { 0 }
+ }
+
+ /// Gets the number of `Weak` pointers pointing to this allocation.
+ ///
+ /// If no strong pointers remain, this will return zero.
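+ ///
+ /// # Examples
+ ///
+ /// For instance, the implicit weak reference held collectively by the strong
+ /// pointers is not counted:
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ /// let weak_five = Rc::downgrade(&five);
+ /// assert_eq!(1, weak_five.weak_count());
+ ///
+ /// drop(five);
+ /// assert_eq!(0, weak_five.weak_count());
+ /// ```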
+ #[stable(feature = "weak_counts", since = "1.41.0")]
+ pub fn weak_count(&self) -> usize {
+ self.inner()
+ .map(|inner| {
+ if inner.strong() > 0 {
+ inner.weak() - 1 // subtract the implicit weak ptr
+ } else {
+ 0
+ }
+ })
+ .unwrap_or(0)
+ }
+
+ /// Returns `None` when the pointer is dangling and there is no allocated `RcBox`
+ /// (i.e., when this `Weak` was created by `Weak::new`).
+ #[inline]
+ fn inner(&self) -> Option<WeakInner<'_>> {
+ if is_dangling(self.ptr.as_ptr()) {
+ None
+ } else {
+ // We are careful to *not* create a reference covering the "data" field, as
+ // the field may be mutated concurrently (for example, if the last `Rc`
+ // is dropped, the data field will be dropped in-place).
+ Some(unsafe {
+ let ptr = self.ptr.as_ptr();
+ WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak }
+ })
+ }
+ }
+
+ /// Returns `true` if the two `Weak`s point to the same allocation (similar to
+ /// [`ptr::eq`]), or if both don't point to any allocation
+ /// (because they were created with `Weak::new()`).
+ ///
+ /// # Notes
+ ///
+ /// Since this compares pointers, two `Weak`s created by `Weak::new()` will
+ /// compare equal to each other, even though they don't point to any allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let first_rc = Rc::new(5);
+ /// let first = Rc::downgrade(&first_rc);
+ /// let second = Rc::downgrade(&first_rc);
+ ///
+ /// assert!(first.ptr_eq(&second));
+ ///
+ /// let third_rc = Rc::new(5);
+ /// let third = Rc::downgrade(&third_rc);
+ ///
+ /// assert!(!first.ptr_eq(&third));
+ /// ```
+ ///
+ /// Comparing `Weak::new`.
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// let first = Weak::new();
+ /// let second = Weak::new();
+ /// assert!(first.ptr_eq(&second));
+ ///
+ /// let third_rc = Rc::new(());
+ /// let third = Rc::downgrade(&third_rc);
+ /// assert!(!first.ptr_eq(&third));
+ /// ```
+ ///
+ /// [`ptr::eq`]: core::ptr::eq
+ #[inline]
+ #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
+ pub fn ptr_eq(&self, other: &Self) -> bool {
+ self.ptr.as_ptr() == other.ptr.as_ptr()
+ }
+}
+
+#[stable(feature = "rc_weak", since = "1.4.0")]
+unsafe impl<#[may_dangle] T: ?Sized> Drop for Weak<T> {
+ /// Drops the `Weak` pointer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// struct Foo;
+ ///
+ /// impl Drop for Foo {
+ /// fn drop(&mut self) {
+ /// println!("dropped!");
+ /// }
+ /// }
+ ///
+ /// let foo = Rc::new(Foo);
+ /// let weak_foo = Rc::downgrade(&foo);
+ /// let other_weak_foo = Weak::clone(&weak_foo);
+ ///
+ /// drop(weak_foo); // Doesn't print anything
+ /// drop(foo); // Prints "dropped!"
+ ///
+ /// assert!(other_weak_foo.upgrade().is_none());
+ /// ```
+ fn drop(&mut self) {
+ let inner = if let Some(inner) = self.inner() { inner } else { return };
+
+ inner.dec_weak();
+ // the weak count starts at 1, and will only go to zero if all
+ // the strong pointers have disappeared.
+ if inner.weak() == 0 {
+ unsafe {
+ Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()));
+ }
+ }
+ }
+}
+
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized> Clone for Weak<T> {
+ /// Makes a clone of the `Weak` pointer that points to the same allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// let weak_five = Rc::downgrade(&Rc::new(5));
+ ///
+ /// let _ = Weak::clone(&weak_five);
+ /// ```
+ #[inline]
+ fn clone(&self) -> Weak<T> {
+ if let Some(inner) = self.inner() {
+ inner.inc_weak()
+ }
+ Weak { ptr: self.ptr }
+ }
+}
+
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "(Weak)")
+ }
+}
+
+#[stable(feature = "downgraded_weak", since = "1.10.0")]
+impl<T> Default for Weak<T> {
+ /// Constructs a new `Weak<T>`, without allocating any memory.
+ /// Calling [`upgrade`] on the return value always gives [`None`].
+ ///
+ /// [`None`]: Option
+ /// [`upgrade`]: Weak::upgrade
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Weak;
+ ///
+ /// let empty: Weak<i64> = Default::default();
+ /// assert!(empty.upgrade().is_none());
+ /// ```
+ fn default() -> Weak<T> {
+ Weak::new()
+ }
+}
+
+// NOTE: We check for count overflow here to deal with mem::forget safely. In particular
+// if you mem::forget Rcs (or Weaks), the ref-count can overflow, and then
+// you can free the allocation while outstanding Rcs (or Weaks) exist.
+// We abort because this is such a degenerate scenario that we don't care about
+// what happens -- no real program should ever experience this.
+//
+// This should have negligible overhead since you don't actually need to
+// clone these much in Rust thanks to ownership and move-semantics.
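+//
+// An illustrative sketch (not part of the original comment): every forgotten
+// clone permanently leaks one reference, so the count can only reach
+// `usize::MAX` after `usize::MAX` forgotten clones.
+//
+//     let rc = Rc::new(());
+//     mem::forget(rc.clone()); // strong count is now 2 and can never drop to 0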
+
+#[doc(hidden)]
+trait RcInnerPtr {
+ fn weak_ref(&self) -> &Cell<usize>;
+ fn strong_ref(&self) -> &Cell<usize>;
+
+ #[inline]
+ fn strong(&self) -> usize {
+ self.strong_ref().get()
+ }
+
+ #[inline]
+ fn inc_strong(&self) {
+ let strong = self.strong();
+
+ // We want to abort on overflow instead of dropping the value.
+ // The reference count will never be zero when this is called;
+ // nevertheless, we insert an abort here to hint LLVM at
+ // an otherwise missed optimization.
+ if strong == 0 || strong == usize::MAX {
+ abort();
+ }
+ self.strong_ref().set(strong + 1);
+ }
+
+ #[inline]
+ fn dec_strong(&self) {
+ self.strong_ref().set(self.strong() - 1);
+ }
+
+ #[inline]
+ fn weak(&self) -> usize {
+ self.weak_ref().get()
+ }
+
+ #[inline]
+ fn inc_weak(&self) {
+ let weak = self.weak();
+
+ // We want to abort on overflow instead of dropping the value.
+ // The reference count will never be zero when this is called;
+ // nevertheless, we insert an abort here to hint LLVM at
+ // an otherwise missed optimization.
+ if weak == 0 || weak == usize::MAX {
+ abort();
+ }
+ self.weak_ref().set(weak + 1);
+ }
+
+ #[inline]
+ fn dec_weak(&self) {
+ self.weak_ref().set(self.weak() - 1);
+ }
+}
+
+impl<T: ?Sized> RcInnerPtr for RcBox<T> {
+ #[inline(always)]
+ fn weak_ref(&self) -> &Cell<usize> {
+ &self.weak
+ }
+
+ #[inline(always)]
+ fn strong_ref(&self) -> &Cell<usize> {
+ &self.strong
+ }
+}
+
+impl<'a> RcInnerPtr for WeakInner<'a> {
+ #[inline(always)]
+ fn weak_ref(&self) -> &Cell<usize> {
+ self.weak
+ }
+
+ #[inline(always)]
+ fn strong_ref(&self) -> &Cell<usize> {
+ self.strong
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> borrow::Borrow<T> for Rc<T> {
+ fn borrow(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized> AsRef<T> for Rc<T> {
+ fn as_ref(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<T: ?Sized> Unpin for Rc<T> {}
+
+/// Get the offset within an `RcBox` for the payload behind a pointer.
+///
+/// # Safety
+///
+/// The pointer must point to (and have valid metadata for) a previously
+/// valid instance of T, but the T is allowed to be dropped.
+unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
+ // Align the unsized value to the end of the RcBox.
+ // Because RcBox is repr(C), it will always be the last field in memory.
+ // SAFETY: since the only unsized types possible are slices, trait objects,
+ // and extern types, the input safety requirement is currently enough to
+ // satisfy the requirements of align_of_val_raw; this is an implementation
+ // detail of the language that may not be relied upon outside of std.
+ unsafe { data_offset_align(align_of_val_raw(ptr)) }
+}
+
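+// A worked illustration (assuming a typical 64-bit layout; not part of the
+// original source): `RcBox<()>` is just the two `Cell<usize>` counters, so the
+// header is 16 bytes, and e.g. `data_offset_align(8) == 16` while
+// `data_offset_align(32) == 32`, because the header is padded up to the align.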
+#[inline]
+fn data_offset_align(align: usize) -> isize {
+ let layout = Layout::new::<RcBox<()>>();
+ (layout.size() + layout.padding_needed_for(align)) as isize
+}
diff --git a/rust/alloc/slice.rs b/rust/alloc/slice.rs
new file mode 100644
index 000000000000..455d1be60c13
--- /dev/null
+++ b/rust/alloc/slice.rs
@@ -0,0 +1,1271 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! A dynamically-sized view into a contiguous sequence, `[T]`.
+//!
+//! *[See also the slice primitive type](slice).*
+//!
+//! Slices are a view into a block of memory represented as a pointer and a
+//! length.
+//!
+//! ```
+//! // slicing a Vec
+//! let vec = vec![1, 2, 3];
+//! let int_slice = &vec[..];
+//! // coercing an array to a slice
+//! let str_slice: &[&str] = &["one", "two", "three"];
+//! ```
+//!
+//! Slices are either mutable or shared. The shared slice type is `&[T]`,
+//! while the mutable slice type is `&mut [T]`, where `T` represents the element
+//! type. For example, you can mutate the block of memory that a mutable slice
+//! points to:
+//!
+//! ```
+//! let x = &mut [1, 2, 3];
+//! x[1] = 7;
+//! assert_eq!(x, &[1, 7, 3]);
+//! ```
+//!
+//! Here are some of the things this module contains:
+//!
+//! ## Structs
+//!
+//! There are several structs that are useful for slices, such as [`Iter`], which
+//! represents iteration over a slice.
+//!
+//! ## Trait Implementations
+//!
+//! There are several implementations of common traits for slices. Some examples
+//! include:
+//!
+//! * [`Clone`]
+//! * [`Eq`], [`Ord`] - for slices whose element type is [`Eq`] or [`Ord`].
+//! * [`Hash`] - for slices whose element type is [`Hash`].
+//!
+//! ## Iteration
+//!
+//! The slices implement `IntoIterator`. The iterator yields references to the
+//! slice elements.
+//!
+//! ```
+//! let numbers = &[0, 1, 2];
+//! for n in numbers {
+//! println!("{} is a number!", n);
+//! }
+//! ```
+//!
+//! The mutable slice yields mutable references to the elements:
+//!
+//! ```
+//! let mut scores = [7, 8, 9];
+//! for score in &mut scores[..] {
+//! *score += 1;
+//! }
+//! ```
+//!
+//! This iterator yields mutable references to the slice's elements, so while
+//! the element type of the slice is `i32`, the element type of the iterator is
+//! `&mut i32`.
+//!
+//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
+//! iterators.
+//! * Further methods that return iterators are [`.split`], [`.splitn`],
+//! [`.chunks`], [`.windows`] and more.
+//!
+//! [`Hash`]: core::hash::Hash
+//! [`.iter`]: slice::iter
+//! [`.iter_mut`]: slice::iter_mut
+//! [`.split`]: slice::split
+//! [`.splitn`]: slice::splitn
+//! [`.chunks`]: slice::chunks
+//! [`.windows`]: slice::windows
+#![stable(feature = "rust1", since = "1.0.0")]
+// Many of the imports in this module are only used in the test configuration.
+// It's cleaner to just turn off the unused_imports warning than to fix them.
+#![cfg_attr(test, allow(unused_imports, dead_code))]
+
+use core::borrow::{Borrow, BorrowMut};
+#[cfg(not(no_global_oom_handling))]
+use core::cmp::Ordering::{self, Less};
+#[cfg(not(no_global_oom_handling))]
+use core::mem;
+#[cfg(not(no_global_oom_handling))]
+use core::mem::size_of;
+#[cfg(not(no_global_oom_handling))]
+use core::ptr;
+
+use crate::alloc::Allocator;
+use crate::alloc::Global;
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::ToOwned;
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::vec::Vec;
+
+#[unstable(feature = "slice_range", issue = "76393")]
+pub use core::slice::range;
+#[unstable(feature = "array_chunks", issue = "74985")]
+pub use core::slice::ArrayChunks;
+#[unstable(feature = "array_chunks", issue = "74985")]
+pub use core::slice::ArrayChunksMut;
+#[unstable(feature = "array_windows", issue = "75027")]
+pub use core::slice::ArrayWindows;
+#[stable(feature = "slice_get_slice", since = "1.28.0")]
+pub use core::slice::SliceIndex;
+#[stable(feature = "from_ref", since = "1.28.0")]
+pub use core::slice::{from_mut, from_ref};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{from_raw_parts, from_raw_parts_mut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{Chunks, Windows};
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+pub use core::slice::{ChunksExact, ChunksExactMut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{ChunksMut, Split, SplitMut};
+#[unstable(feature = "slice_group_by", issue = "80552")]
+pub use core::slice::{GroupBy, GroupByMut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{Iter, IterMut};
+#[stable(feature = "rchunks", since = "1.31.0")]
+pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+pub use core::slice::{RSplit, RSplitMut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
+
+////////////////////////////////////////////////////////////////////////////////
+// Basic slice extension methods
+////////////////////////////////////////////////////////////////////////////////
+
+// HACK(japaric) needed for the implementation of `vec!` macro during testing
+// N.B., see the `hack` module in this file for more details.
+#[cfg(test)]
+pub use hack::into_vec;
+
+// HACK(japaric) needed for the implementation of `Vec::clone` during testing
+// N.B., see the `hack` module in this file for more details.
+#[cfg(test)]
+pub use hack::to_vec;
+
+// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
+// functions are actually methods that are in `impl [T]` but not in
+// `core::slice::SliceExt` - we need to supply these functions for the
+// `test_permutations` test
+mod hack {
+ use core::alloc::Allocator;
+
+ use crate::boxed::Box;
+ use crate::collections::TryReserveError;
+ use crate::vec::Vec;
+
+ // We shouldn't add an inline attribute here, since this is mostly used by
+ // the `vec!` macro, where inlining causes a perf regression. See #71204
+ // for discussion and perf results.
+ pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
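+ // SAFETY: the box uniquely owns an initialized `[T]` of length `len`;
+ // `into_raw_with_allocator` hands back that same allocation, which is
+ // reassembled as a `Vec` with `length == capacity == len`.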
+ unsafe {
+ let len = b.len();
+ let (b, alloc) = Box::into_raw_with_allocator(b);
+ Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
+ T::to_vec(s, alloc)
+ }
+
+ #[inline]
+ pub fn try_to_vec<T: TryConvertVec, A: Allocator>(s: &[T], alloc: A) -> Result<Vec<T, A>, TryReserveError> {
+ T::try_to_vec(s, alloc)
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ pub trait ConvertVec {
+ fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
+ where
+ Self: Sized;
+ }
+
+ pub trait TryConvertVec {
+ fn try_to_vec<A: Allocator>(s: &[Self], alloc: A) -> Result<Vec<Self, A>, TryReserveError>
+ where
+ Self: Sized;
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ impl<T: Clone> ConvertVec for T {
+ #[inline]
+ default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
+ struct DropGuard<'a, T, A: Allocator> {
+ vec: &'a mut Vec<T, A>,
+ num_init: usize,
+ }
+ impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY:
+ // items were marked initialized in the loop below
+ unsafe {
+ self.vec.set_len(self.num_init);
+ }
+ }
+ }
+ let mut vec = Vec::with_capacity_in(s.len(), alloc);
+ let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
+ let slots = guard.vec.spare_capacity_mut();
+ // .take(slots.len()) is necessary for LLVM to remove bounds checks
+ // and has better codegen than zip.
+ for (i, b) in s.iter().enumerate().take(slots.len()) {
+ guard.num_init = i;
+ slots[i].write(b.clone());
+ }
+ core::mem::forget(guard);
+ // SAFETY:
+ // the vec was allocated and initialized above to at least this length.
+ unsafe {
+ vec.set_len(s.len());
+ }
+ vec
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ impl<T: Copy> ConvertVec for T {
+ #[inline]
+ fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
+ let mut v = Vec::with_capacity_in(s.len(), alloc);
+ // SAFETY:
+ // allocated above with the capacity of `s`, and initialized to `s.len()` by
+ // the `copy_to_nonoverlapping` call below.
+ unsafe {
+ s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
+ v.set_len(s.len());
+ }
+ v
+ }
+ }
+
+ impl<T: Clone> TryConvertVec for T {
+ #[inline]
+ default fn try_to_vec<A: Allocator>(s: &[Self], alloc: A) -> Result<Vec<Self, A>, TryReserveError> {
+ struct DropGuard<'a, T, A: Allocator> {
+ vec: &'a mut Vec<T, A>,
+ num_init: usize,
+ }
+ impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY:
+ // items were marked initialized in the loop below
+ unsafe {
+ self.vec.set_len(self.num_init);
+ }
+ }
+ }
+ let mut vec = Vec::try_with_capacity_in(s.len(), alloc)?;
+ let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
+ let slots = guard.vec.spare_capacity_mut();
+ // .take(slots.len()) is necessary for LLVM to remove bounds checks
+ // and has better codegen than zip.
+ for (i, b) in s.iter().enumerate().take(slots.len()) {
+ guard.num_init = i;
+ slots[i].write(b.clone());
+ }
+ core::mem::forget(guard);
+ // SAFETY:
+ // the vec was allocated and initialized above to at least this length.
+ unsafe {
+ vec.set_len(s.len());
+ }
+ Ok(vec)
+ }
+ }
+}
+
+#[lang = "slice_alloc"]
+#[cfg(not(test))]
+impl<T> [T] {
+ /// Sorts the slice.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`sort_unstable`](slice::sort_unstable).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive, iterative merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage half the size of `self`, but for short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5, 4, 1, -3, 2];
+ ///
+ /// v.sort();
+ /// assert!(v == [-5, -3, 1, 2, 4]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sort(&mut self)
+ where
+ T: Ord,
+ {
+ merge_sort(self, |a, b| a.lt(b));
+ }
+
+ /// Sorts the slice with a comparator function.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// The comparator function must define a total ordering for the elements in the slice. If
+ /// the ordering is not total, the order of the elements is unspecified. An order is a
+ /// total order if it is (for all `a`, `b` and `c`):
+ ///
+ /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
+ /// * transitive: `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
+ ///
+ /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
+ /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
+ ///
+ /// ```
+ /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
+ /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
+ /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
+ /// ```
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`sort_unstable_by`](slice::sort_unstable_by).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive, iterative merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage half the size of `self`, but for short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [5, 4, 1, 3, 2];
+ /// v.sort_by(|a, b| a.cmp(b));
+ /// assert!(v == [1, 2, 3, 4, 5]);
+ ///
+ /// // reverse sorting
+ /// v.sort_by(|a, b| b.cmp(a));
+ /// assert!(v == [5, 4, 3, 2, 1]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sort_by<F>(&mut self, mut compare: F)
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ merge_sort(self, |a, b| compare(a, b) == Less);
+ }
+
+ /// Sorts the slice with a key extraction function.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
+ /// worst-case, where the key function is *O*(*m*).
+ ///
+ /// For expensive key functions (e.g. functions that are not simple property accesses or
+ /// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
+ /// significantly faster, as it does not recompute element keys.
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive, iterative merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage half the size of `self`, but for short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// v.sort_by_key(|k| k.abs());
+ /// assert!(v == [1, 2, -3, 4, -5]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
+ #[inline]
+ pub fn sort_by_key<K, F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> K,
+ K: Ord,
+ {
+ merge_sort(self, |a, b| f(a).lt(&f(b)));
+ }
+
+ /// Sorts the slice with a key extraction function.
+ ///
+ /// During sorting, the key function is called only once per element.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
+ /// worst-case, where the key function is *O*(*m*).
+ ///
+ /// For simple key functions (e.g., functions that are property accesses or
+ /// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
+ /// faster.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
+ /// length of the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 32, -3, 2];
+ ///
+ /// v.sort_by_cached_key(|k| k.to_string());
+ /// assert!(v == [-3, -5, 2, 32, 4]);
+ /// ```
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
+ #[inline]
+ pub fn sort_by_cached_key<K, F>(&mut self, f: F)
+ where
+ F: FnMut(&T) -> K,
+ K: Ord,
+ {
+ // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
+ macro_rules! sort_by_key {
+ ($t:ty, $slice:ident, $f:ident) => {{
+ let mut indices: Vec<_> =
+ $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
+ // The elements of `indices` are unique because each carries its own index, so
+ // any sort will be stable with respect to the original slice. We use
+ // `sort_unstable` here because it does not allocate auxiliary memory.
+ indices.sort_unstable();
+ for i in 0..$slice.len() {
+ let mut index = indices[i].1;
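+ // Positions before `i` are already final; follow the forwarding chain
+ // recorded below to find where the element for position `i` lives now.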
+ while (index as usize) < i {
+ index = indices[index as usize].1;
+ }
+ indices[i].1 = index;
+ $slice.swap(i, index as usize);
+ }
+ }};
+ }
+
+ let sz_u8 = mem::size_of::<(K, u8)>();
+ let sz_u16 = mem::size_of::<(K, u16)>();
+ let sz_u32 = mem::size_of::<(K, u32)>();
+ let sz_usize = mem::size_of::<(K, usize)>();
+
+ let len = self.len();
+ if len < 2 {
+ return;
+ }
+ if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
+ return sort_by_key!(u8, self, f);
+ }
+ if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
+ return sort_by_key!(u16, self, f);
+ }
+ if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
+ return sort_by_key!(u32, self, f);
+ }
+ sort_by_key!(usize, self, f)
+ }
+
+ /// Copies `self` into a new `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = [10, 40, 30];
+ /// let x = s.to_vec();
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_conversion_suggestion]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_vec(&self) -> Vec<T>
+ where
+ T: Clone,
+ {
+ self.to_vec_in(Global)
+ }
+
+ /// Tries to copy `self` into a new `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = [10, 40, 30];
+ /// let x = s.try_to_vec().unwrap();
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[inline]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_to_vec(&self) -> Result<Vec<T>, TryReserveError>
+ where
+ T: Clone,
+ {
+ self.try_to_vec_in(Global)
+ }
+
+ /// Copies `self` into a new `Vec` with an allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let s = [10, 40, 30];
+ /// let x = s.to_vec_in(System);
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
+ where
+ T: Clone,
+ {
+ // N.B., see the `hack` module in this file for more details.
+ hack::to_vec(self, alloc)
+ }
+
+ /// Tries to copy `self` into a new `Vec` with an allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let s = [10, 40, 30];
+ /// let x = s.try_to_vec_in(System).unwrap();
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[inline]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_to_vec_in<A: Allocator>(&self, alloc: A) -> Result<Vec<T, A>, TryReserveError>
+ where
+ T: Clone,
+ {
+ // N.B., see the `hack` module in this file for more details.
+ hack::try_to_vec(self, alloc)
+ }
+
+ /// Converts `self` into a vector without clones or allocation.
+ ///
+ /// The resulting vector can be converted back into a box via
+ /// `Vec<T>`'s `into_boxed_slice` method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s: Box<[i32]> = Box::new([10, 40, 30]);
+ /// let x = s.into_vec();
+ /// // `s` cannot be used anymore because it has been converted into `x`.
+ ///
+ /// assert_eq!(x, vec![10, 40, 30]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
+ // N.B., see the `hack` module in this file for more details.
+ hack::into_vec(self)
+ }
+
+ /// Creates a vector by repeating a slice `n` times.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the capacity would overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
+ /// ```
+ ///
+ /// A panic upon overflow:
+ ///
+ /// ```should_panic
+ /// // this will panic at runtime
+ /// b"0123456789abcdef".repeat(usize::MAX);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
+ pub fn repeat(&self, n: usize) -> Vec<T>
+ where
+ T: Copy,
+ {
+ if n == 0 {
+ return Vec::new();
+ }
+
+ // If `n` is larger than zero, it can be split as
+ // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
+ // `2^expn` is the number represented by the leftmost '1' bit of `n`,
+ // and `rem` is the remaining part of `n`.
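+ //
+ // For example (illustrative): `n = 5` gives `2^expn = 4` and `rem = 1`, so
+ // the buffer is doubled twice (1 -> 2 -> 4 repetitions) and the final
+ // repetition is filled in by the `rem` copy at the end.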
+
+ // Using `Vec` to access `set_len()`.
+ let capacity = self.len().checked_mul(n).expect("capacity overflow");
+ let mut buf = Vec::with_capacity(capacity);
+
+ // `2^expn` repetition is done by doubling `buf` `expn`-times.
+ buf.extend(self);
+ {
+ let mut m = n >> 1;
+ // If `m > 0`, there are remaining bits up to the leftmost '1'.
+ while m > 0 {
+ // `buf.extend(buf)`:
+ unsafe {
+ ptr::copy_nonoverlapping(
+ buf.as_ptr(),
+ (buf.as_mut_ptr() as *mut T).add(buf.len()),
+ buf.len(),
+ );
+ // `buf` has capacity of `self.len() * n`.
+ let buf_len = buf.len();
+ buf.set_len(buf_len * 2);
+ }
+
+ m >>= 1;
+ }
+ }
+
+ // `rem` (`= n - 2^expn`) repetition is done by copying
+ // first `rem` repetitions from `buf` itself.
+ let rem_len = capacity - buf.len(); // `self.len() * rem`
+ if rem_len > 0 {
+ // `buf.extend(buf[0 .. rem_len])`:
+ unsafe {
+ // This is non-overlapping since `2^expn > rem`.
+ ptr::copy_nonoverlapping(
+ buf.as_ptr(),
+ (buf.as_mut_ptr() as *mut T).add(buf.len()),
+ rem_len,
+ );
+ // `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`).
+ buf.set_len(capacity);
+ }
+ }
+ buf
+ }
+
+ /// Flattens a slice of `T` into a single value `Self::Output`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(["hello", "world"].concat(), "helloworld");
+ /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
+ where
+ Self: Concat<Item>,
+ {
+ Concat::concat(self)
+ }
+
+ /// Flattens a slice of `T` into a single value `Self::Output`, placing a
+ /// given separator between each.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(["hello", "world"].join(" "), "hello world");
+ /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
+ /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
+ /// ```
+ #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
+ pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
+ where
+ Self: Join<Separator>,
+ {
+ Join::join(self, sep)
+ }
+
+ /// Flattens a slice of `T` into a single value `Self::Output`, placing a
+ /// given separator between each.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(deprecated)]
+ /// assert_eq!(["hello", "world"].connect(" "), "hello world");
+ /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_deprecated(since = "1.3.0", reason = "renamed to join")]
+ pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
+ where
+ Self: Join<Separator>,
+ {
+ Join::join(self, sep)
+ }
+}
+
+#[lang = "slice_u8_alloc"]
+#[cfg(not(test))]
+impl [u8] {
+ /// Returns a vector containing a copy of this slice where each byte
+ /// is mapped to its ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To uppercase the value in-place, use [`make_ascii_uppercase`].
+ ///
+ /// [`make_ascii_uppercase`]: slice::make_ascii_uppercase
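+ ///
+ /// # Examples
+ ///
+ /// A brief illustration:
+ ///
+ /// ```
+ /// assert_eq!(b"hello, world-1!".to_ascii_uppercase(), b"HELLO, WORLD-1!");
+ /// ```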
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_uppercase(&self) -> Vec<u8> {
+ let mut me = self.to_vec();
+ me.make_ascii_uppercase();
+ me
+ }
+
+ /// Returns a vector containing a copy of this slice where each byte
+ /// is mapped to its ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To lowercase the value in-place, use [`make_ascii_lowercase`].
+ ///
+ /// [`make_ascii_lowercase`]: slice::make_ascii_lowercase
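+ ///
+ /// # Examples
+ ///
+ /// A brief illustration:
+ ///
+ /// ```
+ /// assert_eq!(b"HELLO, World-1!".to_ascii_lowercase(), b"hello, world-1!");
+ /// ```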
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_lowercase(&self) -> Vec<u8> {
+ let mut me = self.to_vec();
+ me.make_ascii_lowercase();
+ me
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Extension traits for slices over specific kinds of data
+////////////////////////////////////////////////////////////////////////////////
+
+/// Helper trait for [`[T]::concat`](slice::concat).
+///
+/// Note: the `Item` type parameter is not used in this trait,
+/// but it allows impls to be more generic.
+/// Without it, we get this error:
+///
+/// ```error
+/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
+/// --> src/liballoc/slice.rs:608:6
+/// |
+/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
+/// | ^ unconstrained type parameter
+/// ```
+///
+/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
+/// such that multiple `T` types would apply:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// pub struct Foo(Vec<u32>, Vec<String>);
+///
+/// impl std::borrow::Borrow<[u32]> for Foo {
+/// fn borrow(&self) -> &[u32] { &self.0 }
+/// }
+///
+/// impl std::borrow::Borrow<[String]> for Foo {
+/// fn borrow(&self) -> &[String] { &self.1 }
+/// }
+/// ```
+#[unstable(feature = "slice_concat_trait", issue = "27747")]
+pub trait Concat<Item: ?Sized> {
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ /// The resulting type after concatenation
+ type Output;
+
+ /// Implementation of [`[T]::concat`](slice::concat)
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ fn concat(slice: &Self) -> Self::Output;
+}
+
+/// Helper trait for [`[T]::join`](slice::join)
+#[unstable(feature = "slice_concat_trait", issue = "27747")]
+pub trait Join<Separator> {
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ /// The resulting type after concatenation
+ type Output;
+
+ /// Implementation of [`[T]::join`](slice::join)
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ fn join(slice: &Self, sep: Separator) -> Self::Output;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
+ type Output = Vec<T>;
+
+ fn concat(slice: &Self) -> Vec<T> {
+ let size = slice.iter().map(|slice| slice.borrow().len()).sum();
+ let mut result = Vec::with_capacity(size);
+ for v in slice {
+ result.extend_from_slice(v.borrow())
+ }
+ result
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
+ type Output = Vec<T>;
+
+ fn join(slice: &Self, sep: &T) -> Vec<T> {
+ let mut iter = slice.iter();
+ let first = match iter.next() {
+ Some(first) => first,
+ None => return vec![],
+ };
+ let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
+ let mut result = Vec::with_capacity(size);
+ result.extend_from_slice(first.borrow());
+
+ for v in iter {
+ result.push(sep.clone());
+ result.extend_from_slice(v.borrow())
+ }
+ result
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
+ type Output = Vec<T>;
+
+ fn join(slice: &Self, sep: &[T]) -> Vec<T> {
+ let mut iter = slice.iter();
+ let first = match iter.next() {
+ Some(first) => first,
+ None => return vec![],
+ };
+ let size =
+ slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
+ let mut result = Vec::with_capacity(size);
+ result.extend_from_slice(first.borrow());
+
+ for v in iter {
+ result.extend_from_slice(sep);
+ result.extend_from_slice(v.borrow())
+ }
+ result
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Standard trait implementations for slices
+////////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Borrow<[T]> for Vec<T> {
+ fn borrow(&self) -> &[T] {
+ &self[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> BorrowMut<[T]> for Vec<T> {
+ fn borrow_mut(&mut self) -> &mut [T] {
+ &mut self[..]
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> ToOwned for [T] {
+ type Owned = Vec<T>;
+ #[cfg(not(test))]
+ fn to_owned(&self) -> Vec<T> {
+ self.to_vec()
+ }
+
+ #[cfg(test)]
+ fn to_owned(&self) -> Vec<T> {
+ hack::to_vec(self, Global)
+ }
+
+ fn clone_into(&self, target: &mut Vec<T>) {
+ // drop anything in target that will not be overwritten
+ target.truncate(self.len());
+
+ // target.len <= self.len due to the truncate above, so the
+ // slices here are always in-bounds.
+ let (init, tail) = self.split_at(target.len());
+
+ // reuse the contained values' allocations/resources.
+ target.clone_from_slice(init);
+ target.extend_from_slice(tail);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Sorting
+////////////////////////////////////////////////////////////////////////////////
+
+/// Inserts `v[0]` into the pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted.
+///
+/// This is the core subroutine of insertion sort.
+#[cfg(not(no_global_oom_handling))]
+fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ if v.len() >= 2 && is_less(&v[1], &v[0]) {
+ unsafe {
+ // There are three ways to implement insertion here:
+ //
+ // 1. Swap adjacent elements until the first one gets to its final destination.
+ // However, this way we copy data around more than is necessary. If elements are big
+ // structures (costly to copy), this method will be slow.
+ //
+ // 2. Iterate until the right place for the first element is found. Then shift the
+ // elements succeeding it to make room for it and finally place it into the
+ // remaining hole. This is a good method.
+ //
+ // 3. Copy the first element into a temporary variable. Iterate until the right place
+ // for it is found. As we go along, copy every traversed element into the slot
+ // preceding it. Finally, copy data from the temporary variable into the remaining
+ // hole. This method is very good. Benchmarks demonstrated slightly better
+ // performance than with the 2nd method.
+ //
+ // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
+ let mut tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
+
+ // Intermediate state of the insertion process is always tracked by `hole`, which
+ // serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` in the end.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and
+ // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
+ // initially held exactly once.
+ let mut hole = InsertionHole { src: &mut *tmp, dest: &mut v[1] };
+ ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
+
+ for i in 2..v.len() {
+ if !is_less(&v[i], &*tmp) {
+ break;
+ }
+ ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
+ hole.dest = &mut v[i];
+ }
+ // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+ }
+ }
+
+ // When dropped, copies from `src` into `dest`.
+ struct InsertionHole<T> {
+ src: *mut T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for InsertionHole<T> {
+ fn drop(&mut self) {
+ unsafe {
+ ptr::copy_nonoverlapping(self.src, self.dest, 1);
+ }
+ }
+ }
+}
+
+/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
+/// stores the result into `v[..]`.
+///
+/// # Safety
+///
+/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
+/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
+#[cfg(not(no_global_oom_handling))]
+unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let len = v.len();
+ let v = v.as_mut_ptr();
+ let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
+
+ // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
+ // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
+ // copying the lesser (or greater) one into `v`.
+ //
+ // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
+ // consumed first, then we must copy whatever is left of the shorter run into the remaining
+ // hole in `v`.
+ //
+ // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
+ // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
+ // object it initially held exactly once.
+ let mut hole;
+
+ if mid <= len - mid {
+ // The left run is shorter.
+ unsafe {
+ ptr::copy_nonoverlapping(v, buf, mid);
+ hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
+ }
+
+ // Initially, these pointers point to the beginnings of their arrays.
+ let left = &mut hole.start;
+ let mut right = v_mid;
+ let out = &mut hole.dest;
+
+ while *left < hole.end && right < v_end {
+ // Consume the lesser side.
+ // If equal, prefer the left run to maintain stability.
+ unsafe {
+ let to_copy = if is_less(&*right, &**left) {
+ get_and_increment(&mut right)
+ } else {
+ get_and_increment(left)
+ };
+ ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
+ }
+ }
+ } else {
+ // The right run is shorter.
+ unsafe {
+ ptr::copy_nonoverlapping(v_mid, buf, len - mid);
+ hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
+ }
+
+ // Initially, these pointers point past the ends of their arrays.
+ let left = &mut hole.dest;
+ let right = &mut hole.end;
+ let mut out = v_end;
+
+ while v < *left && buf < *right {
+ // Consume the greater side.
+ // If equal, prefer the right run to maintain stability.
+ unsafe {
+ let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+ decrement_and_get(left)
+ } else {
+ decrement_and_get(right)
+ };
+ ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
+ }
+ }
+ }
+ // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
+ // it will now be copied into the hole in `v`.
+
+ unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
+ let old = *ptr;
+ *ptr = unsafe { ptr.offset(1) };
+ old
+ }
+
+ unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
+ *ptr = unsafe { ptr.offset(-1) };
+ *ptr
+ }
+
+ // When dropped, copies the range `start..end` into `dest..`.
+ struct MergeHole<T> {
+ start: *mut T,
+ end: *mut T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for MergeHole<T> {
+ fn drop(&mut self) {
+ // `T` is not a zero-sized type, so it's okay to divide by its size.
+ let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
+ unsafe {
+ ptr::copy_nonoverlapping(self.start, self.dest, len);
+ }
+ }
+ }
+}
+
+/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
+/// [here](http://svn.python.org/projects/python/trunk/Objects/listsort.txt).
+///
+/// The algorithm identifies strictly descending and non-descending subsequences, which are called
+/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
+/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
+/// satisfied:
+///
+/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
+/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
+///
+/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
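+///
+/// For instance (an illustrative walk-through): with pending runs of lengths
+/// `[10, 5]` both invariants hold; pushing a run of length 6 violates
+/// invariant 2 (`10 <= 5 + 6`), so the runs of lengths 5 and 6 are merged into
+/// one of length 11, which then violates invariant 1 (`10 <= 11`) and forces a
+/// further merge down to a single run of length 21.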
+#[cfg(not(no_global_oom_handling))]
+fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // Slices of up to this length get sorted using insertion sort.
+ const MAX_INSERTION: usize = 20;
+ // Very short runs are extended using insertion sort to span at least this many elements.
+ const MIN_RUN: usize = 10;
+
+ // Sorting has no meaningful behavior on zero-sized types.
+ if size_of::<T>() == 0 {
+ return;
+ }
+
+ let len = v.len();
+
+ // Short arrays get sorted in-place via insertion sort to avoid allocations.
+ if len <= MAX_INSERTION {
+ if len >= 2 {
+ for i in (0..len - 1).rev() {
+ insert_head(&mut v[i..], &mut is_less);
+ }
+ }
+ return;
+ }
+
+ // Allocate a buffer to use as scratch memory. We keep the length 0 so that the buffer can
+ // hold shallow copies of the contents of `v` without risking the destructors running on
+ // those copies if `is_less` panics. When merging two sorted runs, this buffer holds a copy
+ // of the shorter run, which will always have length at most `len / 2`.
+ let mut buf = Vec::with_capacity(len / 2);
+
+ // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
+ // strange decision, but consider the fact that merges more often go in the opposite direction
+ // (forwards). According to benchmarks, merging forwards is slightly faster than merging
+// backwards. Hence, identifying runs by traversing backwards improves performance.
+ let mut runs = vec![];
+ let mut end = len;
+ while end > 0 {
+ // Find the next natural run, and reverse it if it's strictly descending.
+ let mut start = end - 1;
+ if start > 0 {
+ start -= 1;
+ unsafe {
+ if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
+ while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
+ start -= 1;
+ }
+ v[start..end].reverse();
+ } else {
+ while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
+ {
+ start -= 1;
+ }
+ }
+ }
+ }
+
+ // Insert some more elements into the run if it's too short. Insertion sort is faster than
+ // merge sort on short sequences, so this significantly improves performance.
+ while start > 0 && end - start < MIN_RUN {
+ start -= 1;
+ insert_head(&mut v[start..end], &mut is_less);
+ }
+
+ // Push this run onto the stack.
+ runs.push(Run { start, len: end - start });
+ end = start;
+
+ // Merge some pairs of adjacent runs to satisfy the invariants.
+ while let Some(r) = collapse(&runs) {
+ let left = runs[r + 1];
+ let right = runs[r];
+ unsafe {
+ merge(
+ &mut v[left.start..right.start + right.len],
+ left.len,
+ buf.as_mut_ptr(),
+ &mut is_less,
+ );
+ }
+ runs[r] = Run { start: left.start, len: left.len + right.len };
+ runs.remove(r + 1);
+ }
+ }
+
+ // Finally, exactly one run must remain in the stack.
+ debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
+
+ // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
+ // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
+ // algorithm should continue building a new run instead, `None` is returned.
+ //
+ // TimSort is infamous for its buggy implementations, as described here:
+ // http://envisage-project.eu/timsort-specification-and-verification/
+ //
+ // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
+ // Enforcing them on just top three is not sufficient to ensure that the invariants will still
+ // hold for *all* runs in the stack.
+ //
+ // This function correctly checks invariants for the top four runs. Additionally, if the top
+ // run starts at index 0, it will always demand a merge operation until the stack is fully
+ // collapsed, in order to complete the sort.
+ #[inline]
+ fn collapse(runs: &[Run]) -> Option<usize> {
+ let n = runs.len();
+ if n >= 2
+ && (runs[n - 1].start == 0
+ || runs[n - 2].len <= runs[n - 1].len
+ || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
+ || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
+ {
+ if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
+ } else {
+ None
+ }
+ }
+
+ #[derive(Clone, Copy)]
+ struct Run {
+ start: usize,
+ len: usize,
+ }
+}
diff --git a/rust/alloc/str.rs b/rust/alloc/str.rs
new file mode 100644
index 000000000000..ed31405fa1c4
--- /dev/null
+++ b/rust/alloc/str.rs
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Unicode string slices.
+//!
+//! *[See also the `str` primitive type](str).*
+//!
+//! The `&str` type is one of the two main string types, the other being `String`.
+//! Unlike its `String` counterpart, its contents are borrowed.
+//!
+//! # Basic Usage
+//!
+//! A basic string declaration of `&str` type:
+//!
+//! ```
+//! let hello_world = "Hello, World!";
+//! ```
+//!
+//! Here we have declared a string literal, also known as a string slice.
+//! String literals have a static lifetime, which means the string `hello_world`
+//! is guaranteed to be valid for the duration of the entire program.
+//! We can explicitly specify `hello_world`'s lifetime as well:
+//!
+//! ```
+//! let hello_world: &'static str = "Hello, world!";
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+// Many of the imports in this module are only used in the test configuration.
+// It's cleaner to just turn off the unused_imports warning than to fix them.
+#![allow(unused_imports)]
+
+use core::borrow::{Borrow, BorrowMut};
+use core::iter::FusedIterator;
+use core::mem;
+use core::ptr;
+use core::str::pattern::{DoubleEndedSearcher, Pattern, ReverseSearcher, Searcher};
+use core::unicode::conversions;
+
+use crate::borrow::ToOwned;
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::slice::{Concat, Join, SliceIndex};
+use crate::string::String;
+use crate::vec::Vec;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::pattern;
+#[stable(feature = "encode_utf16", since = "1.8.0")]
+pub use core::str::EncodeUtf16;
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+pub use core::str::SplitAsciiWhitespace;
+#[stable(feature = "split_inclusive", since = "1.53.0")]
+pub use core::str::SplitInclusive;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::SplitWhitespace;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{from_utf8, from_utf8_mut, Bytes, CharIndices, Chars};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{from_utf8_unchecked, from_utf8_unchecked_mut, ParseBoolError};
+#[stable(feature = "str_escape", since = "1.34.0")]
+pub use core::str::{EscapeDebug, EscapeDefault, EscapeUnicode};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{FromStr, Utf8Error};
+#[allow(deprecated)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{Lines, LinesAny};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{MatchIndices, RMatchIndices};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{Matches, RMatches};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{RSplit, Split};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{RSplitN, SplitN};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{RSplitTerminator, SplitTerminator};
+
+/// Note: `str` in `Concat<str>` is not meaningful here.
+/// This type parameter of the trait only exists to enable another impl.
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<S: Borrow<str>> Concat<str> for [S] {
+ type Output = String;
+
+ fn concat(slice: &Self) -> String {
+ Join::join(slice, "")
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<S: Borrow<str>> Join<&str> for [S] {
+ type Output = String;
+
+ fn join(slice: &Self, sep: &str) -> String {
+ unsafe { String::from_utf8_unchecked(join_generic_copy(slice, sep.as_bytes())) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+macro_rules! specialize_for_lengths {
+ ($separator:expr, $target:expr, $iter:expr; $($num:expr),*) => {{
+ let mut target = $target;
+ let iter = $iter;
+ let sep_bytes = $separator;
+ match $separator.len() {
+ $(
+ // loops with hardcoded sizes run much faster
+ // specialize the cases with small separator lengths
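+ // (the arms below are textually identical, but matching on a constant
+ // separator length lets the compiler emit a specialized copy loop
+ // for each of the listed sizes)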
+ $num => {
+ for s in iter {
+ copy_slice_and_advance!(target, sep_bytes);
+ let content_bytes = s.borrow().as_ref();
+ copy_slice_and_advance!(target, content_bytes);
+ }
+ },
+ )*
+ _ => {
+ // arbitrary non-zero size fallback
+ for s in iter {
+ copy_slice_and_advance!(target, sep_bytes);
+ let content_bytes = s.borrow().as_ref();
+ copy_slice_and_advance!(target, content_bytes);
+ }
+ }
+ }
+ target
+ }}
+}
+
+#[cfg(not(no_global_oom_handling))]
+macro_rules! copy_slice_and_advance {
+ ($target:expr, $bytes:expr) => {
+ let len = $bytes.len();
+ let (head, tail) = { $target }.split_at_mut(len);
+ head.copy_from_slice($bytes);
+ $target = tail;
+ };
+}
+
+// Optimized join implementation that works for both Vec<T> (T: Copy) and String's inner vec
+// Currently (2018-05-13) there is a bug with type inference and specialization (see issue #36262)
+// For this reason SliceConcat<T> is not specialized for T: Copy and SliceConcat<str> is the
+// only user of this function. It is left in place for the time when that is fixed.
+//
+// the bounds for String-join are S: Borrow<str> and for Vec-join Borrow<[T]>
+// [T] and str both impl AsRef<[T]> for some T
+// => s.borrow().as_ref() and we always have slices
+#[cfg(not(no_global_oom_handling))]
+fn join_generic_copy<B, T, S>(slice: &[S], sep: &[T]) -> Vec<T>
+where
+ T: Copy,
+ B: AsRef<[T]> + ?Sized,
+ S: Borrow<B>,
+{
+ let sep_len = sep.len();
+ let mut iter = slice.iter();
+
+ // the first slice is the only one without a separator preceding it
+ let first = match iter.next() {
+ Some(first) => first,
+ None => return vec![],
+ };
+
+ // Compute the exact total length of the joined Vec. If the `len` calculation
+ // overflows, we'll panic; we would have run out of memory anyway, and the
+ // rest of the function requires the entire Vec to be pre-allocated for safety.
+ let reserved_len = sep_len
+ .checked_mul(iter.len())
+ .and_then(|n| {
+ slice.iter().map(|s| s.borrow().as_ref().len()).try_fold(n, usize::checked_add)
+ })
+ .expect("attempt to join into collection with len > usize::MAX");
+
+ // prepare an uninitialized buffer
+ let mut result = Vec::with_capacity(reserved_len);
+ debug_assert!(result.capacity() >= reserved_len);
+
+ result.extend_from_slice(first.borrow().as_ref());
+
+ unsafe {
+ let pos = result.len();
+ let target = result.get_unchecked_mut(pos..reserved_len);
+
+ // copy separator and slices over without bounds checks
+ // generate loops with hardcoded offsets for small separators
+ // massive improvements possible (~ x2)
+ let remain = specialize_for_lengths!(sep, target, iter; 0, 1, 2, 3, 4);
+
+ // A weird borrow implementation may return different
+ // slices for the length calculation and the actual copy.
+ // Make sure we don't expose uninitialized bytes to the caller.
+ let result_len = reserved_len - remain.len();
+ result.set_len(result_len);
+ }
+ result
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Borrow<str> for String {
+ #[inline]
+ fn borrow(&self) -> &str {
+ &self[..]
+ }
+}
+
+#[stable(feature = "string_borrow_mut", since = "1.36.0")]
+impl BorrowMut<str> for String {
+ #[inline]
+ fn borrow_mut(&mut self) -> &mut str {
+ &mut self[..]
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToOwned for str {
+ type Owned = String;
+ #[inline]
+ fn to_owned(&self) -> String {
+ unsafe { String::from_utf8_unchecked(self.as_bytes().to_owned()) }
+ }
+
+ fn clone_into(&self, target: &mut String) {
+ let mut b = mem::take(target).into_bytes();
+ self.as_bytes().clone_into(&mut b);
+ *target = unsafe { String::from_utf8_unchecked(b) }
+ }
+}
+
+/// Methods for string slices.
+#[lang = "str_alloc"]
+#[cfg(not(test))]
+impl str {
+ /// Converts a `Box<str>` into a `Box<[u8]>` without copying or allocating.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "this is a string";
+ /// let boxed_str = s.to_owned().into_boxed_str();
+ /// let boxed_bytes = boxed_str.into_boxed_bytes();
+ /// assert_eq!(*boxed_bytes, *s.as_bytes());
+ /// ```
+ #[stable(feature = "str_box_extras", since = "1.20.0")]
+ #[inline]
+ pub fn into_boxed_bytes(self: Box<str>) -> Box<[u8]> {
+ self.into()
+ }
+
+ /// Replaces all matches of a pattern with another string.
+ ///
+ /// `replace` creates a new [`String`], and copies the data from this string slice into it.
+ /// While doing so, it attempts to find matches of a pattern. If it finds any, it
+ /// replaces them with the replacement string slice.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "this is old";
+ ///
+ /// assert_eq!("this is new", s.replace("old", "new"));
+ /// ```
+ ///
+ /// When the pattern doesn't match:
+ ///
+ /// ```
+ /// let s = "this is old";
+ /// assert_eq!(s, s.replace("cookie monster", "little lamb"));
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use = "this returns the replaced string as a new allocation, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn replace<'a, P: Pattern<'a>>(&'a self, from: P, to: &str) -> String {
+ let mut result = String::new();
+ let mut last_end = 0;
+ for (start, part) in self.match_indices(from) {
+ result.push_str(unsafe { self.get_unchecked(last_end..start) });
+ result.push_str(to);
+ last_end = start + part.len();
+ }
+ result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
+ result
+ }
+
+ /// Replaces the first N matches of a pattern with another string.
+ ///
+ /// `replacen` creates a new [`String`], and copies the data from this string slice into it.
+ /// While doing so, it attempts to find matches of a pattern. If it finds any, it
+ /// replaces them with the replacement string slice at most `count` times.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "foo foo 123 foo";
+ /// assert_eq!("new new 123 foo", s.replacen("foo", "new", 2));
+ /// assert_eq!("faa fao 123 foo", s.replacen('o', "a", 3));
+ /// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1));
+ /// ```
+ ///
+ /// When the pattern doesn't match:
+ ///
+ /// ```
+ /// let s = "this is old";
+ /// assert_eq!(s, s.replacen("cookie monster", "little lamb", 10));
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use = "this returns the replaced string as a new allocation, \
+ without modifying the original"]
+ #[stable(feature = "str_replacen", since = "1.16.0")]
+ pub fn replacen<'a, P: Pattern<'a>>(&'a self, pat: P, to: &str, count: usize) -> String {
+ // Start with some capacity to reduce the number of reallocations
+ let mut result = String::with_capacity(32);
+ let mut last_end = 0;
+ for (start, part) in self.match_indices(pat).take(count) {
+ result.push_str(unsafe { self.get_unchecked(last_end..start) });
+ result.push_str(to);
+ last_end = start + part.len();
+ }
+ result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
+ result
+ }
+
+ /// Returns the lowercase equivalent of this string slice, as a new [`String`].
+ ///
+ /// 'Lowercase' is defined according to the terms of the Unicode Derived Core Property
+ /// `Lowercase`.
+ ///
+ /// Since some characters can expand into multiple characters when changing
+ /// the case, this function returns a [`String`] instead of modifying the
+ /// parameter in-place.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "HELLO";
+ ///
+ /// assert_eq!("hello", s.to_lowercase());
+ /// ```
+ ///
+ /// A tricky example, with sigma:
+ ///
+ /// ```
+ /// let sigma = "Σ";
+ ///
+ /// assert_eq!("σ", sigma.to_lowercase());
+ ///
+ /// // but at the end of a word, it's ς, not σ:
+ /// let odysseus = "ὈΔΥΣΣΕΎΣ";
+ ///
+ /// assert_eq!("ὀδυσσεύς", odysseus.to_lowercase());
+ /// ```
+ ///
+ /// Scripts without case are not changed:
+ ///
+ /// ```
+ /// let new_year = "农历新年";
+ ///
+ /// assert_eq!(new_year, new_year.to_lowercase());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "unicode_case_mapping", since = "1.2.0")]
+ pub fn to_lowercase(&self) -> String {
+ let mut s = String::with_capacity(self.len());
+ for (i, c) in self[..].char_indices() {
+ if c == 'Σ' {
+ // Σ maps to σ, except at the end of a word where it maps to ς.
+ // This is the only conditional (contextual) but language-independent mapping
+ // in `SpecialCasing.txt`,
+ // so hard-code it rather than have a generic "condition" mechanism.
+ // See https://github.com/rust-lang/rust/issues/26035
+ map_uppercase_sigma(self, i, &mut s)
+ } else {
+ match conversions::to_lower(c) {
+ [a, '\0', _] => s.push(a),
+ [a, b, '\0'] => {
+ s.push(a);
+ s.push(b);
+ }
+ [a, b, c] => {
+ s.push(a);
+ s.push(b);
+ s.push(c);
+ }
+ }
+ }
+ }
+ return s;
+
+ fn map_uppercase_sigma(from: &str, i: usize, to: &mut String) {
+ // See http://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992
+ // for the definition of `Final_Sigma`.
+ debug_assert!('Σ'.len_utf8() == 2);
+ let is_word_final = case_ignorable_then_cased(from[..i].chars().rev())
+ && !case_ignorable_then_cased(from[i + 2..].chars());
+ to.push_str(if is_word_final { "ς" } else { "σ" });
+ }
+
+ fn case_ignorable_then_cased<I: Iterator<Item = char>>(iter: I) -> bool {
+ use core::unicode::{Case_Ignorable, Cased};
+ match iter.skip_while(|&c| Case_Ignorable(c)).next() {
+ Some(c) => Cased(c),
+ None => false,
+ }
+ }
+ }
+
+ /// Returns the uppercase equivalent of this string slice, as a new [`String`].
+ ///
+ /// 'Uppercase' is defined according to the terms of the Unicode Derived Core Property
+ /// `Uppercase`.
+ ///
+ /// Since some characters can expand into multiple characters when changing
+ /// the case, this function returns a [`String`] instead of modifying the
+ /// parameter in-place.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "hello";
+ ///
+ /// assert_eq!("HELLO", s.to_uppercase());
+ /// ```
+ ///
+ /// Scripts without case are not changed:
+ ///
+ /// ```
+ /// let new_year = "农历新年";
+ ///
+ /// assert_eq!(new_year, new_year.to_uppercase());
+ /// ```
+ ///
+ /// One character can become multiple:
+ /// ```
+ /// let s = "tschüß";
+ ///
+ /// assert_eq!("TSCHÜSS", s.to_uppercase());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "unicode_case_mapping", since = "1.2.0")]
+ pub fn to_uppercase(&self) -> String {
+ let mut s = String::with_capacity(self.len());
+ for c in self[..].chars() {
+ match conversions::to_upper(c) {
+ [a, '\0', _] => s.push(a),
+ [a, b, '\0'] => {
+ s.push(a);
+ s.push(b);
+ }
+ [a, b, c] => {
+ s.push(a);
+ s.push(b);
+ s.push(c);
+ }
+ }
+ }
+ s
+ }
+
+ /// Converts a [`Box<str>`] into a [`String`] without copying or allocating.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let string = String::from("birthday gift");
+ /// let boxed_str = string.clone().into_boxed_str();
+ ///
+ /// assert_eq!(boxed_str.into_string(), string);
+ /// ```
+ #[stable(feature = "box_str", since = "1.4.0")]
+ #[inline]
+ pub fn into_string(self: Box<str>) -> String {
+ let slice = Box::<[u8]>::from(self);
+ unsafe { String::from_utf8_unchecked(slice.into_vec()) }
+ }
+
+ /// Creates a new [`String`] by repeating a string `n` times.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the capacity would overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!("abc".repeat(4), String::from("abcabcabcabc"));
+ /// ```
+ ///
+ /// A panic upon overflow:
+ ///
+ /// ```should_panic
+ /// // this will panic at runtime
+ /// "0123456789abcdef".repeat(usize::MAX);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "repeat_str", since = "1.16.0")]
+ pub fn repeat(&self, n: usize) -> String {
+ unsafe { String::from_utf8_unchecked(self.as_bytes().repeat(n)) }
+ }
+
+ /// Returns a copy of this string where each character is mapped to its
+ /// ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To uppercase the value in-place, use [`make_ascii_uppercase`].
+ ///
+ /// To uppercase ASCII characters in addition to non-ASCII characters, use
+ /// [`to_uppercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = "Grüße, Jürgen ❤";
+ ///
+ /// assert_eq!("GRüßE, JüRGEN ❤", s.to_ascii_uppercase());
+ /// ```
+ ///
+ /// [`make_ascii_uppercase`]: str::make_ascii_uppercase
+ /// [`to_uppercase`]: #method.to_uppercase
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_uppercase(&self) -> String {
+ let mut bytes = self.as_bytes().to_vec();
+ bytes.make_ascii_uppercase();
+ // make_ascii_uppercase() preserves the UTF-8 invariant.
+ unsafe { String::from_utf8_unchecked(bytes) }
+ }
+
+ /// Returns a copy of this string where each character is mapped to its
+ /// ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To lowercase the value in-place, use [`make_ascii_lowercase`].
+ ///
+ /// To lowercase ASCII characters in addition to non-ASCII characters, use
+ /// [`to_lowercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = "Grüße, Jürgen ❤";
+ ///
+ /// assert_eq!("grüße, jürgen ❤", s.to_ascii_lowercase());
+ /// ```
+ ///
+ /// [`make_ascii_lowercase`]: str::make_ascii_lowercase
+ /// [`to_lowercase`]: #method.to_lowercase
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_lowercase(&self) -> String {
+ let mut bytes = self.as_bytes().to_vec();
+ bytes.make_ascii_lowercase();
+ // make_ascii_lowercase() preserves the UTF-8 invariant.
+ unsafe { String::from_utf8_unchecked(bytes) }
+ }
+
+ /// Tries to create an owned `String` from this string slice.
+ ///
+ /// Returns an error if memory allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "a";
+ /// let ss: String = s.try_to_owned().unwrap();
+ /// ```
+ #[inline]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_to_owned(&self) -> Result<String, TryReserveError> {
+ unsafe { Ok(String::from_utf8_unchecked(self.as_bytes().try_to_vec()?)) }
+ }
+}
+
+/// Converts a boxed slice of bytes to a boxed string slice without checking
+/// that the string contains valid UTF-8.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let smile_utf8 = Box::new([226, 152, 186]);
+/// let smile = unsafe { std::str::from_boxed_utf8_unchecked(smile_utf8) };
+///
+/// assert_eq!("☺", &*smile);
+/// ```
+#[stable(feature = "str_box_extras", since = "1.20.0")]
+#[inline]
+pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box<str> {
+ unsafe { Box::from_raw(Box::into_raw(v) as *mut str) }
+}
diff --git a/rust/alloc/string.rs b/rust/alloc/string.rs
new file mode 100644
index 000000000000..55293c3041e7
--- /dev/null
+++ b/rust/alloc/string.rs
@@ -0,0 +1,2847 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! A UTF-8–encoded, growable string.
+//!
+//! This module contains the [`String`] type, the [`ToString`] trait for
+//! converting to strings, and several error types that may result from
+//! working with [`String`]s.
+//!
+//! # Examples
+//!
+//! There are multiple ways to create a new [`String`] from a string literal:
+//!
+//! ```
+//! let s = "Hello".to_string();
+//!
+//! let s = String::from("world");
+//! let s: String = "also this".into();
+//! ```
+//!
+//! You can create a new [`String`] from an existing one by concatenating with
+//! `+`:
+//!
+//! ```
+//! let s = "Hello".to_string();
+//!
+//! let message = s + " world!";
+//! ```
+//!
+//! If you have a vector of valid UTF-8 bytes, you can make a [`String`] out of
+//! it. You can do the reverse too.
+//!
+//! ```
+//! let sparkle_heart = vec![240, 159, 146, 150];
+//!
+//! // We know these bytes are valid, so we'll use `unwrap()`.
+//! let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
+//!
+//! assert_eq!("💖", sparkle_heart);
+//!
+//! let bytes = sparkle_heart.into_bytes();
+//!
+//! assert_eq!(bytes, [240, 159, 146, 150]);
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(no_global_oom_handling))]
+use core::char::{decode_utf16, REPLACEMENT_CHARACTER};
+use core::fmt;
+use core::hash;
+#[cfg(not(no_global_oom_handling))]
+use core::iter::{from_fn, FromIterator};
+use core::iter::FusedIterator;
+#[cfg(not(no_global_oom_handling))]
+use core::ops::Add;
+#[cfg(not(no_global_oom_handling))]
+use core::ops::AddAssign;
+#[cfg(not(no_global_oom_handling))]
+use core::ops::Bound::{Excluded, Included, Unbounded};
+use core::ops::{self, Index, IndexMut, Range, RangeBounds};
+use core::ptr;
+use core::slice;
+#[cfg(not(no_global_oom_handling))]
+use core::str::lossy;
+use core::str::pattern::Pattern;
+
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::{Cow, ToOwned};
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::str::{self, Chars, Utf8Error};
+#[cfg(not(no_global_oom_handling))]
+use crate::str::{from_boxed_utf8_unchecked, FromStr};
+use crate::vec::Vec;
+
+/// A UTF-8–encoded, growable string.
+///
+/// The `String` type is the most common string type that has ownership over the
+/// contents of the string. It has a close relationship with its borrowed
+/// counterpart, the primitive [`str`].
+///
+/// # Examples
+///
+/// You can create a `String` from [a literal string][`str`] with [`String::from`]:
+///
+/// [`String::from`]: From::from
+///
+/// ```
+/// let hello = String::from("Hello, world!");
+/// ```
+///
+/// You can append a [`char`] to a `String` with the [`push`] method, and
+/// append a [`&str`] with the [`push_str`] method:
+///
+/// ```
+/// let mut hello = String::from("Hello, ");
+///
+/// hello.push('w');
+/// hello.push_str("orld!");
+/// ```
+///
+/// [`push`]: String::push
+/// [`push_str`]: String::push_str
+///
+/// If you have a vector of UTF-8 bytes, you can create a `String` from it with
+/// the [`from_utf8`] method:
+///
+/// ```
+/// // some bytes, in a vector
+/// let sparkle_heart = vec![240, 159, 146, 150];
+///
+/// // We know these bytes are valid, so we'll use `unwrap()`.
+/// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
+///
+/// assert_eq!("💖", sparkle_heart);
+/// ```
+///
+/// [`from_utf8`]: String::from_utf8
+///
+/// # UTF-8
+///
+/// `String`s are always valid UTF-8. This has a few implications, the first of
+/// which is that if you need a non-UTF-8 string, consider [`OsString`]. It is
+/// similar, but without the UTF-8 constraint. The second implication is that
+/// you cannot index into a `String`:
+///
+/// ```compile_fail,E0277
+/// let s = "hello";
+///
+/// println!("The first letter of s is {}", s[0]); // ERROR!!!
+/// ```
+///
+/// [`OsString`]: ../../std/ffi/struct.OsString.html
+///
+/// Indexing is intended to be a constant-time operation, but UTF-8 encoding
+/// does not allow us to do this. Furthermore, it's not clear what sort of
+/// thing the index should return: a byte, a codepoint, or a grapheme cluster.
+/// The [`bytes`] and [`chars`] methods return iterators over the first
+/// two, respectively.
+///
+/// [`bytes`]: str::bytes
+/// [`chars`]: str::chars
+///
+/// # Deref
+///
+/// `String`s implement [`Deref`]`<Target=str>`, and so inherit all of [`str`]'s
+/// methods. In addition, this means that you can pass a `String` to a
+/// function which takes a [`&str`] by using an ampersand (`&`):
+///
+/// ```
+/// fn takes_str(s: &str) { }
+///
+/// let s = String::from("Hello");
+///
+/// takes_str(&s);
+/// ```
+///
+/// This will create a [`&str`] from the `String` and pass it in. This
+/// conversion is very inexpensive, and so generally, functions will accept
+/// [`&str`]s as arguments unless they need a `String` for some specific
+/// reason.
+///
+/// In certain cases Rust doesn't have enough information to perform this
+/// conversion, which is known as [`Deref`] coercion. In the following example,
+/// a string
+/// slice [`&'a str`][`&str`] implements the trait `TraitExample`, and the function
+/// `example_func` takes anything that implements the trait. In this case Rust
+/// would need to make two implicit conversions, which Rust doesn't have the
+/// means to do. For that reason, the following example will not compile.
+///
+/// ```compile_fail,E0277
+/// trait TraitExample {}
+///
+/// impl<'a> TraitExample for &'a str {}
+///
+/// fn example_func<A: TraitExample>(example_arg: A) {}
+///
+/// let example_string = String::from("example_string");
+/// example_func(&example_string);
+/// ```
+///
+/// There are two options that would work instead. The first would be to
+/// change the line `example_func(&example_string);` to
+/// `example_func(example_string.as_str());`, using the method [`as_str()`]
+/// to explicitly extract the string slice containing the string. The second
+/// way changes `example_func(&example_string);` to
+/// `example_func(&*example_string);`. In this case we are dereferencing a
+/// `String` to a [`str`][`&str`], then referencing the [`str`][`&str`] back to
+/// [`&str`]. The second way is more idiomatic, however both work to do the
+/// conversion explicitly rather than relying on the implicit conversion.
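+///
+/// For instance, both workarounds compile:
+///
+/// ```
+/// # trait TraitExample {}
+/// # impl<'a> TraitExample for &'a str {}
+/// # fn example_func<A: TraitExample>(example_arg: A) {}
+/// let example_string = String::from("example_string");
+/// example_func(example_string.as_str());
+/// example_func(&*example_string);
+/// ```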
+///
+/// # Representation
+///
+/// A `String` is made up of three components: a pointer to some bytes, a
+/// length, and a capacity. The pointer points to an internal buffer `String`
+/// uses to store its data. The length is the number of bytes currently stored
+/// in the buffer, and the capacity is the size of the buffer in bytes. As such,
+/// the length will always be less than or equal to the capacity.
+///
+/// This buffer is always stored on the heap.
+///
+/// You can look at these with the [`as_ptr`], [`len`], and [`capacity`]
+/// methods:
+///
+/// ```
+/// use std::mem;
+///
+/// let story = String::from("Once upon a time...");
+///
+// FIXME Update this when vec_into_raw_parts is stabilized
+/// // Prevent automatically dropping the String's data
+/// let mut story = mem::ManuallyDrop::new(story);
+///
+/// let ptr = story.as_mut_ptr();
+/// let len = story.len();
+/// let capacity = story.capacity();
+///
+/// // story has nineteen bytes
+/// assert_eq!(19, len);
+///
+/// // We can re-build a String out of ptr, len, and capacity. This is all
+/// // unsafe because we are responsible for making sure the components are
+/// // valid:
+/// let s = unsafe { String::from_raw_parts(ptr, len, capacity) };
+///
+/// assert_eq!(String::from("Once upon a time..."), s);
+/// ```
+///
+/// [`as_ptr`]: str::as_ptr
+/// [`len`]: String::len
+/// [`capacity`]: String::capacity
+///
+/// If a `String` has enough capacity, adding elements to it will not
+/// re-allocate. For example, consider this program:
+///
+/// ```
+/// let mut s = String::new();
+///
+/// println!("{}", s.capacity());
+///
+/// for _ in 0..5 {
+/// s.push_str("hello");
+/// println!("{}", s.capacity());
+/// }
+/// ```
+///
+/// This will output the following:
+///
+/// ```text
+/// 0
+/// 5
+/// 10
+/// 20
+/// 20
+/// 40
+/// ```
+///
+/// At first, we have no memory allocated at all, but as we append to the
+/// string, it increases its capacity appropriately. If we instead use the
+/// [`with_capacity`] method to allocate the correct capacity initially:
+///
+/// ```
+/// let mut s = String::with_capacity(25);
+///
+/// println!("{}", s.capacity());
+///
+/// for _ in 0..5 {
+/// s.push_str("hello");
+/// println!("{}", s.capacity());
+/// }
+/// ```
+///
+/// [`with_capacity`]: String::with_capacity
+///
+/// We end up with a different output:
+///
+/// ```text
+/// 25
+/// 25
+/// 25
+/// 25
+/// 25
+/// 25
+/// ```
+///
+/// Here, there's no need to allocate more memory inside the loop.
+///
+/// [`str`]: prim@str
+/// [`&str`]: prim@str
+/// [`Deref`]: core::ops::Deref
+/// [`as_str()`]: String::as_str
+#[derive(PartialOrd, Eq, Ord)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "string_type")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct String {
+ vec: Vec<u8>,
+}
+
+/// A possible error value when converting a `String` from a UTF-8 byte vector.
+///
+/// This type is the error type for the [`from_utf8`] method on [`String`]. It
+/// is designed in such a way to carefully avoid reallocations: the
+/// [`into_bytes`] method will give back the byte vector that was used in the
+/// conversion attempt.
+///
+/// [`from_utf8`]: String::from_utf8
+/// [`into_bytes`]: FromUtf8Error::into_bytes
+///
+/// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
+/// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
+/// an analogue to `FromUtf8Error`, and you can get one from a `FromUtf8Error`
+/// through the [`utf8_error`] method.
+///
+/// [`Utf8Error`]: core::str::Utf8Error
+/// [`std::str`]: core::str
+/// [`&str`]: prim@str
+/// [`utf8_error`]: Self::utf8_error
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // some invalid bytes, in a vector
+/// let bytes = vec![0, 159];
+///
+/// let value = String::from_utf8(bytes);
+///
+/// assert!(value.is_err());
+/// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(no_global_oom_handling), derive(Clone))]
+#[derive(Debug, PartialEq, Eq)]
+pub struct FromUtf8Error {
+ bytes: Vec<u8>,
+ error: Utf8Error,
+}
+
+/// A possible error value when converting a `String` from a UTF-16 byte slice.
+///
+/// This type is the error type for the [`from_utf16`] method on [`String`].
+///
+/// [`from_utf16`]: String::from_utf16
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // 𝄞mu<invalid>ic
+/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+/// 0xD800, 0x0069, 0x0063];
+///
+/// assert!(String::from_utf16(v).is_err());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct FromUtf16Error(());
+
+impl String {
+ /// Creates a new empty `String`.
+ ///
+ /// Given that the `String` is empty, this will not allocate any initial
+ /// buffer. While that means that this initial operation is very
+ /// inexpensive, it may cause excessive allocation later when you add
+ /// data. If you have an idea of how much data the `String` will hold,
+ /// consider the [`with_capacity`] method to prevent excessive
+ /// re-allocation.
+ ///
+ /// [`with_capacity`]: String::with_capacity
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::new();
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_string_new", since = "1.39.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn new() -> String {
+ String { vec: Vec::new() }
+ }
+
+ /// Creates a new empty `String` with a particular capacity.
+ ///
+ /// `String`s have an internal buffer to hold their data. The capacity is
+ /// the length of that buffer, and can be queried with the [`capacity`]
+ /// method. This method creates an empty `String`, but one with an initial
+ /// buffer that can hold `capacity` bytes. This is useful when you may be
+ /// appending a bunch of data to the `String`, reducing the number of
+ /// reallocations it needs to do.
+ ///
+ /// [`capacity`]: String::capacity
+ ///
+ /// If the given capacity is `0`, no allocation will occur, and this method
+ /// is identical to the [`new`] method.
+ ///
+ /// [`new`]: String::new
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(10);
+ ///
+ /// // The String contains no chars, even though it has capacity for more
+ /// assert_eq!(s.len(), 0);
+ ///
+ /// // These are all done without reallocating...
+ /// let cap = s.capacity();
+ /// for _ in 0..10 {
+ /// s.push('a');
+ /// }
+ ///
+ /// assert_eq!(s.capacity(), cap);
+ ///
+ /// // ...but this may make the string reallocate
+ /// s.push('a');
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[doc(alias = "alloc")]
+ #[doc(alias = "malloc")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn with_capacity(capacity: usize) -> String {
+ String { vec: Vec::with_capacity(capacity) }
+ }
+
+ // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
+ // required for this method definition, is not available. Since we don't
+ // require this method for testing purposes, I'll just stub it
+ // NB see the slice::hack module in slice.rs for more information
+ #[inline]
+ #[cfg(test)]
+ pub fn from_str(_: &str) -> String {
+ panic!("not available with cfg(test)");
+ }
+
+ /// Converts a vector of bytes to a `String`.
+ ///
+ /// A string ([`String`]) is made of bytes ([`u8`]), and a vector of bytes
+ /// ([`Vec<u8>`]) is made of bytes, so this function converts between the
+ /// two. Not all byte slices are valid `String`s, however: `String`
+ /// requires that it is valid UTF-8. `from_utf8()` checks to ensure that
+ /// the bytes are valid UTF-8, and then does the conversion.
+ ///
+ /// If you are sure that the byte slice is valid UTF-8, and you don't want
+ /// to incur the overhead of the validity check, there is an unsafe version
+ /// of this function, [`from_utf8_unchecked`], which has the same behavior
+ /// but skips the check.
+ ///
+ /// This method will take care to not copy the vector, for efficiency's
+ /// sake.
+ ///
+ /// If you need a [`&str`] instead of a `String`, consider
+ /// [`str::from_utf8`].
+ ///
+ /// The inverse of this method is [`into_bytes`].
+ ///
+ /// # Errors
+ ///
+ /// Returns [`Err`] if the slice is not UTF-8 with a description as to why the
+ /// provided bytes are not UTF-8. The vector you moved in is also included.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some bytes, in a vector
+ /// let sparkle_heart = vec![240, 159, 146, 150];
+ ///
+ /// // We know these bytes are valid, so we'll use `unwrap()`.
+ /// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
+ ///
+ /// assert_eq!("💖", sparkle_heart);
+ /// ```
+ ///
+ /// Incorrect bytes:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let sparkle_heart = vec![0, 159, 146, 150];
+ ///
+ /// assert!(String::from_utf8(sparkle_heart).is_err());
+ /// ```
+ ///
+ /// See the docs for [`FromUtf8Error`] for more details on what you can do
+ /// with this error.
+ ///
+ /// [`from_utf8_unchecked`]: String::from_utf8_unchecked
+ /// [`Vec<u8>`]: crate::vec::Vec
+ /// [`&str`]: prim@str
+ /// [`into_bytes`]: String::into_bytes
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf8(vec: Vec<u8>) -> Result<String, FromUtf8Error> {
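+ // `str::from_utf8` only validates the bytes; on success the original
+ // allocation is reused unchanged, and on failure it is handed back
+ // intact inside the error.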
+ match str::from_utf8(&vec) {
+ Ok(..) => Ok(String { vec }),
+ Err(e) => Err(FromUtf8Error { bytes: vec, error: e }),
+ }
+ }
+
+ /// Converts a slice of bytes to a string, including invalid characters.
+ ///
+ /// Strings are made of bytes ([`u8`]), and a slice of bytes
+ /// ([`&[u8]`][byteslice]) is made of bytes, so this function converts
+ /// between the two. Not all byte slices are valid strings, however: strings
+ /// are required to be valid UTF-8. During this conversion,
+ /// `from_utf8_lossy()` will replace any invalid UTF-8 sequences with
+ /// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD], which looks like this: �
+ ///
+ /// [byteslice]: prim@slice
+ /// [U+FFFD]: core::char::REPLACEMENT_CHARACTER
+ ///
+ /// If you are sure that the byte slice is valid UTF-8, and you don't want
+ /// to incur the overhead of the conversion, there is an unsafe version
+ /// of this function, [`from_utf8_unchecked`], which has the same behavior
+ /// but skips the checks.
+ ///
+ /// [`from_utf8_unchecked`]: String::from_utf8_unchecked
+ ///
+ /// This function returns a [`Cow<'a, str>`]. If our byte slice is invalid
+ /// UTF-8, then we need to insert the replacement characters, which will
+ /// change the size of the string, and hence, require a `String`. But if
+ /// it's already valid UTF-8, we don't need a new allocation. This return
+ /// type allows us to handle both cases.
+ ///
+ /// [`Cow<'a, str>`]: crate::borrow::Cow
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some bytes, in a vector
+ /// let sparkle_heart = vec![240, 159, 146, 150];
+ ///
+ /// let sparkle_heart = String::from_utf8_lossy(&sparkle_heart);
+ ///
+ /// assert_eq!("💖", sparkle_heart);
+ /// ```
+ ///
+ /// Incorrect bytes:
+ ///
+ /// ```
+ /// // some invalid bytes
+ /// let input = b"Hello \xF0\x90\x80World";
+ /// let output = String::from_utf8_lossy(input);
+ ///
+ /// assert_eq!("Hello �World", output);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf8_lossy(v: &[u8]) -> Cow<'_, str> {
+ let mut iter = lossy::Utf8Lossy::from_bytes(v).chunks();
+
+ let (first_valid, first_broken) = if let Some(chunk) = iter.next() {
+ let lossy::Utf8LossyChunk { valid, broken } = chunk;
+ if valid.len() == v.len() {
+ debug_assert!(broken.is_empty());
+ return Cow::Borrowed(valid);
+ }
+ (valid, broken)
+ } else {
+ return Cow::Borrowed("");
+ };
+
+ const REPLACEMENT: &str = "\u{FFFD}";
+
+ let mut res = String::with_capacity(v.len());
+ res.push_str(first_valid);
+ if !first_broken.is_empty() {
+ res.push_str(REPLACEMENT);
+ }
+
+ for lossy::Utf8LossyChunk { valid, broken } in iter {
+ res.push_str(valid);
+ if !broken.is_empty() {
+ res.push_str(REPLACEMENT);
+ }
+ }
+
+ Cow::Owned(res)
+ }
+
+ /// Decode a UTF-16–encoded slice `v` into a `String`, returning [`Err`]
+ /// if `v` contains any invalid data.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // 𝄞music
+ /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+ /// 0x0073, 0x0069, 0x0063];
+ /// assert_eq!(String::from("𝄞music"),
+ /// String::from_utf16(v).unwrap());
+ ///
+ /// // 𝄞mu<invalid>ic
+ /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+ /// 0xD800, 0x0069, 0x0063];
+ /// assert!(String::from_utf16(v).is_err());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf16(v: &[u16]) -> Result<String, FromUtf16Error> {
+ // This isn't done via collect::<Result<_, _>>() for performance reasons.
+ // FIXME: the function can be simplified again when #48994 is closed.
+ let mut ret = String::with_capacity(v.len());
+ for c in decode_utf16(v.iter().cloned()) {
+ if let Ok(c) = c {
+ ret.push(c);
+ } else {
+ return Err(FromUtf16Error(()));
+ }
+ }
+ Ok(ret)
+ }
+
+ /// Decode a UTF-16–encoded slice `v` into a `String`, replacing
+ /// invalid data with [the replacement character (`U+FFFD`)][U+FFFD].
+ ///
+ /// Unlike [`from_utf8_lossy`] which returns a [`Cow<'a, str>`],
+ /// `from_utf16_lossy` returns a `String` since the UTF-16 to UTF-8
+ /// conversion requires a memory allocation.
+ ///
+ /// [`from_utf8_lossy`]: String::from_utf8_lossy
+ /// [`Cow<'a, str>`]: crate::borrow::Cow
+ /// [U+FFFD]: core::char::REPLACEMENT_CHARACTER
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // 𝄞mus<invalid>ic<invalid>
+ /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+ /// 0x0073, 0xDD1E, 0x0069, 0x0063,
+ /// 0xD834];
+ ///
+ /// assert_eq!(String::from("𝄞mus\u{FFFD}ic\u{FFFD}"),
+ /// String::from_utf16_lossy(v));
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf16_lossy(v: &[u16]) -> String {
+ decode_utf16(v.iter().cloned()).map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)).collect()
+ }
+
+ /// Decomposes a `String` into its raw components.
+ ///
+ /// Returns the raw pointer to the underlying data, the length of
+ /// the string (in bytes), and the allocated capacity of the data
+ /// (in bytes). These are the same arguments in the same order as
+ /// the arguments to [`from_raw_parts`].
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `String`. The only way to do
+ /// this is to convert the raw pointer, length, and capacity back
+ /// into a `String` with the [`from_raw_parts`] function, allowing
+ /// the destructor to perform the cleanup.
+ ///
+ /// [`from_raw_parts`]: String::from_raw_parts
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_into_raw_parts)]
+ /// let s = String::from("hello");
+ ///
+ /// let (ptr, len, cap) = s.into_raw_parts();
+ ///
+ /// let rebuilt = unsafe { String::from_raw_parts(ptr, len, cap) };
+ /// assert_eq!(rebuilt, "hello");
+ /// ```
+ #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
+ pub fn into_raw_parts(self) -> (*mut u8, usize, usize) {
+ self.vec.into_raw_parts()
+ }
+
+ /// Creates a new `String` from a length, capacity, and pointer.
+ ///
+ /// # Safety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * The memory at `buf` needs to have been previously allocated by the
+ /// same allocator the standard library uses, with a required alignment of exactly 1.
+ /// * `length` needs to be less than or equal to `capacity`.
+ /// * `capacity` needs to be the correct value.
+ /// * The first `length` bytes at `buf` need to be valid UTF-8.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures.
+ ///
+ /// The ownership of `buf` is effectively transferred to the
+ /// `String` which may then deallocate, reallocate or change the
+ /// contents of memory pointed to by the pointer at will. Ensure
+ /// that nothing else uses the pointer after calling this
+ /// function.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::mem;
+ ///
+ /// unsafe {
+ /// let s = String::from("hello");
+ ///
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Prevent automatically dropping the String's data
+ /// let mut s = mem::ManuallyDrop::new(s);
+ ///
+ /// let ptr = s.as_mut_ptr();
+ /// let len = s.len();
+ /// let capacity = s.capacity();
+ ///
+ /// let s = String::from_raw_parts(ptr, len, capacity);
+ ///
+ /// assert_eq!(String::from("hello"), s);
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_raw_parts(buf: *mut u8, length: usize, capacity: usize) -> String {
+ unsafe { String { vec: Vec::from_raw_parts(buf, length, capacity) } }
+ }
+
+ /// Converts a vector of bytes to a `String` without checking that the
+ /// string contains valid UTF-8.
+ ///
+ /// See the safe version, [`from_utf8`], for more details.
+ ///
+ /// [`from_utf8`]: String::from_utf8
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because it does not check that the bytes passed
+ /// to it are valid UTF-8. If this constraint is violated, it may cause
+ /// memory unsafety issues with future users of the `String`, as the rest of
+ /// the standard library assumes that `String`s are valid UTF-8.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some bytes, in a vector
+ /// let sparkle_heart = vec![240, 159, 146, 150];
+ ///
+ /// let sparkle_heart = unsafe {
+ /// String::from_utf8_unchecked(sparkle_heart)
+ /// };
+ ///
+ /// assert_eq!("💖", sparkle_heart);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_utf8_unchecked(bytes: Vec<u8>) -> String {
+ String { vec: bytes }
+ }
+
+ /// Converts a `String` into a byte vector.
+ ///
+ /// This consumes the `String`, so we do not need to copy its contents.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("hello");
+ /// let bytes = s.into_bytes();
+ ///
+ /// assert_eq!(&[104, 101, 108, 108, 111][..], &bytes[..]);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_bytes(self) -> Vec<u8> {
+ self.vec
+ }
+
+ /// Extracts a string slice containing the entire `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("foo");
+ ///
+ /// assert_eq!("foo", s.as_str());
+ /// ```
+ #[inline]
+ #[stable(feature = "string_as_str", since = "1.7.0")]
+ pub fn as_str(&self) -> &str {
+ self
+ }
+
+ /// Converts a `String` into a mutable string slice.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foobar");
+ /// let s_mut_str = s.as_mut_str();
+ ///
+ /// s_mut_str.make_ascii_uppercase();
+ ///
+ /// assert_eq!("FOOBAR", s_mut_str);
+ /// ```
+ #[inline]
+ #[stable(feature = "string_as_str", since = "1.7.0")]
+ pub fn as_mut_str(&mut self) -> &mut str {
+ self
+ }
+
+ /// Appends a given string slice onto the end of this `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// s.push_str("bar");
+ ///
+ /// assert_eq!("foobar", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push_str(&mut self, string: &str) {
+ self.vec.extend_from_slice(string.as_bytes())
+ }
+
+ /// Copies elements from `src` range to the end of the string.
+ ///
+ /// ## Panics
+ ///
+ /// Panics if the starting point or end point do not lie on a [`char`]
+ /// boundary, or if they're out of bounds.
+ ///
+ /// ## Examples
+ ///
+ /// ```
+ /// #![feature(string_extend_from_within)]
+ /// let mut string = String::from("abcde");
+ ///
+ /// string.extend_from_within(2..);
+ /// assert_eq!(string, "abcdecde");
+ ///
+ /// string.extend_from_within(..2);
+ /// assert_eq!(string, "abcdecdeab");
+ ///
+ /// string.extend_from_within(4..8);
+ /// assert_eq!(string, "abcdecdeabecde");
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "string_extend_from_within", issue = "none")]
+ pub fn extend_from_within<R>(&mut self, src: R)
+ where
+ R: RangeBounds<usize>,
+ {
+ let src @ Range { start, end } = slice::range(src, ..self.len());
+
+ assert!(self.is_char_boundary(start));
+ assert!(self.is_char_boundary(end));
+
+ self.vec.extend_from_within(src);
+ }
+
+ /// Returns this `String`'s capacity, in bytes.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::with_capacity(10);
+ ///
+ /// assert!(s.capacity() >= 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.vec.capacity()
+ }
+
+ /// Ensures that this `String`'s capacity is at least `additional` bytes
+ /// larger than its length.
+ ///
+ /// The capacity may be increased by more than `additional` bytes to
+ /// prevent frequent reallocations.
+ ///
+ /// If you do not want this "at least" behavior, see the [`reserve_exact`]
+ /// method.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// [`reserve_exact`]: String::reserve_exact
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::new();
+ ///
+ /// s.reserve(10);
+ ///
+ /// assert!(s.capacity() >= 10);
+ /// ```
+ ///
+ /// This may not actually increase the capacity:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(10);
+ /// s.push('a');
+ /// s.push('b');
+ ///
+ /// // s now has a length of 2 and a capacity of 10
+ /// assert_eq!(2, s.len());
+ /// assert_eq!(10, s.capacity());
+ ///
+ /// // Since we already have an extra 8 capacity, calling this...
+ /// s.reserve(8);
+ ///
+ /// // ... doesn't actually increase.
+ /// assert_eq!(10, s.capacity());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.vec.reserve(additional)
+ }
+
+ /// Ensures that this `String`'s capacity is `additional` bytes
+ /// larger than its length.
+ ///
+ /// Consider using the [`reserve`] method unless you absolutely know
+ /// better than the allocator.
+ ///
+ /// [`reserve`]: String::reserve
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows `usize`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::new();
+ ///
+ /// s.reserve_exact(10);
+ ///
+ /// assert!(s.capacity() >= 10);
+ /// ```
+ ///
+ /// This may not actually increase the capacity:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(10);
+ /// s.push('a');
+ /// s.push('b');
+ ///
+ /// // s now has a length of 2 and a capacity of 10
+ /// assert_eq!(2, s.len());
+ /// assert_eq!(10, s.capacity());
+ ///
+ /// // Since we already have an extra 8 capacity, calling this...
+ /// s.reserve_exact(8);
+ ///
+ /// // ... doesn't actually increase.
+ /// assert_eq!(10, s.capacity());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.vec.reserve_exact(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` more elements to be inserted
+ /// in the given `String`. The collection may reserve more space to avoid
+ /// frequent reallocations. After calling `reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional`. Does nothing if
+ /// capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(try_reserve)]
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &str) -> Result<String, TryReserveError> {
+ /// let mut output = String::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.push_str(data);
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?");
+ /// ```
+ #[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.vec.try_reserve(additional)
+ }
+
+ /// Tries to reserve the minimum capacity for exactly `additional` more elements to
+ /// be inserted in the given `String`. After calling `reserve_exact`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity cannot be relied upon to be precisely
+ /// minimal. Prefer `reserve` if future insertions are expected.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(try_reserve)]
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &str) -> Result<String, TryReserveError> {
+ /// let mut output = String::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.push_str(data);
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?");
+ /// ```
+ #[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.vec.try_reserve_exact(additional)
+ }
+
+ /// Shrinks the capacity of this `String` to match its length.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// s.reserve(100);
+ /// assert!(s.capacity() >= 100);
+ ///
+ /// s.shrink_to_fit();
+ /// assert_eq!(3, s.capacity());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ self.vec.shrink_to_fit()
+ }
+
+ /// Shrinks the capacity of this `String` with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(shrink_to)]
+ /// let mut s = String::from("foo");
+ ///
+ /// s.reserve(100);
+ /// assert!(s.capacity() >= 100);
+ ///
+ /// s.shrink_to(10);
+ /// assert!(s.capacity() >= 10);
+ /// s.shrink_to(0);
+ /// assert!(s.capacity() >= 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[unstable(feature = "shrink_to", reason = "new API", issue = "56431")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.vec.shrink_to(min_capacity)
+ }
+
+ /// Appends the given [`char`] to the end of this `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("abc");
+ ///
+ /// s.push('1');
+ /// s.push('2');
+ /// s.push('3');
+ ///
+ /// assert_eq!("abc123", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push(&mut self, ch: char) {
+ match ch.len_utf8() {
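+ // A one-byte char can be pushed as a single byte; longer chars are
+ // encoded into a small stack buffer and appended as a byte slice.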
+ 1 => self.vec.push(ch as u8),
+ _ => self.vec.extend_from_slice(ch.encode_utf8(&mut [0; 4]).as_bytes()),
+ }
+ }
+
+ /// Returns a byte slice of this `String`'s contents.
+ ///
+ /// The inverse of this method is [`from_utf8`].
+ ///
+ /// [`from_utf8`]: String::from_utf8
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("hello");
+ ///
+ /// assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn as_bytes(&self) -> &[u8] {
+ &self.vec
+ }
+
+ /// Shortens this `String` to the specified length.
+ ///
+ /// If `new_len` is greater than the string's current length, this has no
+ /// effect.
+ ///
+ /// Note that this method has no effect on the allocated capacity
+ /// of the string.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `new_len` does not lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("hello");
+ ///
+ /// s.truncate(2);
+ ///
+ /// assert_eq!("he", s);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn truncate(&mut self, new_len: usize) {
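+ // A `new_len` past the current length is a documented no-op, so the
+ // char-boundary assertion only runs when actually shortening.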
+ if new_len <= self.len() {
+ assert!(self.is_char_boundary(new_len));
+ self.vec.truncate(new_len)
+ }
+ }
+
+ /// Removes the last character from the string buffer and returns it.
+ ///
+ /// Returns [`None`] if this `String` is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// assert_eq!(s.pop(), Some('o'));
+ /// assert_eq!(s.pop(), Some('o'));
+ /// assert_eq!(s.pop(), Some('f'));
+ ///
+ /// assert_eq!(s.pop(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop(&mut self) -> Option<char> {
+ let ch = self.chars().rev().next()?;
+ let newlen = self.len() - ch.len_utf8();
+ unsafe {
+ self.vec.set_len(newlen);
+ }
+ Some(ch)
+ }
+
+ /// Removes a [`char`] from this `String` at a byte position and returns it.
+ ///
+ /// This is an *O*(*n*) operation, as it requires copying every element in the
+ /// buffer.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `idx` is larger than or equal to the `String`'s length,
+ /// or if it does not lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// assert_eq!(s.remove(0), 'f');
+ /// assert_eq!(s.remove(1), 'o');
+ /// assert_eq!(s.remove(0), 'o');
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove(&mut self, idx: usize) -> char {
+ let ch = match self[idx..].chars().next() {
+ Some(ch) => ch,
+ None => panic!("cannot remove a char from the end of a string"),
+ };
+
+ let next = idx + ch.len_utf8();
+ let len = self.len();
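+ // Shift everything after the removed char left by its UTF-8 width,
+ // then shrink the length to match.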
+ unsafe {
+ ptr::copy(self.vec.as_ptr().add(next), self.vec.as_mut_ptr().add(idx), len - next);
+ self.vec.set_len(len - (next - idx));
+ }
+ ch
+ }
+
+ /// Remove all matches of pattern `pat` in the `String`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(string_remove_matches)]
+ /// let mut s = String::from("Trees are not green, the sky is not blue.");
+ /// s.remove_matches("not ");
+ /// assert_eq!("Trees are green, the sky is blue.", s);
+ /// ```
+ ///
+ /// Matches will be detected and removed iteratively, so in cases where
+ /// patterns overlap, only the first pattern will be removed:
+ ///
+ /// ```
+ /// #![feature(string_remove_matches)]
+ /// let mut s = String::from("banana");
+ /// s.remove_matches("ana");
+ /// assert_eq!("bna", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "string_remove_matches", reason = "new API", issue = "72826")]
+ pub fn remove_matches<'a, P>(&'a mut self, pat: P)
+ where
+ P: for<'x> Pattern<'x>,
+ {
+ use core::str::pattern::Searcher;
+
+ let rejections = {
+ let mut searcher = pat.into_searcher(self);
+ // Per Searcher::next:
+ //
+ // A Match result needs to contain the whole matched pattern,
+ // however Reject results may be split up into arbitrary many
+ // adjacent fragments. Both ranges may have zero length.
+ //
+ // In practice the implementation of Searcher::next_match tends to
+ // be more efficient, so we use it here and do some work to invert
+ // matches into rejections since that's what we want to copy below.
+ let mut front = 0;
+ let rejections: Vec<_> = from_fn(|| {
+ let (start, end) = searcher.next_match()?;
+ let prev_front = front;
+ front = end;
+ Some((prev_front, start))
+ })
+ .collect();
+ rejections.into_iter().chain(core::iter::once((front, self.len())))
+ };
+
+ let mut len = 0;
+ let ptr = self.vec.as_mut_ptr();
+
+ for (start, end) in rejections {
+ let count = end - start;
+ if start != len {
+ // SAFETY: per Searcher::next:
+ //
+ // The stream of Match and Reject values up to a Done will
+ // contain index ranges that are adjacent, non-overlapping,
+ // covering the whole haystack, and laying on utf8
+ // boundaries.
+ unsafe {
+ ptr::copy(ptr.add(start), ptr.add(len), count);
+ }
+ }
+ len += count;
+ }
+
+ unsafe {
+ self.vec.set_len(len);
+ }
+ }
+
+ /// Retains only the characters specified by the predicate.
+ ///
+ /// In other words, remove all characters `c` such that `f(c)` returns `false`.
+ /// This method operates in place, visiting each character exactly once in the
+ /// original order, and preserves the order of the retained characters.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("f_o_ob_ar");
+ ///
+ /// s.retain(|c| c != '_');
+ ///
+ /// assert_eq!(s, "foobar");
+ /// ```
+ ///
+ /// The exact order may be useful for tracking external state, like an index.
+ ///
+ /// ```
+ /// let mut s = String::from("abcde");
+ /// let keep = [false, true, true, false, true];
+ /// let mut i = 0;
+ /// s.retain(|_| (keep[i], i += 1).0);
+ /// assert_eq!(s, "bce");
+ /// ```
+ #[inline]
+ #[stable(feature = "string_retain", since = "1.26.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(char) -> bool,
+ {
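+ // The drop guard below writes the compacted length back even if `f`
+ // panics, so the `String` never exposes stale bytes past the retained
+ // prefix.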
+ struct SetLenOnDrop<'a> {
+ s: &'a mut String,
+ idx: usize,
+ del_bytes: usize,
+ }
+
+ impl<'a> Drop for SetLenOnDrop<'a> {
+ fn drop(&mut self) {
+ let new_len = self.idx - self.del_bytes;
+ debug_assert!(new_len <= self.s.len());
+ unsafe { self.s.vec.set_len(new_len) };
+ }
+ }
+
+ let len = self.len();
+ let mut guard = SetLenOnDrop { s: self, idx: 0, del_bytes: 0 };
+
+ while guard.idx < len {
+ let ch = unsafe { guard.s.get_unchecked(guard.idx..len).chars().next().unwrap() };
+ let ch_len = ch.len_utf8();
+
+ if !f(ch) {
+ guard.del_bytes += ch_len;
+ } else if guard.del_bytes > 0 {
+ unsafe {
+ ptr::copy(
+ guard.s.vec.as_ptr().add(guard.idx),
+ guard.s.vec.as_mut_ptr().add(guard.idx - guard.del_bytes),
+ ch_len,
+ );
+ }
+ }
+
+ // Point idx to the next char
+ guard.idx += ch_len;
+ }
+
+ drop(guard);
+ }
+
+ /// Inserts a character into this `String` at a byte position.
+ ///
+ /// This is an *O*(*n*) operation as it requires copying every element in the
+ /// buffer.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `idx` is larger than the `String`'s length, or if it does not
+ /// lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(3);
+ ///
+ /// s.insert(0, 'f');
+ /// s.insert(1, 'o');
+ /// s.insert(2, 'o');
+ ///
+ /// assert_eq!("foo", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, idx: usize, ch: char) {
+ assert!(self.is_char_boundary(idx));
+ let mut bits = [0; 4];
+ let bits = ch.encode_utf8(&mut bits).as_bytes();
+
+ unsafe {
+ self.insert_bytes(idx, bits);
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn insert_bytes(&mut self, idx: usize, bytes: &[u8]) {
+ let len = self.len();
+ let amt = bytes.len();
+ self.vec.reserve(amt);
+
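+ // Shift the tail right by `amt` bytes to open a gap at `idx`, copy the
+ // new bytes into it, then publish the fully initialized length.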
+ unsafe {
+ ptr::copy(self.vec.as_ptr().add(idx), self.vec.as_mut_ptr().add(idx + amt), len - idx);
+ ptr::copy(bytes.as_ptr(), self.vec.as_mut_ptr().add(idx), amt);
+ self.vec.set_len(len + amt);
+ }
+ }
+
+ /// Inserts a string slice into this `String` at a byte position.
+ ///
+ /// This is an *O*(*n*) operation as it requires copying every element in the
+ /// buffer.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `idx` is larger than the `String`'s length, or if it does not
+ /// lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("bar");
+ ///
+ /// s.insert_str(0, "foo");
+ ///
+ /// assert_eq!("foobar", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "insert_str", since = "1.16.0")]
+ pub fn insert_str(&mut self, idx: usize, string: &str) {
+ assert!(self.is_char_boundary(idx));
+
+ unsafe {
+ self.insert_bytes(idx, string.as_bytes());
+ }
+ }
+
+ /// Returns a mutable reference to the contents of this `String`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because it does not check that the bytes passed
+ /// to it are valid UTF-8. If this constraint is violated, it may cause
+ /// memory unsafety issues with future users of the `String`, as the rest of
+ /// the standard library assumes that `String`s are valid UTF-8.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("hello");
+ ///
+ /// unsafe {
+ /// let vec = s.as_mut_vec();
+ /// assert_eq!(&[104, 101, 108, 108, 111][..], &vec[..]);
+ ///
+ /// vec.reverse();
+ /// }
+ /// assert_eq!(s, "olleh");
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn as_mut_vec(&mut self) -> &mut Vec<u8> {
+ &mut self.vec
+ }
+
+ /// Returns the length of this `String`, in bytes, not [`char`]s or
+ /// graphemes. In other words, it may not be what a human considers the
+ /// length of the string.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = String::from("foo");
+ /// assert_eq!(a.len(), 3);
+ ///
+ /// let fancy_f = String::from("ƒoo");
+ /// assert_eq!(fancy_f.len(), 4);
+ /// assert_eq!(fancy_f.chars().count(), 3);
+ /// ```
+ #[doc(alias = "length")]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.vec.len()
+ }
+
+ /// Returns `true` if this `String` has a length of zero, and `false` otherwise.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut v = String::new();
+ /// assert!(v.is_empty());
+ ///
+ /// v.push('a');
+ /// assert!(!v.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Splits the string into two at the given byte index.
+ ///
+ /// Returns a newly allocated `String`. `self` contains bytes `[0, at)`, and
+ /// the returned `String` contains bytes `[at, len)`. `at` must be on the
+ /// boundary of a UTF-8 code point.
+ ///
+ /// Note that the capacity of `self` does not change.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at` is not on a `UTF-8` code point boundary, or if it is beyond the last
+ /// code point of the string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # fn main() {
+ /// let mut hello = String::from("Hello, World!");
+ /// let world = hello.split_off(7);
+ /// assert_eq!(hello, "Hello, ");
+ /// assert_eq!(world, "World!");
+ /// # }
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "string_split_off", since = "1.16.0")]
+ #[must_use = "use `.truncate()` if you don't need the other half"]
+ pub fn split_off(&mut self, at: usize) -> String {
+ assert!(self.is_char_boundary(at));
+ let other = self.vec.split_off(at);
+ unsafe { String::from_utf8_unchecked(other) }
+ }
+
+ /// Truncates this `String`, removing all contents.
+ ///
+ /// While this means the `String` will have a length of zero, it does not
+ /// touch its capacity.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// s.clear();
+ ///
+ /// assert!(s.is_empty());
+ /// assert_eq!(0, s.len());
+ /// assert_eq!(3, s.capacity());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ self.vec.clear()
+ }
+
+ /// Creates a draining iterator that removes the specified range in the `String`
+ /// and yields the removed `chars`.
+ ///
+ /// Note: The element range is removed even if the iterator is not
+ /// consumed until the end.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point or end point do not lie on a [`char`]
+ /// boundary, or if they're out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("α is alpha, β is beta");
+ /// let beta_offset = s.find('β').unwrap_or(s.len());
+ ///
+ /// // Remove the range up until the β from the string
+ /// let t: String = s.drain(..beta_offset).collect();
+ /// assert_eq!(t, "α is alpha, ");
+ /// assert_eq!(s, "β is beta");
+ ///
+ /// // A full range clears the string
+ /// s.drain(..);
+ /// assert_eq!(s, "");
+ /// ```
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_>
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // The String version of Drain does not have the memory safety issues
+ // of the vector version. The data is just plain bytes.
+ // Because the range removal happens in Drop, if the Drain iterator is leaked,
+ // the removal will not happen.
+ let Range { start, end } = slice::range(range, ..self.len());
+ assert!(self.is_char_boundary(start));
+ assert!(self.is_char_boundary(end));
+
+ // Take out two simultaneous borrows. The &mut String won't be accessed
+ // until iteration is over, in Drop.
+ let self_ptr = self as *mut _;
+ // SAFETY: `slice::range` and `is_char_boundary` do the appropriate bounds checks.
+ let chars_iter = unsafe { self.get_unchecked(start..end) }.chars();
+
+ Drain { start, end, iter: chars_iter, string: self_ptr }
+ }
+
+ /// Removes the specified range in the string,
+ /// and replaces it with the given string.
+ /// The given string doesn't need to be the same length as the range.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point or end point do not lie on a [`char`]
+ /// boundary, or if they're out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("α is alpha, β is beta");
+ /// let beta_offset = s.find('β').unwrap_or(s.len());
+ ///
+ /// // Replace the range up until the β from the string
+ /// s.replace_range(..beta_offset, "Α is capital alpha; ");
+ /// assert_eq!(s, "Α is capital alpha; β is beta");
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "splice", since = "1.27.0")]
+ pub fn replace_range<R>(&mut self, range: R, replace_with: &str)
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // Replace_range does not have the memory safety issues of the
+ // vector version. The data is just plain bytes.
+
+ // WARNING: Inlining this variable would be unsound (#81138)
+ let start = range.start_bound();
+ match start {
+ Included(&n) => assert!(self.is_char_boundary(n)),
+ Excluded(&n) => assert!(self.is_char_boundary(n + 1)),
+ Unbounded => {}
+ };
+ // WARNING: Inlining this variable would be unsound (#81138)
+ let end = range.end_bound();
+ match end {
+ Included(&n) => assert!(self.is_char_boundary(n + 1)),
+ Excluded(&n) => assert!(self.is_char_boundary(n)),
+ Unbounded => {}
+ };
+
+ // Using `range` again would be unsound (#81138)
+ // We assume the bounds reported by `range` remain the same, but
+ // an adversarial implementation could change between calls
+ unsafe { self.as_mut_vec() }.splice((start, end), replace_with.bytes());
+ }
+
+ /// Converts this `String` into a [`Box`]`<`[`str`]`>`.
+ ///
+ /// This will drop any excess capacity.
+ ///
+ /// [`str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("hello");
+ ///
+ /// let b = s.into_boxed_str();
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "box_str", since = "1.4.0")]
+ #[inline]
+ pub fn into_boxed_str(self) -> Box<str> {
+ let slice = self.vec.into_boxed_slice();
+ unsafe { from_boxed_utf8_unchecked(slice) }
+ }
+}
+
+impl FromUtf8Error {
+ /// Returns a slice of the bytes that were attempted to be converted to a
+ /// `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let bytes = vec![0, 159];
+ ///
+ /// let value = String::from_utf8(bytes);
+ ///
+ /// assert_eq!(&[0, 159], value.unwrap_err().as_bytes());
+ /// ```
+ #[stable(feature = "from_utf8_error_as_bytes", since = "1.26.0")]
+ pub fn as_bytes(&self) -> &[u8] {
+ &self.bytes[..]
+ }
+
+ /// Returns the bytes that were attempted to convert to a `String`.
+ ///
+ /// This method is carefully constructed to avoid allocation. It will
+ /// consume the error, moving out the bytes, so that a copy of the bytes
+ /// does not need to be made.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let bytes = vec![0, 159];
+ ///
+ /// let value = String::from_utf8(bytes);
+ ///
+ /// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_bytes(self) -> Vec<u8> {
+ self.bytes
+ }
+
+ /// Fetches a [`Utf8Error`] to get more details about the conversion failure.
+ ///
+ /// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
+ /// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
+ /// an analogue to `FromUtf8Error`. See its documentation for more details
+ /// on using it.
+ ///
+ /// [`std::str`]: core::str
+ /// [`&str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let bytes = vec![0, 159];
+ ///
+ /// let error = String::from_utf8(bytes).unwrap_err().utf8_error();
+ ///
+ /// // the first byte is invalid here
+ /// assert_eq!(1, error.valid_up_to());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn utf8_error(&self) -> Utf8Error {
+ self.error
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for FromUtf8Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.error, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for FromUtf16Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt("invalid utf-16: lone surrogate found", f)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Clone for String {
+ fn clone(&self) -> Self {
+ String { vec: self.vec.clone() }
+ }
+
+ fn clone_from(&mut self, source: &Self) {
+ self.vec.clone_from(&source.vec);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl FromIterator<char> for String {
+ fn from_iter<I: IntoIterator<Item = char>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_from_iter_by_ref", since = "1.17.0")]
+impl<'a> FromIterator<&'a char> for String {
+ fn from_iter<I: IntoIterator<Item = &'a char>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> FromIterator<&'a str> for String {
+ fn from_iter<I: IntoIterator<Item = &'a str>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_string", since = "1.4.0")]
+impl FromIterator<String> for String {
+ fn from_iter<I: IntoIterator<Item = String>>(iter: I) -> String {
+ let mut iterator = iter.into_iter();
+
+ // Because we're iterating over `String`s, we can avoid at least
+ // one allocation by getting the first string from the iterator
+ // and appending to it all the subsequent strings.
+ match iterator.next() {
+ None => String::new(),
+ Some(mut buf) => {
+ buf.extend(iterator);
+ buf
+ }
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_str2", since = "1.45.0")]
+impl FromIterator<Box<str>> for String {
+ fn from_iter<I: IntoIterator<Item = Box<str>>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "herd_cows", since = "1.19.0")]
+impl<'a> FromIterator<Cow<'a, str>> for String {
+ fn from_iter<I: IntoIterator<Item = Cow<'a, str>>>(iter: I) -> String {
+ let mut iterator = iter.into_iter();
+
+ // Because we're iterating over CoWs, we can (potentially) avoid at least
+ // one allocation by getting the first item and appending to it all the
+ // subsequent items.
+ match iterator.next() {
+ None => String::new(),
+ Some(cow) => {
+ let mut buf = cow.into_owned();
+ buf.extend(iterator);
+ buf
+ }
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Extend<char> for String {
+ fn extend<I: IntoIterator<Item = char>>(&mut self, iter: I) {
+ let iterator = iter.into_iter();
+ let (lower_bound, _) = iterator.size_hint();
+ self.reserve(lower_bound);
+ iterator.for_each(move |c| self.push(c));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, c: char) {
+ self.push(c);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a> Extend<&'a char> for String {
+ fn extend<I: IntoIterator<Item = &'a char>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &c: &'a char) {
+ self.push(c);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Extend<&'a str> for String {
+ fn extend<I: IntoIterator<Item = &'a str>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(s));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, s: &'a str) {
+ self.push_str(s);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_str2", since = "1.45.0")]
+impl Extend<Box<str>> for String {
+ fn extend<I: IntoIterator<Item = Box<str>>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(&s));
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_string", since = "1.4.0")]
+impl Extend<String> for String {
+ fn extend<I: IntoIterator<Item = String>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(&s));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, s: String) {
+ self.push_str(&s);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "herd_cows", since = "1.19.0")]
+impl<'a> Extend<Cow<'a, str>> for String {
+ fn extend<I: IntoIterator<Item = Cow<'a, str>>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(&s));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, s: Cow<'a, str>) {
+ self.push_str(&s);
+ }
+}
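+
+// A minimal usage sketch of the `Extend` impls above: any of the item
+// types (`char`, `&char`, `&str`, `Box<str>`, `String`, `Cow<str>`) can
+// be fed to `String::extend`, for example:
+//
+//     let mut s = String::from("12");
+//     s.extend("34".chars()); // Extend<char>
+//     s.extend(["5", "6"]);   // Extend<&str>
+//     assert_eq!(s, "123456");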
+
+/// A convenience impl that delegates to the impl for `&str`.
+///
+/// # Examples
+///
+/// ```
+/// assert_eq!(String::from("Hello world").find("world"), Some(6));
+/// ```
+#[unstable(
+ feature = "pattern",
+ reason = "API not fully fleshed out and ready to be stabilized",
+ issue = "27721"
+)]
+impl<'a, 'b> Pattern<'a> for &'b String {
+ type Searcher = <&'b str as Pattern<'a>>::Searcher;
+
+ fn into_searcher(self, haystack: &'a str) -> <&'b str as Pattern<'a>>::Searcher {
+ self[..].into_searcher(haystack)
+ }
+
+ #[inline]
+ fn is_contained_in(self, haystack: &'a str) -> bool {
+ self[..].is_contained_in(haystack)
+ }
+
+ #[inline]
+ fn is_prefix_of(self, haystack: &'a str) -> bool {
+ self[..].is_prefix_of(haystack)
+ }
+
+ #[inline]
+ fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
+ self[..].strip_prefix_of(haystack)
+ }
+
+ #[inline]
+ fn is_suffix_of(self, haystack: &'a str) -> bool {
+ self[..].is_suffix_of(haystack)
+ }
+
+ #[inline]
+ fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str> {
+ self[..].strip_suffix_of(haystack)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialEq for String {
+ #[inline]
+ fn eq(&self, other: &String) -> bool {
+ PartialEq::eq(&self[..], &other[..])
+ }
+ #[inline]
+ fn ne(&self, other: &String) -> bool {
+ PartialEq::ne(&self[..], &other[..])
+ }
+}
+
+macro_rules! impl_eq {
+ ($lhs:ty, $rhs: ty) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow(unused_lifetimes)]
+ impl<'a, 'b> PartialEq<$rhs> for $lhs {
+ #[inline]
+ fn eq(&self, other: &$rhs) -> bool {
+ PartialEq::eq(&self[..], &other[..])
+ }
+ #[inline]
+ fn ne(&self, other: &$rhs) -> bool {
+ PartialEq::ne(&self[..], &other[..])
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow(unused_lifetimes)]
+ impl<'a, 'b> PartialEq<$lhs> for $rhs {
+ #[inline]
+ fn eq(&self, other: &$lhs) -> bool {
+ PartialEq::eq(&self[..], &other[..])
+ }
+ #[inline]
+ fn ne(&self, other: &$lhs) -> bool {
+ PartialEq::ne(&self[..], &other[..])
+ }
+ }
+ };
+}
+
+impl_eq! { String, str }
+impl_eq! { String, &'a str }
+#[cfg(not(no_global_oom_handling))]
+impl_eq! { Cow<'a, str>, str }
+#[cfg(not(no_global_oom_handling))]
+impl_eq! { Cow<'a, str>, &'b str }
+#[cfg(not(no_global_oom_handling))]
+impl_eq! { Cow<'a, str>, String }
+
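+// A minimal sketch of the symmetric comparisons the macro generates:
+//
+//     let s = String::from("foo");
+//     assert_eq!(s, "foo"); // String == &str
+//     assert_eq!("foo", s); // &str == String
+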
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Default for String {
+ /// Creates an empty `String`.
+ #[inline]
+ fn default() -> String {
+ String::new()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for String {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for String {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl hash::Hash for String {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
+ (**self).hash(hasher)
+ }
+}
+
+/// Implements the `+` operator for concatenating two strings.
+///
+/// This consumes the `String` on the left-hand side and re-uses its buffer (growing it if
+/// necessary). This is done to avoid allocating a new `String` and copying the entire contents on
+/// every operation, which would lead to *O*(*n*^2) running time when building an *n*-byte string by
+/// repeated concatenation.
+///
+/// The string on the right-hand side is only borrowed; its contents are copied into the returned
+/// `String`.
+///
+/// # Examples
+///
+/// Concatenating two `String`s takes the first by value and borrows the second:
+///
+/// ```
+/// let a = String::from("hello");
+/// let b = String::from(" world");
+/// let c = a + &b;
+/// // `a` is moved and can no longer be used here.
+/// ```
+///
+/// If you want to keep using the first `String`, you can clone it and append to the clone instead:
+///
+/// ```
+/// let a = String::from("hello");
+/// let b = String::from(" world");
+/// let c = a.clone() + &b;
+/// // `a` is still valid here.
+/// ```
+///
+/// Concatenating `&str` slices can be done by converting the first to a `String`:
+///
+/// ```
+/// let a = "hello";
+/// let b = " world";
+/// let c = a.to_string() + b;
+/// ```
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Add<&str> for String {
+ type Output = String;
+
+ #[inline]
+ fn add(mut self, other: &str) -> String {
+ self.push_str(other);
+ self
+ }
+}
+
+/// Implements the `+=` operator for appending to a `String`.
+///
+/// This has the same behavior as the [`push_str`][String::push_str] method.
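+///
+/// # Examples
+///
+/// ```
+/// let mut s = String::from("hello");
+/// s += " world";
+/// assert_eq!(s, "hello world");
+/// ```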
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "stringaddassign", since = "1.12.0")]
+impl AddAssign<&str> for String {
+ #[inline]
+ fn add_assign(&mut self, other: &str) {
+ self.push_str(other);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::Range<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::Range<usize>) -> &str {
+ &self[..][index]
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::RangeTo<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeTo<usize>) -> &str {
+ &self[..][index]
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::RangeFrom<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeFrom<usize>) -> &str {
+ &self[..][index]
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::RangeFull> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, _index: ops::RangeFull) -> &str {
+ unsafe { str::from_utf8_unchecked(&self.vec) }
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::Index<ops::RangeInclusive<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeInclusive<usize>) -> &str {
+ Index::index(&**self, index)
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::Index<ops::RangeToInclusive<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeToInclusive<usize>) -> &str {
+ Index::index(&**self, index)
+ }
+}
+
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::Range<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::Range<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::RangeTo<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::RangeFrom<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::RangeFull> for String {
+ #[inline]
+ fn index_mut(&mut self, _index: ops::RangeFull) -> &mut str {
+ unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) }
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::IndexMut<ops::RangeInclusive<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut str {
+ IndexMut::index_mut(&mut **self, index)
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::IndexMut<ops::RangeToInclusive<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut str {
+ IndexMut::index_mut(&mut **self, index)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Deref for String {
+ type Target = str;
+
+ #[inline]
+ fn deref(&self) -> &str {
+ unsafe { str::from_utf8_unchecked(&self.vec) }
+ }
+}
+
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::DerefMut for String {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut str {
+ unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) }
+ }
+}
+
+/// A type alias for [`Infallible`].
+///
+/// This alias exists for backwards compatibility, and may eventually be
+/// deprecated.
+///
+/// [`Infallible`]: core::convert::Infallible
+#[stable(feature = "str_parse_error", since = "1.5.0")]
+pub type ParseError = core::convert::Infallible;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl FromStr for String {
+ type Err = core::convert::Infallible;
+ #[inline]
+ fn from_str(s: &str) -> Result<String, Self::Err> {
+ Ok(String::from(s))
+ }
+}
+
+/// A trait for converting a value to a `String`.
+///
+/// This trait is automatically implemented for any type which implements the
+/// [`Display`] trait. As such, `ToString` shouldn't be implemented directly:
+/// [`Display`] should be implemented instead, and you get the `ToString`
+/// implementation for free.
+///
+/// [`Display`]: fmt::Display
+#[cfg_attr(not(test), rustc_diagnostic_item = "ToString")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ToString {
+ /// Converts the given value to a `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let i = 5;
+ /// let five = String::from("5");
+ ///
+ /// assert_eq!(five, i.to_string());
+ /// ```
+ #[rustc_conversion_suggestion]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn to_string(&self) -> String;
+}
+
+/// # Panics
+///
+/// In this implementation, the `to_string` method panics
+/// if the `Display` implementation returns an error.
+/// This indicates an incorrect `Display` implementation
+/// since `fmt::Write for String` never returns an error itself.
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Display + ?Sized> ToString for T {
+ // A common guideline is to not inline generic functions. However,
+ // removing `#[inline]` from this method causes non-negligible regressions.
+ // See <https://github.com/rust-lang/rust/pull/74852>, the last attempt
+ // to try to remove it.
+ #[inline]
+ default fn to_string(&self) -> String {
+ let mut buf = String::new();
+ let mut formatter = core::fmt::Formatter::new(&mut buf);
+ // Bypass format_args!() to avoid write_str with zero-length strs
+ fmt::Display::fmt(self, &mut formatter)
+ .expect("a Display implementation returned an error unexpectedly");
+ buf
+ }
+}
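+
+// A minimal sketch of how the blanket impl above is used: providing
+// `Display` for a type (here a hypothetical `Celsius`) is all that is
+// needed to obtain `to_string`:
+//
+//     use core::fmt;
+//
+//     struct Celsius(f32);
+//
+//     impl fmt::Display for Celsius {
+//         fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+//             write!(f, "{}C", self.0)
+//         }
+//     }
+//
+//     assert_eq!(Celsius(21.5).to_string(), "21.5C");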
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "char_to_string_specialization", since = "1.46.0")]
+impl ToString for char {
+ #[inline]
+ fn to_string(&self) -> String {
+ String::from(self.encode_utf8(&mut [0; 4]))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "u8_to_string_specialization", since = "1.54.0")]
+impl ToString for u8 {
+ #[inline]
+ fn to_string(&self) -> String {
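+ // Manual base-10 formatting: a `u8` has at most three decimal
+ // digits, so this avoids the heavier `fmt` machinery.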
+ let mut buf = String::with_capacity(3);
+ let mut n = *self;
+ if n >= 10 {
+ if n >= 100 {
+ buf.push((b'0' + n / 100) as char);
+ n %= 100;
+ }
+ buf.push((b'0' + n / 10) as char);
+ n %= 10;
+ }
+ buf.push((b'0' + n) as char);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "i8_to_string_specialization", since = "1.54.0")]
+impl ToString for i8 {
+ #[inline]
+ fn to_string(&self) -> String {
+ let mut buf = String::with_capacity(4);
+ if self.is_negative() {
+ buf.push('-');
+ }
+ let mut n = self.unsigned_abs();
+ if n >= 10 {
+ if n >= 100 {
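+ // `unsigned_abs` of an `i8` is at most 128, so when a
+ // hundreds digit exists it is always `1`.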
+ buf.push('1');
+ n -= 100;
+ }
+ buf.push((b'0' + n / 10) as char);
+ n %= 10;
+ }
+ buf.push((b'0' + n) as char);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "str_to_string_specialization", since = "1.9.0")]
+impl ToString for str {
+ #[inline]
+ fn to_string(&self) -> String {
+ String::from(self)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_to_string_specialization", since = "1.17.0")]
+impl ToString for Cow<'_, str> {
+ #[inline]
+ fn to_string(&self) -> String {
+ self[..].to_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_to_string_specialization", since = "1.17.0")]
+impl ToString for String {
+ #[inline]
+ fn to_string(&self) -> String {
+ self.to_owned()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<str> for String {
+ #[inline]
+ fn as_ref(&self) -> &str {
+ self
+ }
+}
+
+#[stable(feature = "string_as_mut", since = "1.43.0")]
+impl AsMut<str> for String {
+ #[inline]
+ fn as_mut(&mut self) -> &mut str {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<[u8]> for String {
+ #[inline]
+ fn as_ref(&self) -> &[u8] {
+ self.as_bytes()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<&str> for String {
+ /// Converts a `&str` into a [`String`].
+ ///
+ /// The result is allocated on the heap.
+ #[inline]
+ fn from(s: &str) -> String {
+ s.to_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_mut_str_for_string", since = "1.44.0")]
+impl From<&mut str> for String {
+ /// Converts a `&mut str` into a [`String`].
+ ///
+ /// The result is allocated on the heap.
+ #[inline]
+ fn from(s: &mut str) -> String {
+ s.to_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_ref_string", since = "1.35.0")]
+impl From<&String> for String {
+ /// Converts a `&String` into a [`String`].
+ ///
+ /// This clones `s` and returns the clone.
+ #[inline]
+ fn from(s: &String) -> String {
+ s.clone()
+ }
+}
+
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(test))]
+#[stable(feature = "string_from_box", since = "1.18.0")]
+impl From<Box<str>> for String {
+ /// Converts the given boxed `str` slice to a [`String`].
+ ///
+ /// No allocation is performed: the boxed slice already owns its
+ /// contents, which are simply moved into the `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s1: String = String::from("hello world");
+ /// let s2: Box<str> = s1.into_boxed_str();
+ /// let s3: String = String::from(s2);
+ ///
+ /// assert_eq!("hello world", s3)
+ /// ```
+ fn from(s: Box<str>) -> String {
+ s.into_string()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_str", since = "1.20.0")]
+impl From<String> for Box<str> {
+ /// Converts the given [`String`] to a boxed `str` slice that is owned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s1: String = String::from("hello world");
+ /// let s2: Box<str> = Box::from(s1);
+ /// let s3: String = String::from(s2);
+ ///
+ /// assert_eq!("hello world", s3)
+ /// ```
+ fn from(s: String) -> Box<str> {
+ s.into_boxed_str()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_from_cow_str", since = "1.14.0")]
+impl<'a> From<Cow<'a, str>> for String {
+ /// Converts a clone-on-write string to an owned
+ /// instance of [`String`].
+ ///
+ /// This extracts the owned string if the `Cow` already owns it,
+ /// and clones the borrowed string otherwise.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// // If the string is not owned...
+ /// let cow: Cow<str> = Cow::Borrowed("eggplant");
+ /// // It will allocate on the heap and copy the string.
+ /// let owned: String = String::from(cow);
+ /// assert_eq!(&owned[..], "eggplant");
+ /// ```
+ fn from(s: Cow<'a, str>) -> String {
+ s.into_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> From<&'a str> for Cow<'a, str> {
+ /// Converts a string slice into a [`Borrowed`] variant.
+ /// No heap allocation is performed, and the string
+ /// is not copied.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// assert_eq!(Cow::from("eggplant"), Cow::Borrowed("eggplant"));
+ /// ```
+ ///
+ /// [`Borrowed`]: crate::borrow::Cow::Borrowed
+ #[inline]
+ fn from(s: &'a str) -> Cow<'a, str> {
+ Cow::Borrowed(s)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> From<String> for Cow<'a, str> {
+ /// Converts a [`String`] into an [`Owned`] variant.
+ /// No heap allocation is performed, and the string
+ /// is not copied.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// let s = "eggplant".to_string();
+ /// let s2 = "eggplant".to_string();
+ /// assert_eq!(Cow::from(s), Cow::<'static, str>::Owned(s2));
+ /// ```
+ ///
+ /// [`Owned`]: crate::borrow::Cow::Owned
+ #[inline]
+ fn from(s: String) -> Cow<'a, str> {
+ Cow::Owned(s)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_from_string_ref", since = "1.28.0")]
+impl<'a> From<&'a String> for Cow<'a, str> {
+ /// Converts a [`String`] reference into a [`Borrowed`] variant.
+ /// No heap allocation is performed, and the string
+ /// is not copied.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// let s = "eggplant".to_string();
+ /// assert_eq!(Cow::from(&s), Cow::Borrowed("eggplant"));
+ /// ```
+ ///
+ /// [`Borrowed`]: crate::borrow::Cow::Borrowed
+ #[inline]
+ fn from(s: &'a String) -> Cow<'a, str> {
+ Cow::Borrowed(s.as_str())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
+impl<'a> FromIterator<char> for Cow<'a, str> {
+ fn from_iter<I: IntoIterator<Item = char>>(it: I) -> Cow<'a, str> {
+ Cow::Owned(FromIterator::from_iter(it))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
+impl<'a, 'b> FromIterator<&'b str> for Cow<'a, str> {
+ fn from_iter<I: IntoIterator<Item = &'b str>>(it: I) -> Cow<'a, str> {
+ Cow::Owned(FromIterator::from_iter(it))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
+impl<'a> FromIterator<String> for Cow<'a, str> {
+ fn from_iter<I: IntoIterator<Item = String>>(it: I) -> Cow<'a, str> {
+ Cow::Owned(FromIterator::from_iter(it))
+ }
+}
+
+#[stable(feature = "from_string_for_vec_u8", since = "1.14.0")]
+impl From<String> for Vec<u8> {
+ /// Converts the given [`String`] to a vector [`Vec`] that holds values of type [`u8`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s1 = String::from("hello world");
+ /// let v1 = Vec::from(s1);
+ ///
+ /// for b in v1 {
+ /// println!("{}", b);
+ /// }
+ /// ```
+ fn from(string: String) -> Vec<u8> {
+ string.into_bytes()
+ }
+}
+
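+/// Allows `write!` and `writeln!` to append formatted text directly to a
+/// `String`.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt::Write;
+///
+/// let mut s = String::new();
+/// write!(s, "{} + {} = {}", 1, 2, 1 + 2).unwrap();
+/// assert_eq!(s, "1 + 2 = 3");
+/// ```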
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Write for String {
+ #[inline]
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ self.push_str(s);
+ Ok(())
+ }
+
+ #[inline]
+ fn write_char(&mut self, c: char) -> fmt::Result {
+ self.push(c);
+ Ok(())
+ }
+}
+
+/// A draining iterator for `String`.
+///
+/// This struct is created by the [`drain`] method on [`String`]. See its
+/// documentation for more.
+///
+/// [`drain`]: String::drain
+#[stable(feature = "drain", since = "1.6.0")]
+pub struct Drain<'a> {
+ /// Will be used as &'a mut String in the destructor
+ string: *mut String,
+ /// Start of part to remove
+ start: usize,
+ /// End of part to remove
+ end: usize,
+ /// Current remaining range to remove
+ iter: Chars<'a>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl fmt::Debug for Drain<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Drain").field(&self.as_str()).finish()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl Sync for Drain<'_> {}
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl Send for Drain<'_> {}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl Drop for Drain<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ // Use Vec::drain. "Reaffirm" the bounds checks to avoid
+ // panic code being inserted again.
+ let self_vec = (*self.string).as_mut_vec();
+ if self.start <= self.end && self.end <= self_vec.len() {
+ self_vec.drain(self.start..self.end);
+ }
+ }
+ }
+}
+
+impl<'a> Drain<'a> {
+ /// Returns the remaining (sub)string of this iterator as a slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(string_drain_as_str)]
+ /// let mut s = String::from("abc");
+ /// let mut drain = s.drain(..);
+ /// assert_eq!(drain.as_str(), "abc");
+ /// let _ = drain.next().unwrap();
+ /// assert_eq!(drain.as_str(), "bc");
+ /// ```
+ #[unstable(feature = "string_drain_as_str", issue = "76905")] // Note: uncomment AsRef impls below when stabilizing.
+ pub fn as_str(&self) -> &str {
+ self.iter.as_str()
+ }
+}
+
+// Uncomment when stabilizing `string_drain_as_str`.
+// #[unstable(feature = "string_drain_as_str", issue = "76905")]
+// impl<'a> AsRef<str> for Drain<'a> {
+// fn as_ref(&self) -> &str {
+// self.as_str()
+// }
+// }
+//
+// #[unstable(feature = "string_drain_as_str", issue = "76905")]
+// impl<'a> AsRef<[u8]> for Drain<'a> {
+// fn as_ref(&self) -> &[u8] {
+// self.as_str().as_bytes()
+// }
+// }
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl Iterator for Drain<'_> {
+ type Item = char;
+
+ #[inline]
+ fn next(&mut self) -> Option<char> {
+ self.iter.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<char> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl DoubleEndedIterator for Drain<'_> {
+ #[inline]
+ fn next_back(&mut self) -> Option<char> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for Drain<'_> {}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_char_for_string", since = "1.46.0")]
+impl From<char> for String {
+ /// Allocates an owned [`String`] from a single character.
+ ///
+ /// # Example
+ /// ```rust
+ /// let c: char = 'a';
+ /// let s: String = String::from(c);
+ /// assert_eq!("a", &s[..]);
+ /// ```
+ #[inline]
+ fn from(c: char) -> Self {
+ c.to_string()
+ }
+}
diff --git a/rust/alloc/sync.rs b/rust/alloc/sync.rs
new file mode 100644
index 000000000000..1f4e446806cf
--- /dev/null
+++ b/rust/alloc/sync.rs
@@ -0,0 +1,2631 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+//! Thread-safe reference-counting pointers.
+//!
+//! See the [`Arc<T>`][Arc] documentation for more details.
+
+use core::any::Any;
+use core::borrow;
+use core::cmp::Ordering;
+use core::convert::{From, TryFrom};
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::hint;
+use core::intrinsics::abort;
+#[cfg(not(no_global_oom_handling))]
+use core::iter;
+use core::marker::{PhantomData, Unpin, Unsize};
+#[cfg(not(no_global_oom_handling))]
+use core::mem::size_of_val;
+use core::mem::{self, align_of_val_raw};
+use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
+#[cfg(not(no_global_oom_handling))]
+use core::pin::Pin;
+use core::ptr::{self, NonNull};
+#[cfg(not(no_global_oom_handling))]
+use core::slice::from_raw_parts_mut;
+use core::sync::atomic;
+use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::handle_alloc_error;
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::{box_free, WriteCloneIntoRaw};
+use crate::alloc::{AllocError, Allocator, Global, Layout};
+use crate::borrow::{Cow, ToOwned};
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::rc::is_dangling;
+#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
+use crate::vec::Vec;
+
+#[cfg(test)]
+mod tests;
+
+/// A soft limit on the number of references that may be made to an `Arc`.
+///
+/// Going above this limit will abort your program, although not
+/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
+const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+
+#[cfg(not(sanitize = "thread"))]
+macro_rules! acquire {
+ ($x:expr) => {
+ atomic::fence(Acquire)
+ };
+}
+
+// ThreadSanitizer does not support memory fences. To avoid false positive
+// reports in Arc / Weak implementation use atomic loads for synchronization
+// instead.
+#[cfg(sanitize = "thread")]
+macro_rules! acquire {
+ ($x:expr) => {
+ $x.load(Acquire)
+ };
+}
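+
+// Usage sketch: `acquire!(this.inner().strong)` pairs a preceding
+// `Release` operation on the strong count with an `Acquire`
+// synchronization point, emitted either as a fence or, under
+// ThreadSanitizer, as an atomic load.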
+
+/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
+/// Reference Counted'.
+///
+/// The type `Arc<T>` provides shared ownership of a value of type `T`,
+/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
+/// a new `Arc` instance, which points to the same allocation on the heap as the
+/// source `Arc`, while increasing a reference count. When the last `Arc`
+/// pointer to a given allocation is destroyed, the value stored in that allocation (often
+/// referred to as "inner value") is also dropped.
+///
+/// Shared references in Rust disallow mutation by default, and `Arc` is no
+/// exception: you cannot generally obtain a mutable reference to something
+/// inside an `Arc`. If you need to mutate through an `Arc`, use
+/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
+/// types.
+///
+/// ## Thread Safety
+///
+/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
+/// counting. This means that it is thread-safe. The disadvantage is that
+/// atomic operations are more expensive than ordinary memory accesses. If you
+/// are not sharing reference-counted allocations between threads, consider using
+/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
+/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
+/// However, a library might choose `Arc<T>` in order to give library consumers
+/// more flexibility.
+///
+/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
+/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
+/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
+/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
+/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
+/// data, but it doesn't add thread safety to its data. Consider
+/// `Arc<`[`RefCell<T>`]`>`. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
+/// [`Send`], `Arc<`[`RefCell<T>`]`>` would be as well. But then we'd have a problem:
+/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
+/// non-atomic operations.
+///
+/// In the end, this means that you may need to pair `Arc<T>` with some sort of
+/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
+///
+/// ## Breaking cycles with `Weak`
+///
+/// The [`downgrade`][downgrade] method can be used to create a non-owning
+/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
+/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
+/// already been dropped. In other words, `Weak` pointers do not keep the value
+/// inside the allocation alive; however, they *do* keep the allocation
+/// (the backing store for the value) alive.
+///
+/// A cycle between `Arc` pointers will never be deallocated. For this reason,
+/// [`Weak`] is used to break cycles. For example, a tree could have
+/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
+/// pointers from children back to their parents.
+///
+/// # Cloning references
+///
+/// Creating a new reference from an existing reference-counted pointer is done using the
+/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
+///
+/// ```
+/// use std::sync::Arc;
+/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
+/// // The two syntaxes below are equivalent.
+/// let a = foo.clone();
+/// let b = Arc::clone(&foo);
+/// // a, b, and foo are all Arcs that point to the same memory location
+/// ```
+///
+/// ## `Deref` behavior
+///
+/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
+/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
+/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
+/// functions, called using [fully qualified syntax]:
+///
+/// ```
+/// use std::sync::Arc;
+///
+/// let my_arc = Arc::new(());
+/// Arc::downgrade(&my_arc);
+/// ```
+///
+/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
+/// fully qualified syntax. Some people prefer to use fully qualified syntax,
+/// while others prefer using method-call syntax.
+///
+/// ```
+/// use std::sync::Arc;
+///
+/// let arc = Arc::new(());
+/// // Method-call syntax
+/// let arc2 = arc.clone();
+/// // Fully qualified syntax
+/// let arc3 = Arc::clone(&arc);
+/// ```
+///
+/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
+/// already been dropped.
+///
+/// [`Rc<T>`]: crate::rc::Rc
+/// [clone]: Clone::clone
+/// [mutex]: ../../std/sync/struct.Mutex.html
+/// [rwlock]: ../../std/sync/struct.RwLock.html
+/// [atomic]: core::sync::atomic
+/// [`Send`]: core::marker::Send
+/// [`Sync`]: core::marker::Sync
+/// [deref]: core::ops::Deref
+/// [downgrade]: Arc::downgrade
+/// [upgrade]: Weak::upgrade
+/// [`RefCell<T>`]: core::cell::RefCell
+/// [`std::sync`]: ../../std/sync/index.html
+/// [`Arc::clone(&from)`]: Arc::clone
+/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
+///
+/// # Examples
+///
+/// Sharing some immutable data between threads:
+///
+// Note that we **do not** run these tests here. The windows builders get super
+// unhappy if a thread outlives the main thread and then exits at the same time
+// (something deadlocks) so we just avoid this entirely by not running these
+// tests.
+/// ```no_run
+/// use std::sync::Arc;
+/// use std::thread;
+///
+/// let five = Arc::new(5);
+///
+/// for _ in 0..10 {
+/// let five = Arc::clone(&five);
+///
+/// thread::spawn(move || {
+/// println!("{:?}", five);
+/// });
+/// }
+/// ```
+///
+/// Sharing a mutable [`AtomicUsize`]:
+///
+/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize
+///
+/// ```no_run
+/// use std::sync::Arc;
+/// use std::sync::atomic::{AtomicUsize, Ordering};
+/// use std::thread;
+///
+/// let val = Arc::new(AtomicUsize::new(5));
+///
+/// for _ in 0..10 {
+/// let val = Arc::clone(&val);
+///
+/// thread::spawn(move || {
+/// let v = val.fetch_add(1, Ordering::SeqCst);
+/// println!("{:?}", v);
+/// });
+/// }
+/// ```
+///
+/// See the [`rc` documentation][rc_examples] for more examples of reference
+/// counting in general.
+///
+/// [rc_examples]: crate::rc#examples
+#[cfg_attr(not(test), rustc_diagnostic_item = "Arc")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Arc<T: ?Sized> {
+ ptr: NonNull<ArcInner<T>>,
+ phantom: PhantomData<ArcInner<T>>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
+
+impl<T: ?Sized> Arc<T> {
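+ /// Builds an `Arc` from a pointer to an already-initialized
+ /// `ArcInner`, taking over the reference counts it carries; the
+ /// caller transfers ownership of exactly one strong reference.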
+ fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
+ Self { ptr, phantom: PhantomData }
+ }
+
+ unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
+ unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+ }
+}
+
+/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
+/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
+/// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`.
+///
+/// Since a `Weak` reference does not count towards ownership, it will not
+/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
+/// guarantees about the value still being present. Thus it may return [`None`]
+/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
+/// itself (the backing store) from being deallocated.
+///
+/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
+/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
+/// prevent circular references between [`Arc`] pointers, since mutual owning references
+/// would never allow either [`Arc`] to be dropped. For example, a tree could
+/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
+/// pointers from children back to their parents.
+///
+/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
+///
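+/// # Examples
+///
+/// A sketch of the downgrade/upgrade round trip:
+///
+/// ```
+/// use std::sync::Arc;
+///
+/// let strong = Arc::new(42);
+/// let weak = Arc::downgrade(&strong);
+///
+/// assert_eq!(weak.upgrade().as_deref(), Some(&42));
+/// drop(strong);
+/// assert!(weak.upgrade().is_none());
+/// ```
+///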
+/// [`upgrade`]: Weak::upgrade
+#[stable(feature = "arc_weak", since = "1.4.0")]
+pub struct Weak<T: ?Sized> {
+ // This is a `NonNull` to allow optimizing the size of this type in enums,
+ // but it is not necessarily a valid pointer.
+ // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
+ // to allocate space on the heap. That's not a value a real pointer
+ // will ever have because `ArcInner` has alignment at least 2.
+ // This is only possible when `T: Sized`; unsized `T` never dangle.
+ ptr: NonNull<ArcInner<T>>,
+}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
+#[stable(feature = "arc_weak", since = "1.4.0")]
+unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "(Weak)")
+ }
+}
+
+// This is repr(C) to future-proof against possible field-reordering, which
+// would interfere with otherwise safe [into|from]_raw() of transmutable
+// inner types.
+#[repr(C)]
+struct ArcInner<T: ?Sized> {
+ strong: atomic::AtomicUsize,
+
+ // the value usize::MAX acts as a sentinel for temporarily "locking" the
+ // ability to upgrade weak pointers or downgrade strong ones; this is used
+ // to avoid races in `make_mut` and `get_mut`.
+ weak: atomic::AtomicUsize,
+
+ data: T,
+}
+
+unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
+unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
+
+impl<T> Arc<T> {
+ /// Constructs a new `Arc<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new(data: T) -> Arc<T> {
+ // Start the weak pointer count as 1: this is the implicit weak
+ // pointer collectively held by all the strong pointers; see
+ // std/rc.rs for more info.
+ let x: Box<_> = box ArcInner {
+ strong: atomic::AtomicUsize::new(1),
+ weak: atomic::AtomicUsize::new(1),
+ data,
+ };
+ Self::from_inner(Box::leak(x).into())
+ }
+
+ /// Constructs a new `Arc<T>` using a weak reference to itself. Attempting
+ /// to upgrade the weak reference before this function returns will result
+ /// in a `None` value. However, the weak reference may be cloned freely and
+ /// stored for use at a later time.
+ ///
+ /// # Examples
+ /// ```
+ /// #![feature(arc_new_cyclic)]
+ /// #![allow(dead_code)]
+ ///
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// struct Foo {
+ /// me: Weak<Foo>,
+ /// }
+ ///
+ /// let foo = Arc::new_cyclic(|me| Foo {
+ /// me: me.clone(),
+ /// });
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[unstable(feature = "arc_new_cyclic", issue = "75861")]
+ pub fn new_cyclic(data_fn: impl FnOnce(&Weak<T>) -> T) -> Arc<T> {
+ // Construct the inner in the "uninitialized" state with a single
+ // weak reference.
+ let uninit_ptr: NonNull<_> = Box::leak(box ArcInner {
+ strong: atomic::AtomicUsize::new(0),
+ weak: atomic::AtomicUsize::new(1),
+ data: mem::MaybeUninit::<T>::uninit(),
+ })
+ .into();
+ let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();
+
+ let weak = Weak { ptr: init_ptr };
+
+ // It's important we don't give up ownership of the weak pointer, or
+ // else the memory might be freed by the time `data_fn` returns. If
+ // we really wanted to pass ownership, we could create an additional
+ // weak pointer for ourselves, but this would result in additional
+ // updates to the weak reference count which might not be necessary
+ // otherwise.
+ let data = data_fn(&weak);
+
+ // Now we can properly initialize the inner value and turn our weak
+ // reference into a strong reference.
+ unsafe {
+ let inner = init_ptr.as_ptr();
+ ptr::write(ptr::addr_of_mut!((*inner).data), data);
+
+ // The above write to the data field must be visible to any threads which
+ // observe a non-zero strong count. Therefore we need at least "Release" ordering
+ // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
+ //
+ // "Acquire" ordering is not required. When considering the possible behaviours
+ // of `data_fn` we only need to look at what it could do with a reference to a
+ // non-upgradeable `Weak`:
+ // - It can *clone* the `Weak`, increasing the weak reference count.
+ // - It can drop those clones, decreasing the weak reference count (but never to zero).
+ //
+ // These side effects do not impact us in any way, and no other side effects are
+ // possible with safe code alone.
+ let prev_value = (*inner).strong.fetch_add(1, Release);
+ debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
+ }
+
+ let strong = Arc::from_inner(init_ptr);
+
+ // Strong references should collectively own a shared weak reference,
+ // so don't run the destructor for our old weak reference.
+ mem::forget(weak);
+ strong
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut five = Arc::<u32>::new_uninit();
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
+ unsafe {
+ Arc::from_ptr(Arc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate(layout),
+ |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ ))
+ }
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let zero = Arc::<u32>::new_zeroed();
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: ../../std/mem/union.MaybeUninit.html#method.zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
+ unsafe {
+ Arc::from_ptr(Arc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ ))
+ }
+ }
+
+ /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
+ /// `data` will be pinned in memory and unable to be moved.
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "pin", since = "1.33.0")]
+ pub fn pin(data: T) -> Pin<Arc<T>> {
+ unsafe { Pin::new_unchecked(Arc::new(data)) }
+ }
+
+ /// Constructs a new `Arc<T>`, returning an error if allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::try_new(5)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
+ // Start the weak pointer count as 1: this is the implicit weak
+ // pointer collectively held by all the strong pointers; see
+ // std/rc.rs for more info.
+ let x: Box<_> = Box::try_new(ArcInner {
+ strong: atomic::AtomicUsize::new(1),
+ weak: atomic::AtomicUsize::new(1),
+ data,
+ })?;
+ Ok(Self::from_inner(Box::leak(x).into()))
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents, returning an error
+ /// if allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit, allocator_api)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut five = Arc::<u32>::try_new_uninit()?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
+ unsafe {
+ Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate(layout),
+ |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ )?))
+ }
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes, returning an error if allocation fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit, allocator_api)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let zero = Arc::<u32>::try_new_zeroed()?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
+ unsafe {
+ Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ )?))
+ }
+ }
+
+ /// Returns the inner value, if the `Arc` has exactly one strong reference.
+ ///
+ /// Otherwise, an [`Err`] is returned with the same `Arc` that was
+ /// passed in.
+ ///
+ /// This will succeed even if there are outstanding weak references.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let x = Arc::new(3);
+ /// assert_eq!(Arc::try_unwrap(x), Ok(3));
+ ///
+ /// let x = Arc::new(4);
+ /// let _y = Arc::clone(&x);
+ /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_unique", since = "1.4.0")]
+ pub fn try_unwrap(this: Self) -> Result<T, Self> {
+ if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
+ return Err(this);
+ }
+
+ acquire!(this.inner().strong);
+
+ unsafe {
+ let elem = ptr::read(&this.ptr.as_ref().data);
+
+ // Make a weak pointer to clean up the implicit strong-weak reference
+ let _weak = Weak { ptr: this.ptr };
+ mem::forget(this);
+
+ Ok(elem)
+ }
+ }
+}
+
+impl<T> Arc<[T]> {
+ /// Constructs a new atomically reference-counted slice with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
+ /// Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
+ /// Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
+ unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
+ }
+
+ /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
+ /// filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let values = Arc::<[u32]>::new_zeroed_slice(3);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: ../../std/mem/union.MaybeUninit.html#method.zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
+ unsafe {
+ Arc::from_ptr(Arc::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| {
+ ptr::slice_from_raw_parts_mut(mem as *mut T, len)
+ as *mut ArcInner<[mem::MaybeUninit<T>]>
+ },
+ ))
+ }
+ }
+}
+
+impl<T> Arc<mem::MaybeUninit<T>> {
+ /// Converts to `Arc<T>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the inner value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: ../../std/mem/union.MaybeUninit.html#method.assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut five = Arc::<u32>::new_uninit();
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Arc<T> {
+ Arc::from_inner(mem::ManuallyDrop::new(self).ptr.cast())
+ }
+}
+
+impl<T> Arc<[mem::MaybeUninit<T>]> {
+ /// Converts to `Arc<[T]>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the inner value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: ../../std/mem/union.MaybeUninit.html#method.assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
+ /// Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
+ /// Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Arc<[T]> {
+ unsafe { Arc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) }
+ }
+}
+
+impl<T: ?Sized> Arc<T> {
+ /// Consumes the `Arc`, returning the wrapped pointer.
+ ///
+ /// To avoid a memory leak the pointer must be converted back to an `Arc` using
+ /// [`Arc::from_raw`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let x = Arc::new("hello".to_owned());
+ /// let x_ptr = Arc::into_raw(x);
+ /// assert_eq!(unsafe { &*x_ptr }, "hello");
+ /// ```
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub fn into_raw(this: Self) -> *const T {
+ let ptr = Self::as_ptr(&this);
+ mem::forget(this);
+ ptr
+ }
+
+ /// Provides a raw pointer to the data.
+ ///
+ /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
+ /// as long as there are strong references to this allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let x = Arc::new("hello".to_owned());
+ /// let y = Arc::clone(&x);
+ /// let x_ptr = Arc::as_ptr(&x);
+ /// assert_eq!(x_ptr, Arc::as_ptr(&y));
+ /// assert_eq!(unsafe { &*x_ptr }, "hello");
+ /// ```
+ #[stable(feature = "rc_as_ptr", since = "1.45.0")]
+ pub fn as_ptr(this: &Self) -> *const T {
+ let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
+
+ // SAFETY: This cannot go through Deref::deref or Arc::inner because
+ // this is required to retain raw/mut provenance such that e.g. `get_mut` can
+ // write through the pointer after the Arc is recovered through `from_raw`.
+ unsafe { ptr::addr_of_mut!((*ptr).data) }
+ }
+
+ /// Constructs an `Arc<T>` from a raw pointer.
+ ///
+ /// The raw pointer must have been previously returned by a call to
+ /// [`Arc<U>::into_raw`][into_raw] where `U` must have the same size and
+ /// alignment as `T`. This is trivially true if `U` is `T`.
+ /// Note that if `U` is not `T` but has the same size and alignment, this is
+ /// basically like transmuting references of different types. See
+ /// [`mem::transmute`][transmute] for more information on what
+ /// restrictions apply in this case.
+ ///
+ /// The user of `from_raw` has to make sure a specific value of `T` is only
+ /// dropped once.
+ ///
+ /// This function is unsafe because improper use may lead to memory unsafety,
+ /// even if the returned `Arc<T>` is never accessed.
+ ///
+ /// [into_raw]: Arc::into_raw
+ /// [transmute]: core::mem::transmute
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let x = Arc::new("hello".to_owned());
+ /// let x_ptr = Arc::into_raw(x);
+ ///
+ /// unsafe {
+ /// // Convert back to an `Arc` to prevent leak.
+ /// let x = Arc::from_raw(x_ptr);
+ /// assert_eq!(&*x, "hello");
+ ///
+ /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
+ /// }
+ ///
+ /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
+ /// ```
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ unsafe {
+ let offset = data_offset(ptr);
+
+ // Reverse the offset to find the original ArcInner.
+ let arc_ptr = (ptr as *mut ArcInner<T>).set_ptr_value((ptr as *mut u8).offset(-offset));
+
+ Self::from_ptr(arc_ptr)
+ }
+ }
+
+ /// Creates a new [`Weak`] pointer to this allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// let weak_five = Arc::downgrade(&five);
+ /// ```
+ #[stable(feature = "arc_weak", since = "1.4.0")]
+ pub fn downgrade(this: &Self) -> Weak<T> {
+ // This Relaxed is OK because we're checking the value in the CAS
+ // below.
+ let mut cur = this.inner().weak.load(Relaxed);
+
+ loop {
+ // check if the weak counter is currently "locked"; if so, spin.
+ if cur == usize::MAX {
+ hint::spin_loop();
+ cur = this.inner().weak.load(Relaxed);
+ continue;
+ }
+
+ // NOTE: this code currently ignores the possibility of overflow
+ // into usize::MAX; in general both Rc and Arc need to be adjusted
+ // to deal with overflow.
+
+ // Unlike with Clone(), we need this to be an Acquire read to
+ // synchronize with the write coming from `is_unique`, so that the
+ // events prior to that write happen before this read.
+ match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
+ Ok(_) => {
+ // Make sure we do not create a dangling Weak
+ debug_assert!(!is_dangling(this.ptr.as_ptr()));
+ return Weak { ptr: this.ptr };
+ }
+ Err(old) => cur = old,
+ }
+ }
+ }
+
+ /// Gets the number of [`Weak`] pointers to this allocation.
+ ///
+ /// # Safety
+ ///
+ /// This method by itself is safe, but using it correctly requires extra care.
+ /// Another thread can change the weak count at any time,
+ /// including potentially between calling this method and acting on the result.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ /// let _weak_five = Arc::downgrade(&five);
+ ///
+ /// // This assertion is deterministic because we haven't shared
+ /// // the `Arc` or `Weak` between threads.
+ /// assert_eq!(1, Arc::weak_count(&five));
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_counts", since = "1.15.0")]
+ pub fn weak_count(this: &Self) -> usize {
+ let cnt = this.inner().weak.load(SeqCst);
+ // If the weak count is currently locked, the value of the
+ // count was 0 just before taking the lock.
+ if cnt == usize::MAX { 0 } else { cnt - 1 }
+ }
+
+ /// Gets the number of strong (`Arc`) pointers to this allocation.
+ ///
+ /// # Safety
+ ///
+ /// This method by itself is safe, but using it correctly requires extra care.
+ /// Another thread can change the strong count at any time,
+ /// including potentially between calling this method and acting on the result.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ /// let _also_five = Arc::clone(&five);
+ ///
+ /// // This assertion is deterministic because we haven't shared
+ /// // the `Arc` between threads.
+ /// assert_eq!(2, Arc::strong_count(&five));
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_counts", since = "1.15.0")]
+ pub fn strong_count(this: &Self) -> usize {
+ this.inner().strong.load(SeqCst)
+ }
+
+ /// Increments the strong reference count on the `Arc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Arc::into_raw`, and the
+ /// associated `Arc` instance must be valid (i.e. the strong count must be at
+ /// least 1) for the duration of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Arc::into_raw(five);
+ /// Arc::increment_strong_count(ptr);
+ ///
+ /// // This assertion is deterministic because we haven't shared
+ /// // the `Arc` between threads.
+ /// let five = Arc::from_raw(ptr);
+ /// assert_eq!(2, Arc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
+ pub unsafe fn increment_strong_count(ptr: *const T) {
+ // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
+ let arc = unsafe { mem::ManuallyDrop::new(Arc::<T>::from_raw(ptr)) };
+ // Now increase refcount, but don't drop new refcount either
+ let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
+ }
+
+ /// Decrements the strong reference count on the `Arc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Arc::into_raw`, and the
+ /// associated `Arc` instance must be valid (i.e. the strong count must be at
+ /// least 1) when invoking this method. This method can be used to release the final
+ /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
+ /// released.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Arc::into_raw(five);
+ /// Arc::increment_strong_count(ptr);
+ ///
+ /// // Those assertions are deterministic because we haven't shared
+ /// // the `Arc` between threads.
+ /// let five = Arc::from_raw(ptr);
+ /// assert_eq!(2, Arc::strong_count(&five));
+ /// Arc::decrement_strong_count(ptr);
+ /// assert_eq!(1, Arc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
+ pub unsafe fn decrement_strong_count(ptr: *const T) {
+ unsafe { mem::drop(Arc::from_raw(ptr)) };
+ }
+
+ #[inline]
+ fn inner(&self) -> &ArcInner<T> {
+ // This unsafety is ok because while this arc is alive we're guaranteed
+ // that the inner pointer is valid. Furthermore, we know that the
+ // `ArcInner` structure itself is `Sync` because the inner data is
+ // `Sync` as well, so we're ok loaning out an immutable pointer to these
+ // contents.
+ unsafe { self.ptr.as_ref() }
+ }
+
+ // Non-inlined part of `drop`.
+ #[inline(never)]
+ unsafe fn drop_slow(&mut self) {
+ // Destroy the data at this time, even though we may not free the box
+ // allocation itself (there may still be weak pointers lying around).
+ unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
+
+ // Drop the weak ref collectively held by all strong references
+ drop(Weak { ptr: self.ptr });
+ }
+
+ #[inline]
+ #[stable(feature = "ptr_eq", since = "1.17.0")]
+ /// Returns `true` if the two `Arc`s point to the same allocation
+ /// (in a vein similar to [`ptr::eq`]).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ /// let same_five = Arc::clone(&five);
+ /// let other_five = Arc::new(5);
+ ///
+ /// assert!(Arc::ptr_eq(&five, &same_five));
+ /// assert!(!Arc::ptr_eq(&five, &other_five));
+ /// ```
+ ///
+ /// [`ptr::eq`]: core::ptr::eq
+ pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+ this.ptr.as_ptr() == other.ptr.as_ptr()
+ }
+}
+
+impl<T: ?Sized> Arc<T> {
+ /// Allocates an `ArcInner<T>` with sufficient space for
+ /// a possibly-unsized inner value where the value has the layout provided.
+ ///
+ /// The function `mem_to_arcinner` is called with the data pointer
+ /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
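+ ///
+ /// A sketch of a typical call (mirroring `allocate_for_slice` further down),
+ /// where `mem_to_arcinner` attaches the slice length as pointer metadata:
+ ///
+ /// ```ignore
+ /// Self::allocate_for_layout(
+ /// Layout::array::<T>(len).unwrap(),
+ /// |layout| Global.allocate(layout),
+ /// |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
+ /// )
+ /// ```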
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_layout(
+ value_layout: Layout,
+ allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
+ mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
+ ) -> *mut ArcInner<T> {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
+ // reference (see #54908).
+ let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ unsafe {
+ Arc::try_allocate_for_layout(value_layout, allocate, mem_to_arcinner)
+ .unwrap_or_else(|_| handle_alloc_error(layout))
+ }
+ }
+
+ /// Allocates an `ArcInner<T>` with sufficient space for
+ /// a possibly-unsized inner value where the value has the layout provided,
+ /// returning an error if allocation fails.
+ ///
+ /// The function `mem_to_arcinner` is called with the data pointer
+ /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
+ unsafe fn try_allocate_for_layout(
+ value_layout: Layout,
+ allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
+ mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
+ ) -> Result<*mut ArcInner<T>, AllocError> {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
+ // reference (see #54908).
+ let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
+
+ let ptr = allocate(layout)?;
+
+ // Initialize the ArcInner
+ let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
+ debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);
+
+ unsafe {
+ ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
+ ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));
+ }
+
+ Ok(inner)
+ }
+
+ /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
+ // Allocate for the `ArcInner<T>` using the given value.
+ unsafe {
+ Self::allocate_for_layout(
+ Layout::for_value(&*ptr),
+ |layout| Global.allocate(layout),
+ |mem| (ptr as *mut ArcInner<T>).set_ptr_value(mem) as *mut ArcInner<T>,
+ )
+ }
+ }
+
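+ /// Moves the value out of the given box into a new, reference-counted
+ /// allocation, copying it as raw bytes and then freeing the box without
+ /// dropping its contents.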
+ #[cfg(not(no_global_oom_handling))]
+ fn from_box(v: Box<T>) -> Arc<T> {
+ unsafe {
+ let (box_unique, alloc) = Box::into_unique(v);
+ let bptr = box_unique.as_ptr();
+
+ let value_size = size_of_val(&*bptr);
+ let ptr = Self::allocate_for_ptr(bptr);
+
+ // Copy value as bytes
+ ptr::copy_nonoverlapping(
+ bptr as *const T as *const u8,
+ &mut (*ptr).data as *mut _ as *mut u8,
+ value_size,
+ );
+
+ // Free the allocation without dropping its contents
+ box_free(box_unique, alloc);
+
+ Self::from_ptr(ptr)
+ }
+ }
+}
+
+impl<T> Arc<[T]> {
+ /// Allocates an `ArcInner<[T]>` with the given length.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
+ unsafe {
+ Self::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| Global.allocate(layout),
+ |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
+ )
+ }
+ }
+
+ /// Tries to allocate an `ArcInner<[T]>` with the given length.
+ unsafe fn try_allocate_for_slice(len: usize) -> Result<*mut ArcInner<[T]>, TryReserveError> {
+ unsafe {
+ let layout = Layout::array::<T>(len)?;
+ Self::try_allocate_for_layout(
+ layout,
+ |l| Global.allocate(l),
+ |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
+ ).map_err(|_| TryReserveError::AllocError { layout, non_exhaustive: () })
+ }
+ }
+
+ /// Copy elements from slice into newly allocated Arc<\[T\]>
+ ///
+ /// Unsafe because the caller must either take ownership or bind `T: Copy`.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
+ unsafe {
+ let ptr = Self::allocate_for_slice(v.len());
+
+ ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).data as *mut [T] as *mut T, v.len());
+
+ Self::from_ptr(ptr)
+ }
+ }
+
+ /// Tries to copy elements from slice into newly allocated Arc<\[T\]>
+ ///
+ /// Unsafe because the caller must either take ownership or bind `T: Copy`.
+ unsafe fn try_copy_from_slice(v: &[T]) -> Result<Arc<[T]>, TryReserveError> {
+ unsafe {
+ let ptr = Self::try_allocate_for_slice(v.len())?;
+
+ ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).data as *mut [T] as *mut T, v.len());
+
+ Ok(Self::from_ptr(ptr))
+ }
+ }
+
+ /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
+ ///
+ /// Behavior is undefined should the size be wrong.
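+ ///
+ /// A sketch of the intended call pattern (mirroring the `TrustedLen`
+ /// specialization of `ToArcSlice` at the bottom of this file):
+ ///
+ /// ```ignore
+ /// let (low, high) = iter.size_hint();
+ /// debug_assert_eq!(Some(low), high, "the iterator's length must be exact");
+ /// // SAFETY: `low` is the exact number of items `iter` will yield.
+ /// let arc = unsafe { Arc::from_iter_exact(iter, low) };
+ /// ```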
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
+ // Panic guard while cloning T elements.
+ // In the event of a panic, elements that have been written
+ // into the new ArcInner will be dropped, then the memory freed.
+ struct Guard<T> {
+ mem: NonNull<u8>,
+ elems: *mut T,
+ layout: Layout,
+ n_elems: usize,
+ }
+
+ impl<T> Drop for Guard<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let slice = from_raw_parts_mut(self.elems, self.n_elems);
+ ptr::drop_in_place(slice);
+
+ Global.deallocate(self.mem, self.layout);
+ }
+ }
+ }
+
+ unsafe {
+ let ptr = Self::allocate_for_slice(len);
+
+ let mem = ptr as *mut _ as *mut u8;
+ let layout = Layout::for_value(&*ptr);
+
+ // Pointer to first element
+ let elems = &mut (*ptr).data as *mut [T] as *mut T;
+
+ let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
+
+ for (i, item) in iter.enumerate() {
+ ptr::write(elems.add(i), item);
+ guard.n_elems += 1;
+ }
+
+ // All clear. Forget the guard so it doesn't free the new ArcInner.
+ mem::forget(guard);
+
+ Self::from_ptr(ptr)
+ }
+ }
+}
+
+/// Specialization trait used for `From<&[T]>`.
+#[cfg(not(no_global_oom_handling))]
+trait ArcFromSlice<T> {
+ fn from_slice(slice: &[T]) -> Self;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
+ #[inline]
+ default fn from_slice(v: &[T]) -> Self {
+ unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
+ #[inline]
+ fn from_slice(v: &[T]) -> Self {
+ unsafe { Arc::copy_from_slice(v) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Clone for Arc<T> {
+ /// Makes a clone of the `Arc` pointer.
+ ///
+ /// This creates another pointer to the same allocation, increasing the
+ /// strong reference count.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// let _ = Arc::clone(&five);
+ /// ```
+ #[inline]
+ fn clone(&self) -> Arc<T> {
+ // Using a relaxed ordering is alright here, as knowledge of the
+ // original reference prevents other threads from erroneously deleting
+ // the object.
+ //
+ // As explained in the [Boost documentation][1], increasing the
+ // reference counter can always be done with memory_order_relaxed: New
+ // references to an object can only be formed from an existing
+ // reference, and passing an existing reference from one thread to
+ // another must already provide any required synchronization.
+ //
+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+ let old_size = self.inner().strong.fetch_add(1, Relaxed);
+
+ // However we need to guard against massive refcounts in case someone
+ // is `mem::forget`ing Arcs. If we don't do this the count can overflow
+ // and users will use-after-free. We racily saturate to `isize::MAX` on
+ // the assumption that there aren't ~2 billion threads incrementing
+ // the reference count at once. This branch will never be taken in
+ // any realistic program.
+ //
+ // We abort because such a program is incredibly degenerate, and we
+ // don't care to support it.
+ if old_size > MAX_REFCOUNT {
+ abort();
+ }
+
+ Self::from_inner(self.ptr)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for Arc<T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ &self.inner().data
+ }
+}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<T: ?Sized> Receiver for Arc<T> {}
+
+impl<T: Clone> Arc<T> {
+ /// Makes a mutable reference into the given `Arc`.
+ ///
+ /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
+ /// create a new allocation and invoke [`clone`][clone] on the inner value
+ /// to ensure unique ownership. This is also referred to as clone-on-write.
+ ///
+ /// If there are no other `Arc` pointers, but some [`Weak`] pointers remain, the inner
+ /// value is moved into a new allocation without cloning, and the remaining `Weak`
+ /// pointers are disassociated (upgrading them will return [`None`]), as the code
+ /// below does and as [`Rc::make_mut`] also behaves.
+ ///
+ /// See also [`get_mut`][get_mut], which will fail rather than cloning.
+ ///
+ /// [clone]: Clone::clone
+ /// [get_mut]: Arc::get_mut
+ /// [`Rc::make_mut`]: super::rc::Rc::make_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let mut data = Arc::new(5);
+ ///
+ /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
+ /// let mut other_data = Arc::clone(&data); // Won't clone inner data
+ /// *Arc::make_mut(&mut data) += 1; // Clones inner data
+ /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
+ /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything
+ ///
+ /// // Now `data` and `other_data` point to different allocations.
+ /// assert_eq!(*data, 8);
+ /// assert_eq!(*other_data, 12);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "arc_unique", since = "1.4.0")]
+ pub fn make_mut(this: &mut Self) -> &mut T {
+ // Note that we hold both a strong reference and a weak reference.
+ // Thus, releasing our strong reference only will not, by itself, cause
+ // the memory to be deallocated.
+ //
+ // Use Acquire to ensure that we see any writes to `weak` that happen
+ // before release writes (i.e., decrements) to `strong`. Since we hold a
+ // weak count, there's no chance the ArcInner itself could be
+ // deallocated.
+ if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
+ // Another strong pointer exists, so we must clone.
+ // Pre-allocate memory to allow writing the cloned value directly.
+ let mut arc = Self::new_uninit();
+ unsafe {
+ let data = Arc::get_mut_unchecked(&mut arc);
+ (**this).write_clone_into_raw(data.as_mut_ptr());
+ *this = arc.assume_init();
+ }
+ } else if this.inner().weak.load(Relaxed) != 1 {
+ // Relaxed suffices in the above because this is fundamentally an
+ // optimization: we are always racing with weak pointers being
+ // dropped. Worst case, we end up allocating a new Arc unnecessarily.
+
+ // We removed the last strong ref, but there are additional weak
+ // refs remaining. We'll move the contents to a new Arc, and
+ // invalidate the other weak refs.
+
+ // Note that it is not possible for the read of `weak` to yield
+ // usize::MAX (i.e., locked), since the weak count can only be
+ // locked by a thread with a strong reference.
+
+ // Materialize our own implicit weak pointer, so that it can clean
+ // up the ArcInner as needed.
+ let _weak = Weak { ptr: this.ptr };
+
+ // Can just steal the data, all that's left is Weaks
+ let mut arc = Self::new_uninit();
+ unsafe {
+ let data = Arc::get_mut_unchecked(&mut arc);
+ data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1);
+ ptr::write(this, arc.assume_init());
+ }
+ } else {
+ // We were the sole reference of either kind; bump back up the
+ // strong ref count.
+ this.inner().strong.store(1, Release);
+ }
+
+ // As with `get_mut()`, the unsafety is ok because our reference was
+ // either unique to begin with, or became one upon cloning the contents.
+ unsafe { Self::get_mut_unchecked(this) }
+ }
+}
+
+impl<T: ?Sized> Arc<T> {
+ /// Returns a mutable reference into the given `Arc`, if there are
+ /// no other `Arc` or [`Weak`] pointers to the same allocation.
+ ///
+ /// Returns [`None`] otherwise, because it is not safe to
+ /// mutate a shared value.
+ ///
+ /// See also [`make_mut`][make_mut], which will [`clone`][clone]
+ /// the inner value when there are other pointers.
+ ///
+ /// [make_mut]: Arc::make_mut
+ /// [clone]: Clone::clone
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let mut x = Arc::new(3);
+ /// *Arc::get_mut(&mut x).unwrap() = 4;
+ /// assert_eq!(*x, 4);
+ ///
+ /// let _y = Arc::clone(&x);
+ /// assert!(Arc::get_mut(&mut x).is_none());
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_unique", since = "1.4.0")]
+ pub fn get_mut(this: &mut Self) -> Option<&mut T> {
+ if this.is_unique() {
+ // This unsafety is ok because we're guaranteed that the pointer
+ // returned is the *only* pointer that will ever be returned to T. Our
+ // reference count is guaranteed to be 1 at this point, and we required
+ // the Arc itself to be `mut`, so we're returning the only possible
+ // reference to the inner data.
+ unsafe { Some(Arc::get_mut_unchecked(this)) }
+ } else {
+ None
+ }
+ }
+
+ /// Returns a mutable reference into the given `Arc`,
+ /// without any check.
+ ///
+ /// See also [`get_mut`], which is safe and does appropriate checks.
+ ///
+ /// [`get_mut`]: Arc::get_mut
+ ///
+ /// # Safety
+ ///
+ /// Any other `Arc` or [`Weak`] pointers to the same allocation must not be dereferenced
+ /// for the duration of the returned borrow.
+ /// This is trivially the case if no such pointers exist,
+ /// for example immediately after `Arc::new`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut x = Arc::new(String::new());
+ /// unsafe {
+ /// Arc::get_mut_unchecked(&mut x).push_str("foo")
+ /// }
+ /// assert_eq!(*x, "foo");
+ /// ```
+ #[inline]
+ #[unstable(feature = "get_mut_unchecked", issue = "63292")]
+ pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
+ // We are careful to *not* create a reference covering the "count" fields, as
+ // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
+ unsafe { &mut (*this.ptr.as_ptr()).data }
+ }
+
+ /// Determine whether this is the unique reference (including weak refs) to
+ /// the underlying data.
+ ///
+ /// Note that this requires locking the weak ref count.
+ fn is_unique(&mut self) -> bool {
+ // lock the weak pointer count if we appear to be the sole weak pointer
+ // holder.
+ //
+ // The acquire label here ensures a happens-before relationship with any
+ // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
+ // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
+ // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
+ if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
+ // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
+ // counter in `drop` -- the only access that happens when any but the last reference
+ // is being dropped.
+ let unique = self.inner().strong.load(Acquire) == 1;
+
+ // The release write here synchronizes with a read in `downgrade`,
+ // effectively preventing the above read of `strong` from happening
+ // after the write.
+ self.inner().weak.store(1, Release); // release the lock
+ unique
+ } else {
+ false
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
+ /// Drops the `Arc`.
+ ///
+ /// This will decrement the strong reference count. If the strong reference
+ /// count reaches zero then the only other references (if any) are
+ /// [`Weak`], so we `drop` the inner value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// struct Foo;
+ ///
+ /// impl Drop for Foo {
+ /// fn drop(&mut self) {
+ /// println!("dropped!");
+ /// }
+ /// }
+ ///
+ /// let foo = Arc::new(Foo);
+ /// let foo2 = Arc::clone(&foo);
+ ///
+ /// drop(foo); // Doesn't print anything
+ /// drop(foo2); // Prints "dropped!"
+ /// ```
+ #[inline]
+ fn drop(&mut self) {
+ // Because `fetch_sub` is already atomic, we do not need to synchronize
+ // with other threads unless we are going to delete the object. This
+ // same logic applies to the below `fetch_sub` to the `weak` count.
+ if self.inner().strong.fetch_sub(1, Release) != 1 {
+ return;
+ }
+
+ // This fence is needed to prevent reordering of use of the data and
+ // deletion of the data. Because it is marked `Release`, the decreasing
+ // of the reference count synchronizes with this `Acquire` fence. This
+ // means that use of the data happens before decreasing the reference
+ // count, which happens before this fence, which happens before the
+ // deletion of the data.
+ //
+ // As explained in the [Boost documentation][1],
+ //
+ // > It is important to enforce any possible access to the object in one
+ // > thread (through an existing reference) to *happen before* deleting
+ // > the object in a different thread. This is achieved by a "release"
+ // > operation after dropping a reference (any access to the object
+ // > through this reference must obviously happen before), and an
+ // > "acquire" operation before deleting the object.
+ //
+ // In particular, while the contents of an Arc are usually immutable, it's
+ // possible to have interior writes to something like a Mutex<T>. Since a
+ // Mutex is not acquired when it is deleted, we can't rely on its
+ // synchronization logic to make writes in thread A visible to a destructor
+ // running in thread B.
+ //
+ // Also note that the Acquire fence here could probably be replaced with an
+ // Acquire load, which could improve performance in highly-contended
+ // situations. See [2].
+ //
+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+ // [2]: (https://github.com/rust-lang/rust/pull/41714)
+ acquire!(self.inner().strong);
+
+ unsafe {
+ self.drop_slow();
+ }
+ }
+}
+
+impl Arc<dyn Any + Send + Sync> {
+ #[inline]
+ #[stable(feature = "rc_downcast", since = "1.29.0")]
+ /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ /// use std::sync::Arc;
+ ///
+ /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Arc::new(my_string));
+ /// print_if_string(Arc::new(0i8));
+ /// ```
+ pub fn downcast<T>(self) -> Result<Arc<T>, Self>
+ where
+ T: Any + Send + Sync + 'static,
+ {
+ if (*self).is::<T>() {
+ let ptr = self.ptr.cast::<ArcInner<T>>();
+ mem::forget(self);
+ Ok(Arc::from_inner(ptr))
+ } else {
+ Err(self)
+ }
+ }
+}
+
+impl<T> Weak<T> {
+ /// Constructs a new `Weak<T>`, without allocating any memory.
+ /// Calling [`upgrade`] on the return value always gives [`None`].
+ ///
+ /// [`upgrade`]: Weak::upgrade
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Weak;
+ ///
+ /// let empty: Weak<i64> = Weak::new();
+ /// assert!(empty.upgrade().is_none());
+ /// ```
+ #[stable(feature = "downgraded_weak", since = "1.10.0")]
+ pub fn new() -> Weak<T> {
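+ // `usize::MAX` is the sentinel address for a dangling `Weak` that owns no
+ // allocation; `is_dangling` recognizes exactly this value.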
+ Weak { ptr: NonNull::new(usize::MAX as *mut ArcInner<T>).expect("MAX is not 0") }
+ }
+}
+
+/// Helper type to allow accessing the reference counts without
+/// making any assertions about the data field.
+struct WeakInner<'a> {
+ weak: &'a atomic::AtomicUsize,
+ strong: &'a atomic::AtomicUsize,
+}
+
+impl<T: ?Sized> Weak<T> {
+ /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
+ ///
+ /// The pointer is valid only if there are some strong references. The pointer may be dangling,
+ /// unaligned or even [`null`] otherwise.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use std::ptr;
+ ///
+ /// let strong = Arc::new("hello".to_owned());
+ /// let weak = Arc::downgrade(&strong);
+ /// // Both point to the same object
+ /// assert!(ptr::eq(&*strong, weak.as_ptr()));
+ /// // The strong here keeps it alive, so we can still access the object.
+ /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
+ ///
+ /// drop(strong);
+ /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
+ /// // undefined behaviour.
+ /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
+ /// ```
+ ///
+ /// [`null`]: core::ptr::null
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub fn as_ptr(&self) -> *const T {
+ let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
+
+ if is_dangling(ptr) {
+ // If the pointer is dangling, we return the sentinel directly. This cannot be
+ // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
+ ptr as *const T
+ } else {
+ // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
+ // The payload may be dropped at this point, and we have to maintain provenance,
+ // so use raw pointer manipulation.
+ unsafe { ptr::addr_of_mut!((*ptr).data) }
+ }
+ }
+
+ /// Consumes the `Weak<T>` and turns it into a raw pointer.
+ ///
+ /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
+ /// one weak reference (the weak count is not modified by this operation). It can be turned
+ /// back into the `Weak<T>` with [`from_raw`].
+ ///
+ /// The same restrictions of accessing the target of the pointer as with
+ /// [`as_ptr`] apply.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// let strong = Arc::new("hello".to_owned());
+ /// let weak = Arc::downgrade(&strong);
+ /// let raw = weak.into_raw();
+ ///
+ /// assert_eq!(1, Arc::weak_count(&strong));
+ /// assert_eq!("hello", unsafe { &*raw });
+ ///
+ /// drop(unsafe { Weak::from_raw(raw) });
+ /// assert_eq!(0, Arc::weak_count(&strong));
+ /// ```
+ ///
+ /// [`from_raw`]: Weak::from_raw
+ /// [`as_ptr`]: Weak::as_ptr
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub fn into_raw(self) -> *const T {
+ let result = self.as_ptr();
+ mem::forget(self);
+ result
+ }
+
+ /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
+ ///
+ /// This can be used to safely get a strong reference (by calling [`upgrade`]
+ /// later) or to release the weak reference (by dropping the `Weak<T>`).
+ ///
+ /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
+ /// as these don't own anything; the method still works on them).
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have originated from the [`into_raw`] and must still own its potential
+ /// weak reference.
+ ///
+ /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
+ /// takes ownership of one weak reference currently represented as a raw pointer (the weak
+ /// count is not modified by this operation) and therefore it must be paired with a previous
+ /// call to [`into_raw`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// let strong = Arc::new("hello".to_owned());
+ ///
+ /// let raw_1 = Arc::downgrade(&strong).into_raw();
+ /// let raw_2 = Arc::downgrade(&strong).into_raw();
+ ///
+ /// assert_eq!(2, Arc::weak_count(&strong));
+ ///
+ /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
+ /// assert_eq!(1, Arc::weak_count(&strong));
+ ///
+ /// drop(strong);
+ ///
+ /// // Decrement the last weak count.
+ /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
+ /// ```
+ ///
+ /// [`new`]: Weak::new
+ /// [`into_raw`]: Weak::into_raw
+ /// [`upgrade`]: Weak::upgrade
+ /// [`forget`]: std::mem::forget
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // See Weak::as_ptr for context on how the input pointer is derived.
+
+ let ptr = if is_dangling(ptr as *mut T) {
+ // This is a dangling Weak.
+ ptr as *mut ArcInner<T>
+ } else {
+ // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
+ // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
+ let offset = unsafe { data_offset(ptr) };
+ // Thus, we reverse the offset to get the whole ArcInner.
+ // SAFETY: the pointer originated from a Weak, so this offset is safe.
+ unsafe { (ptr as *mut ArcInner<T>).set_ptr_value((ptr as *mut u8).offset(-offset)) }
+ };
+
+ // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
+ Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } }
+ }
+}
+
+impl<T: ?Sized> Weak<T> {
+ /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
+ /// dropping of the inner value if successful.
+ ///
+ /// Returns [`None`] if the inner value has since been dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// let weak_five = Arc::downgrade(&five);
+ ///
+ /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
+ /// assert!(strong_five.is_some());
+ ///
+ /// // Destroy all strong pointers.
+ /// drop(strong_five);
+ /// drop(five);
+ ///
+ /// assert!(weak_five.upgrade().is_none());
+ /// ```
+ #[stable(feature = "arc_weak", since = "1.4.0")]
+ pub fn upgrade(&self) -> Option<Arc<T>> {
+ // We use a CAS loop to increment the strong count instead of a
+ // fetch_add as this function should never take the reference count
+ // from zero to one.
+ let inner = self.inner()?;
+
+ // Relaxed load because any write of 0 that we can observe
+ // leaves the field in a permanently zero state (so a
+ // "stale" read of 0 is fine), and any other value is
+ // confirmed via the CAS below.
+ let mut n = inner.strong.load(Relaxed);
+
+ loop {
+ if n == 0 {
+ return None;
+ }
+
+ // See comments in `Arc::clone` for why we do this (for `mem::forget`).
+ if n > MAX_REFCOUNT {
+ abort();
+ }
+
+ // Relaxed is fine for the failure case because we don't have any expectations about the new state.
+ // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
+ // value can be initialized after `Weak` references have already been created. In that case, we
+ // expect to observe the fully initialized value.
+ match inner.strong.compare_exchange_weak(n, n + 1, Acquire, Relaxed) {
+ Ok(_) => return Some(Arc::from_inner(self.ptr)), // null checked above
+ Err(old) => n = old,
+ }
+ }
+ }
+
+ /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
+ ///
+ /// If `self` was created using [`Weak::new`], this will return 0.
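+ ///
+ /// # Examples
+ ///
+ /// A single-threaded example (the counts are deterministic because the
+ /// allocation is not shared across threads):
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ /// let weak_five = Arc::downgrade(&five);
+ /// assert_eq!(1, weak_five.strong_count());
+ ///
+ /// // Once the last `Arc` is dropped, the count observed through the
+ /// // `Weak` reads 0.
+ /// drop(five);
+ /// assert_eq!(0, weak_five.strong_count());
+ /// ```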
+ #[stable(feature = "weak_counts", since = "1.41.0")]
+ pub fn strong_count(&self) -> usize {
+ if let Some(inner) = self.inner() { inner.strong.load(SeqCst) } else { 0 }
+ }
+
+ /// Gets an approximation of the number of `Weak` pointers pointing to this
+ /// allocation.
+ ///
+ /// If `self` was created using [`Weak::new`], or if there are no remaining
+ /// strong pointers, this will return 0.
+ ///
+ /// # Accuracy
+ ///
+ /// Due to implementation details, the returned value can be off by 1 in
+ /// either direction when other threads are manipulating any `Arc`s or
+ /// `Weak`s pointing to the same allocation.
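+ ///
+ /// # Examples
+ ///
+ /// A single-threaded example, where the count is exact because no other
+ /// thread is manipulating the allocation:
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// let five = Arc::new(5);
+ /// let weak_five = Arc::downgrade(&five);
+ /// assert_eq!(1, weak_five.weak_count());
+ ///
+ /// let _also_weak = Weak::clone(&weak_five);
+ /// assert_eq!(2, weak_five.weak_count());
+ /// ```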
+ #[stable(feature = "weak_counts", since = "1.41.0")]
+ pub fn weak_count(&self) -> usize {
+ self.inner()
+ .map(|inner| {
+ let weak = inner.weak.load(SeqCst);
+ let strong = inner.strong.load(SeqCst);
+ if strong == 0 {
+ 0
+ } else {
+ // Since we observed that there was at least one strong pointer
+ // after reading the weak count, we know that the implicit weak
+ // reference (present whenever any strong references are alive)
+ // was still around when we observed the weak count, and can
+ // therefore safely subtract it.
+ weak - 1
+ }
+ })
+ .unwrap_or(0)
+ }
+
+ /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
+ /// (i.e., when this `Weak` was created by `Weak::new`).
+ #[inline]
+ fn inner(&self) -> Option<WeakInner<'_>> {
+ if is_dangling(self.ptr.as_ptr()) {
+ None
+ } else {
+ // We are careful to *not* create a reference covering the "data" field, as
+ // the field may be mutated concurrently (for example, if the last `Arc`
+ // is dropped, the data field will be dropped in-place).
+ Some(unsafe {
+ let ptr = self.ptr.as_ptr();
+ WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak }
+ })
+ }
+ }
+
+ /// Returns `true` if the two `Weak`s point to the same allocation (similar to
+ /// [`ptr::eq`]), or if both don't point to any allocation
+ /// (because they were created with `Weak::new()`).
+ ///
+ /// # Notes
+ ///
+ /// Since this compares pointers, two `Weak`s created by `Weak::new()` will compare
+ /// equal to each other, even though they don't point to any allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let first_rc = Arc::new(5);
+ /// let first = Arc::downgrade(&first_rc);
+ /// let second = Arc::downgrade(&first_rc);
+ ///
+ /// assert!(first.ptr_eq(&second));
+ ///
+ /// let third_rc = Arc::new(5);
+ /// let third = Arc::downgrade(&third_rc);
+ ///
+ /// assert!(!first.ptr_eq(&third));
+ /// ```
+ ///
+ /// Comparing `Weak::new`.
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// let first = Weak::new();
+ /// let second = Weak::new();
+ /// assert!(first.ptr_eq(&second));
+ ///
+ /// let third_rc = Arc::new(());
+ /// let third = Arc::downgrade(&third_rc);
+ /// assert!(!first.ptr_eq(&third));
+ /// ```
+ ///
+ /// [`ptr::eq`]: core::ptr::eq
+ #[inline]
+ #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
+ pub fn ptr_eq(&self, other: &Self) -> bool {
+ self.ptr.as_ptr() == other.ptr.as_ptr()
+ }
+}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+impl<T: ?Sized> Clone for Weak<T> {
+ /// Makes a clone of the `Weak` pointer that points to the same allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// let weak_five = Arc::downgrade(&Arc::new(5));
+ ///
+ /// let _ = Weak::clone(&weak_five);
+ /// ```
+ #[inline]
+ fn clone(&self) -> Weak<T> {
+ let inner = if let Some(inner) = self.inner() {
+ inner
+ } else {
+ return Weak { ptr: self.ptr };
+ };
+ // See comments in Arc::clone() for why this is relaxed. This can use a
+ // fetch_add (ignoring the lock) because the weak count is only locked
+ // when there are *no other* weak pointers in existence (so we can't be
+ // running this code in that case).
+ let old_size = inner.weak.fetch_add(1, Relaxed);
+
+ // See comments in Arc::clone() for why we do this (for mem::forget).
+ if old_size > MAX_REFCOUNT {
+ abort();
+ }
+
+ Weak { ptr: self.ptr }
+ }
+}
+
+#[stable(feature = "downgraded_weak", since = "1.10.0")]
+impl<T> Default for Weak<T> {
+ /// Constructs a new `Weak<T>`, without allocating memory.
+ /// Calling [`upgrade`] on the return value always
+ /// gives [`None`].
+ ///
+ /// [`upgrade`]: Weak::upgrade
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Weak;
+ ///
+ /// let empty: Weak<i64> = Default::default();
+ /// assert!(empty.upgrade().is_none());
+ /// ```
+ fn default() -> Weak<T> {
+ Weak::new()
+ }
+}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+unsafe impl<#[may_dangle] T: ?Sized> Drop for Weak<T> {
+ /// Drops the `Weak` pointer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// struct Foo;
+ ///
+ /// impl Drop for Foo {
+ /// fn drop(&mut self) {
+ /// println!("dropped!");
+ /// }
+ /// }
+ ///
+ /// let foo = Arc::new(Foo);
+ /// let weak_foo = Arc::downgrade(&foo);
+ /// let other_weak_foo = Weak::clone(&weak_foo);
+ ///
+ /// drop(weak_foo); // Doesn't print anything
+ /// drop(foo); // Prints "dropped!"
+ ///
+ /// assert!(other_weak_foo.upgrade().is_none());
+ /// ```
+ fn drop(&mut self) {
+ // If we find out that we were the last weak pointer, then it's time to
+ // deallocate the data entirely. See the discussion in Arc::drop() about
+ // the memory orderings.
+ //
+ // It's not necessary to check for the locked state here, because the
+ // weak count can only be locked if there was precisely one weak ref,
+ // meaning that drop could only subsequently run ON that remaining weak
+ // ref, which can only happen after the lock is released.
+ let inner = if let Some(inner) = self.inner() { inner } else { return };
+
+ if inner.weak.fetch_sub(1, Release) == 1 {
+ acquire!(inner.weak);
+ unsafe { Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr())) }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+trait ArcEqIdent<T: ?Sized + PartialEq> {
+ fn eq(&self, other: &Arc<T>) -> bool;
+ fn ne(&self, other: &Arc<T>) -> bool;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
+ #[inline]
+ default fn eq(&self, other: &Arc<T>) -> bool {
+ **self == **other
+ }
+ #[inline]
+ default fn ne(&self, other: &Arc<T>) -> bool {
+ **self != **other
+ }
+}
+
+/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
+/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
+/// store large values that are slow to clone but also heavy to check for equality, causing this
+/// cost to pay off more easily. It's also more likely to have two `Arc` clones that point to
+/// the same value than two `&T`s.
+///
+/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
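+///
+/// For example, when `T: Eq`, comparing an `Arc` with one of its own clones
+/// short-circuits on the pointer comparison and never reads the data:
+///
+/// ```
+/// use std::sync::Arc;
+///
+/// let a = Arc::new(vec![0u8; 1 << 20]);
+/// let b = Arc::clone(&a);
+/// assert!(a == b); // resolved by `Arc::ptr_eq`; the bytes are never compared
+/// ```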
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
+ #[inline]
+ fn eq(&self, other: &Arc<T>) -> bool {
+ Arc::ptr_eq(self, other) || **self == **other
+ }
+
+ #[inline]
+ fn ne(&self, other: &Arc<T>) -> bool {
+ !Arc::ptr_eq(self, other) && **self != **other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
+ /// Equality for two `Arc`s.
+ ///
+ /// Two `Arc`s are equal if their inner values are equal, even if they are
+ /// stored in different allocations.
+ ///
+ /// If `T` also implements `Eq` (implying reflexivity of equality),
+ /// two `Arc`s that point to the same allocation are always equal.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five == Arc::new(5));
+ /// ```
+ #[inline]
+ fn eq(&self, other: &Arc<T>) -> bool {
+ ArcEqIdent::eq(self, other)
+ }
+
+ /// Inequality for two `Arc`s.
+ ///
+ /// Two `Arc`s are unequal if their inner values are unequal.
+ ///
+ /// If `T` also implements `Eq` (implying reflexivity of equality),
+ /// two `Arc`s that point to the same value are never unequal.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five != Arc::new(6));
+ /// ```
+ #[inline]
+ fn ne(&self, other: &Arc<T>) -> bool {
+ ArcEqIdent::ne(self, other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
+ /// Partial comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `partial_cmp()` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use std::cmp::Ordering;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
+ /// ```
+ fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
+ (**self).partial_cmp(&**other)
+ }
+
+ /// Less-than comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `<` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five < Arc::new(6));
+ /// ```
+ fn lt(&self, other: &Arc<T>) -> bool {
+ *(*self) < *(*other)
+ }
+
+ /// 'Less than or equal to' comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `<=` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five <= Arc::new(5));
+ /// ```
+ fn le(&self, other: &Arc<T>) -> bool {
+ *(*self) <= *(*other)
+ }
+
+ /// Greater-than comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `>` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five > Arc::new(4));
+ /// ```
+ fn gt(&self, other: &Arc<T>) -> bool {
+ *(*self) > *(*other)
+ }
+
+ /// 'Greater than or equal to' comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `>=` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five >= Arc::new(5));
+ /// ```
+ fn ge(&self, other: &Arc<T>) -> bool {
+ *(*self) >= *(*other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Ord> Ord for Arc<T> {
+ /// Comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `cmp()` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use std::cmp::Ordering;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
+ /// ```
+ fn cmp(&self, other: &Arc<T>) -> Ordering {
+ (**self).cmp(&**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Eq> Eq for Arc<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> fmt::Pointer for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&(&**self as *const T), f)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Arc<T> {
+ /// Creates a new `Arc<T>`, with the `Default` value for `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let x: Arc<i32> = Default::default();
+ /// assert_eq!(*x, 0);
+ /// ```
+ fn default() -> Arc<T> {
+ Arc::new(Default::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Hash> Hash for Arc<T> {
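+ /// `Arc<T>` delegates hashing to the inner value, so an `Arc<T>` and the
+ /// plain `T` it points to hash identically:
+ ///
+ /// ```
+ /// use std::collections::hash_map::DefaultHasher;
+ /// use std::hash::{Hash, Hasher};
+ /// use std::sync::Arc;
+ ///
+ /// fn hash_of<T: Hash>(t: &T) -> u64 {
+ /// let mut h = DefaultHasher::new();
+ /// t.hash(&mut h);
+ /// h.finish()
+ /// }
+ ///
+ /// assert_eq!(hash_of(&Arc::new(5)), hash_of(&5));
+ /// ```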
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_for_ptrs", since = "1.6.0")]
+impl<T> From<T> for Arc<T> {
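+ /// Converts a `T` into an `Arc<T>`, moving the value into a new,
+ /// reference-counted allocation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let x: Arc<i32> = Arc::from(5);
+ /// assert_eq!(5, *x);
+ /// ```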
+ fn from(t: T) -> Self {
+ Arc::new(t)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T: Clone> From<&[T]> for Arc<[T]> {
+ /// Allocate a reference-counted slice and fill it by cloning `v`'s items.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let original: &[i32] = &[1, 2, 3];
+ /// let shared: Arc<[i32]> = Arc::from(original);
+ /// assert_eq!(&[1, 2, 3], &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: &[T]) -> Arc<[T]> {
+ <Self as ArcFromSlice<T>>::from_slice(v)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl From<&str> for Arc<str> {
+ /// Allocate a reference-counted `str` and copy `v` into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let shared: Arc<str> = Arc::from("eggplant");
+ /// assert_eq!("eggplant", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: &str) -> Arc<str> {
+ let arc = Arc::<[u8]>::from(v.as_bytes());
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl From<String> for Arc<str> {
+ /// Allocate a reference-counted `str` and copy `v` into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let unique: String = "eggplant".to_owned();
+ /// let shared: Arc<str> = Arc::from(unique);
+ /// assert_eq!("eggplant", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: String) -> Arc<str> {
+ Arc::from(&v[..])
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T: ?Sized> From<Box<T>> for Arc<T> {
+ /// Move a boxed object to a new, reference-counted allocation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let unique: Box<str> = Box::from("eggplant");
+ /// let shared: Arc<str> = Arc::from(unique);
+ /// assert_eq!("eggplant", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: Box<T>) -> Arc<T> {
+ Arc::from_box(v)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T> From<Vec<T>> for Arc<[T]> {
+ /// Allocate a reference-counted slice and move `v`'s items into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let unique: Vec<i32> = vec![1, 2, 3];
+ /// let shared: Arc<[i32]> = Arc::from(unique);
+ /// assert_eq!(&[1, 2, 3], &shared[..]);
+ /// ```
+ #[inline]
+ fn from(mut v: Vec<T>) -> Arc<[T]> {
+ unsafe {
+ let arc = Arc::copy_from_slice(&v);
+
+ // Allow the Vec to free its memory, but not destroy its contents
+ v.set_len(0);
+
+ arc
+ }
+ }
+}
+
+// Provide an inherent method instead of implementing `TryFrom`, to avoid
+// `error: specializing impl repeats parameter`.
+impl<T> Arc<[T]> {
+ /// Tries to allocate a reference-counted slice and move `v`'s items into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let unique: Vec<i32> = vec![1, 2, 3];
+ /// let shared: Arc<[i32]> = Arc::try_from_vec(unique).unwrap();
+ /// assert_eq!(&[1, 2, 3], &shared[..]);
+ /// ```
+ #[stable(feature = "kernel", since = "1.0.0")]
+ #[inline]
+ pub fn try_from_vec(mut v: Vec<T>) -> Result<Self, TryReserveError> {
+ unsafe {
+ let arc = Arc::try_copy_from_slice(&v)?;
+
+ // Allow the Vec to free its memory, but not destroy its contents
+ v.set_len(0);
+
+ Ok(arc)
+ }
+ }
+}
+
+#[stable(feature = "shared_from_cow", since = "1.45.0")]
+impl<'a, B> From<Cow<'a, B>> for Arc<B>
+where
+ B: ToOwned + ?Sized,
+ Arc<B>: From<&'a B> + From<B::Owned>,
+{
+ /// Create an atomically reference-counted pointer from
+ /// a clone-on-write pointer by copying its content.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// # use std::sync::Arc;
+ /// # use std::borrow::Cow;
+ /// let cow: Cow<str> = Cow::Borrowed("eggplant");
+ /// let shared: Arc<str> = Arc::from(cow);
+ /// assert_eq!("eggplant", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(cow: Cow<'a, B>) -> Arc<B> {
+ match cow {
+ Cow::Borrowed(s) => Arc::from(s),
+ Cow::Owned(s) => Arc::from(s),
+ }
+ }
+}
+
+#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
+impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
+ type Error = Arc<[T]>;
+
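+ /// Converts when the slice length matches `N`; otherwise the original
+ /// `Arc<[T]>` is handed back unchanged as the error. A short sketch:
+ ///
+ /// ```
+ /// use std::convert::TryFrom;
+ /// use std::sync::Arc;
+ ///
+ /// let slice: Arc<[i32]> = Arc::from(vec![1, 2, 3]);
+ /// let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
+ /// assert_eq!([1, 2, 3], *array);
+ /// ```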
+ fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
+ if boxed_slice.len() == N {
+ Ok(unsafe { Arc::from_raw(Arc::into_raw(boxed_slice) as *mut [T; N]) })
+ } else {
+ Err(boxed_slice)
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_iter", since = "1.37.0")]
+impl<T> iter::FromIterator<T> for Arc<[T]> {
+ /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
+ ///
+ /// # Performance characteristics
+ ///
+ /// ## The general case
+ ///
+ /// In the general case, collecting into `Arc<[T]>` is done by first
+ /// collecting into a `Vec<T>`. That is, when writing the following:
+ ///
+ /// ```rust
+ /// # use std::sync::Arc;
+ /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
+ /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+ /// ```
+ ///
+ /// this behaves as if we wrote:
+ ///
+ /// ```rust
+ /// # use std::sync::Arc;
+ /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
+ /// .collect::<Vec<_>>() // The first set of allocations happens here.
+ /// .into(); // A second allocation for `Arc<[T]>` happens here.
+ /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+ /// ```
+ ///
+ /// This will allocate as many times as needed for constructing the `Vec<T>`
+ /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
+ ///
+ /// ## Iterators of known length
+ ///
+ /// When your `Iterator` implements `TrustedLen` and is of an exact size,
+ /// a single allocation will be made for the `Arc<[T]>`. For example:
+ ///
+ /// ```rust
+ /// # use std::sync::Arc;
+ /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
+ /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
+ /// ```
+ fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+ ToArcSlice::to_arc_slice(iter.into_iter())
+ }
+}
+
+/// Specialization trait used for collecting into `Arc<[T]>`.
+trait ToArcSlice<T>: Iterator<Item = T> + Sized {
+ fn to_arc_slice(self) -> Arc<[T]>;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
+ default fn to_arc_slice(self) -> Arc<[T]> {
+ self.collect::<Vec<T>>().into()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
+ fn to_arc_slice(self) -> Arc<[T]> {
+ // This is the case for a `TrustedLen` iterator.
+ let (low, high) = self.size_hint();
+ if let Some(high) = high {
+ debug_assert_eq!(
+ low,
+ high,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+
+ unsafe {
+ // SAFETY: We need the iterator to have an exact length, and `TrustedLen`
+ // (with the size hint checked above) guarantees exactly that.
+ Arc::from_iter_exact(self, low)
+ }
+ } else {
+ // The `TrustedLen` contract guarantees that `upper_bound == None` implies an
+ // iterator length exceeding `usize::MAX`.
+ // The default implementation would collect into a vec which would panic.
+ // Thus we panic here immediately without invoking `Vec` code.
+ panic!("capacity overflow");
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
+ fn borrow(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized> AsRef<T> for Arc<T> {
+ fn as_ref(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<T: ?Sized> Unpin for Arc<T> {}
+
+/// Get the offset within an `ArcInner` for the payload behind a pointer.
+///
+/// # Safety
+///
+/// The pointer must point to (and have valid metadata for) a previously
+/// valid instance of T, but the T is allowed to be dropped.
+unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
+ // Align the unsized value to the end of the ArcInner.
+ // Because ArcInner is repr(C), its data field will always be the last field in memory.
+ // SAFETY: since the only unsized types possible are slices, trait objects,
+ // and extern types, the input safety requirement is currently enough to
+ // satisfy the requirements of align_of_val_raw; this is an implementation
+ // detail of the language that may not be relied upon outside of std.
+ unsafe { data_offset_align(align_of_val_raw(ptr)) }
+}
+
+#[inline]
+fn data_offset_align(align: usize) -> isize {
+ let layout = Layout::new::<ArcInner<()>>();
+ (layout.size() + layout.padding_needed_for(align)) as isize
+}
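+
+// Worked sketch of the computation above, assuming a typical 64-bit target:
+// `ArcInner<()>` is just the two `usize` reference counts, so `layout.size()`
+// is 16. A payload with alignment 8 or 16 then needs no padding (data offset
+// 16), while a payload with alignment 32 needs 16 bytes of padding (data
+// offset 32).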
diff --git a/rust/alloc/vec/drain.rs b/rust/alloc/vec/drain.rs
new file mode 100644
index 000000000000..65f7e32e95fd
--- /dev/null
+++ b/rust/alloc/vec/drain.rs
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::alloc::{Allocator, Global};
+use core::fmt;
+use core::iter::{FusedIterator, TrustedLen};
+use core::mem::{self};
+use core::ptr::{self, NonNull};
+use core::slice::{self};
+
+use super::Vec;
+
+/// A draining iterator for `Vec<T>`.
+///
+/// This `struct` is created by [`Vec::drain`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// let mut v = vec![0, 1, 2];
+/// let iter: std::vec::Drain<_> = v.drain(..);
+/// ```
+#[stable(feature = "drain", since = "1.6.0")]
+pub struct Drain<
+ 'a,
+ T: 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
+> {
+ /// Index of tail to preserve
+ pub(super) tail_start: usize,
+ /// Length of tail
+ pub(super) tail_len: usize,
+ /// Current remaining range to remove
+ pub(super) iter: slice::Iter<'a, T>,
+ pub(super) vec: NonNull<Vec<T, A>>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
+ }
+}
+
+impl<'a, T, A: Allocator> Drain<'a, T, A> {
+ /// Returns the remaining items of this iterator as a slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain(..);
+ /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
+ /// let _ = drain.next().unwrap();
+ /// assert_eq!(drain.as_slice(), &['b', 'c']);
+ /// ```
+ #[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
+ pub fn as_slice(&self) -> &[T] {
+ self.iter.as_slice()
+ }
+
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ unsafe { self.vec.as_ref().allocator() }
+ }
+}
+
+#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
+impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next().map(|elt| unsafe { ptr::read(elt as *const _) })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) })
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Drop for Drain<'_, T, A> {
+ fn drop(&mut self) {
+ /// Continues dropping the remaining elements in the `Drain`, then moves back the
+ /// un-`Drain`ed elements to restore the original `Vec`.
+ struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
+
+ impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
+ fn drop(&mut self) {
+ // Continue the same loop we have below. If the loop already finished, this does
+ // nothing.
+ self.0.for_each(drop);
+
+ if self.0.tail_len > 0 {
+ unsafe {
+ let source_vec = self.0.vec.as_mut();
+ // memmove back untouched tail, update to new length
+ let start = source_vec.len();
+ let tail = self.0.tail_start;
+ if tail != start {
+ let src = source_vec.as_ptr().add(tail);
+ let dst = source_vec.as_mut_ptr().add(start);
+ ptr::copy(src, dst, self.0.tail_len);
+ }
+ source_vec.set_len(start + self.0.tail_len);
+ }
+ }
+ }
+ }
+
+ // exhaust self first
+ while let Some(item) = self.next() {
+ let guard = DropGuard(self);
+ drop(item);
+ mem::forget(guard);
+ }
+
+ // Drop a `DropGuard` to move back the non-drained tail of `self`.
+ DropGuard(self);
+ }
+}
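+
+// A minimal behavioral sketch of the drop logic above, using `std` types:
+// whatever was not yielded is dropped, and the preserved tail is moved back so
+// the source vector remains valid.
+//
+//     let mut v = vec![1, 2, 3, 4, 5];
+//     let drained: Vec<_> = v.drain(1..3).collect();
+//     assert_eq!(drained, [2, 3]);
+//     assert_eq!(v, [1, 4, 5]); // tail moved back when `Drain` was dropped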
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T, A: Allocator> TrustedLen for Drain<'_, T, A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
diff --git a/rust/alloc/vec/drain_filter.rs b/rust/alloc/vec/drain_filter.rs
new file mode 100644
index 000000000000..b04fce041622
--- /dev/null
+++ b/rust/alloc/vec/drain_filter.rs
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::alloc::{Allocator, Global};
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::Vec;
+
+/// An iterator which uses a closure to determine if an element should be removed.
+///
+/// This struct is created by [`Vec::drain_filter`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// #![feature(drain_filter)]
+///
+/// let mut v = vec![0, 1, 2];
+/// let iter: std::vec::DrainFilter<_, _> = v.drain_filter(|x| *x % 2 == 0);
+/// ```
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+#[derive(Debug)]
+pub struct DrainFilter<
+ 'a,
+ T,
+ F,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> where
+ F: FnMut(&mut T) -> bool,
+{
+ pub(super) vec: &'a mut Vec<T, A>,
+ /// The index of the item that will be inspected by the next call to `next`.
+ pub(super) idx: usize,
+ /// The number of items that have been drained (removed) thus far.
+ pub(super) del: usize,
+ /// The original length of `vec` prior to draining.
+ pub(super) old_len: usize,
+ /// The filter test predicate.
+ pub(super) pred: F,
+ /// A flag that indicates a panic has occurred in the filter test predicate.
+ /// This is used as a hint in the drop implementation to prevent consumption
+ /// of the remainder of the `DrainFilter`. Any unprocessed items will be
+ /// backshifted in the `vec`, but no further items will be dropped or
+ /// tested by the filter predicate.
+ pub(super) panic_flag: bool,
+}
+
+impl<T, F, A: Allocator> DrainFilter<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ self.vec.allocator()
+ }
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T, F, A: Allocator> Iterator for DrainFilter<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ unsafe {
+ while self.idx < self.old_len {
+ let i = self.idx;
+ let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
+ self.panic_flag = true;
+ let drained = (self.pred)(&mut v[i]);
+ self.panic_flag = false;
+ // Update the index *after* the predicate is called. If the index
+ // is updated prior and the predicate panics, the element at this
+ // index would be leaked.
+ self.idx += 1;
+ if drained {
+ self.del += 1;
+ return Some(ptr::read(&v[i]));
+ } else if self.del > 0 {
+ let del = self.del;
+ let src: *const T = &v[i];
+ let dst: *mut T = &mut v[i - del];
+ ptr::copy_nonoverlapping(src, dst, 1);
+ }
+ }
+ None
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(self.old_len - self.idx))
+ }
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T, F, A: Allocator> Drop for DrainFilter<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ fn drop(&mut self) {
+ struct BackshiftOnDrop<'a, 'b, T, F, A: Allocator>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ drain: &'b mut DrainFilter<'a, T, F, A>,
+ }
+
+ impl<'a, 'b, T, F, A: Allocator> Drop for BackshiftOnDrop<'a, 'b, T, F, A>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ fn drop(&mut self) {
+ unsafe {
+ if self.drain.idx < self.drain.old_len && self.drain.del > 0 {
+ // This is a pretty messed up state, and there isn't really an
+ // obviously right thing to do. We don't want to keep trying
+ // to execute `pred`, so we just backshift all the unprocessed
+ // elements and tell the vec that they still exist. The backshift
+ // is required to prevent a double-drop of the last successfully
+ // drained item prior to a panic in the predicate.
+ let ptr = self.drain.vec.as_mut_ptr();
+ let src = ptr.add(self.drain.idx);
+ let dst = src.sub(self.drain.del);
+ let tail_len = self.drain.old_len - self.drain.idx;
+ src.copy_to(dst, tail_len);
+ }
+ self.drain.vec.set_len(self.drain.old_len - self.drain.del);
+ }
+ }
+ }
+
+ let backshift = BackshiftOnDrop { drain: self };
+
+ // Attempt to consume any remaining elements if the filter predicate
+ // has not yet panicked. We'll backshift any remaining elements
+ // whether we've already panicked or if the consumption here panics.
+ if !backshift.drain.panic_flag {
+ backshift.drain.for_each(drop);
+ }
+ }
+}
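+
+// A minimal behavioral sketch of the logic above, using `std` with the
+// nightly `drain_filter` feature: matching elements are yielded, the rest are
+// backshifted in place and kept.
+//
+//     #![feature(drain_filter)]
+//     let mut v = vec![1, 2, 3, 4, 5, 6];
+//     let evens: Vec<_> = v.drain_filter(|x| *x % 2 == 0).collect();
+//     assert_eq!(evens, [2, 4, 6]);
+//     assert_eq!(v, [1, 3, 5]);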
diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs
new file mode 100644
index 000000000000..86a167aad103
--- /dev/null
+++ b/rust/alloc/vec/into_iter.rs
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::alloc::{Allocator, Global};
+use crate::raw_vec::RawVec;
+use core::fmt;
+use core::intrinsics::arith_offset;
+use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccess};
+use core::marker::PhantomData;
+use core::mem::{self};
+use core::ptr::{self, NonNull};
+use core::slice::{self};
+
+/// An iterator that moves out of a vector.
+///
+/// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec)
+/// (provided by the [`IntoIterator`] trait).
+///
+/// # Example
+///
+/// ```
+/// let v = vec![0, 1, 2];
+/// let iter: std::vec::IntoIter<_> = v.into_iter();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
+ pub(super) buf: NonNull<T>,
+ pub(super) phantom: PhantomData<T>,
+ pub(super) cap: usize,
+ pub(super) alloc: A,
+ pub(super) ptr: *const T,
+ pub(super) end: *const T,
+}
+
+#[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
+ }
+}
+
+impl<T, A: Allocator> IntoIter<T, A> {
+ /// Returns the remaining items of this iterator as a slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let vec = vec!['a', 'b', 'c'];
+ /// let mut into_iter = vec.into_iter();
+ /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
+ /// let _ = into_iter.next().unwrap();
+ /// assert_eq!(into_iter.as_slice(), &['b', 'c']);
+ /// ```
+ #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
+ pub fn as_slice(&self) -> &[T] {
+ unsafe { slice::from_raw_parts(self.ptr, self.len()) }
+ }
+
+ /// Returns the remaining items of this iterator as a mutable slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let vec = vec!['a', 'b', 'c'];
+ /// let mut into_iter = vec.into_iter();
+ /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
+ /// into_iter.as_mut_slice()[2] = 'z';
+ /// assert_eq!(into_iter.next().unwrap(), 'a');
+ /// assert_eq!(into_iter.next().unwrap(), 'b');
+ /// assert_eq!(into_iter.next().unwrap(), 'z');
+ /// ```
+ #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ unsafe { &mut *self.as_raw_mut_slice() }
+ }
+
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ &self.alloc
+ }
+
+ fn as_raw_mut_slice(&mut self) -> *mut [T] {
+ ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len())
+ }
+
+ /// Drops remaining elements and relinquishes the backing allocation.
+ ///
+ /// This is roughly equivalent to the following, but more efficient:
+ ///
+ /// ```
+ /// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
+ /// (&mut into_iter).for_each(core::mem::drop);
+ /// unsafe { core::ptr::write(&mut into_iter, Vec::new().into_iter()); }
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ pub(super) fn forget_allocation_drop_remaining(&mut self) {
+ let remaining = self.as_raw_mut_slice();
+
+ // Overwrite the individual fields instead of creating a new
+ // struct and then overwriting `&mut self`.
+ // This creates less assembly.
+ self.cap = 0;
+ self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) };
+ self.ptr = self.buf.as_ptr();
+ self.end = self.buf.as_ptr();
+
+ unsafe {
+ ptr::drop_in_place(remaining);
+ }
+ }
+}
+
+#[stable(feature = "vec_intoiter_as_ref", since = "1.46.0")]
+impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> {
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync, A: Allocator> Sync for IntoIter<T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Iterator for IntoIter<T, A> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ if self.ptr as *const _ == self.end {
+ None
+ } else if mem::size_of::<T>() == 0 {
+ // purposefully don't use 'ptr.offset' because for
+ // vectors with 0-size elements this would return the
+ // same pointer.
+ self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };
+
+ // Make up a value of this ZST.
+ Some(unsafe { mem::zeroed() })
+ } else {
+ let old = self.ptr;
+ self.ptr = unsafe { self.ptr.offset(1) };
+
+ Some(unsafe { ptr::read(old) })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = if mem::size_of::<T>() == 0 {
+ (self.end as usize).wrapping_sub(self.ptr as usize)
+ } else {
+ unsafe { self.end.offset_from(self.ptr) as usize }
+ };
+ (exact, Some(exact))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item
+ where
+ Self: TrustedRandomAccess,
+ {
+ // SAFETY: the caller must guarantee that `i` is in bounds of the
+ // `Vec<T>`, so `i` cannot overflow an `isize`, and `self.ptr.add(i)`
+ // is guaranteed to point to an element of the `Vec<T>` and
+ // thus guaranteed to be valid to dereference.
+ //
+ // Also note the implementation of `Self: TrustedRandomAccess` requires
+ // that `T: Copy` so reading elements from the buffer doesn't invalidate
+ // them for `Drop`.
+ unsafe {
+ if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
+ }
+ }
+}
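+
+// A minimal sketch of the zero-sized-element scheme above, using `std` types:
+// for ZSTs, `ptr` and `end` act as a pure counter, so the reported length is
+// still correct even though no real addresses are involved.
+//
+//     let mut it = vec![(), (), ()].into_iter();
+//     assert_eq!(it.len(), 3);
+//     it.next();
+//     assert_eq!(it.len(), 2);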
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ if self.end == self.ptr {
+ None
+ } else if mem::size_of::<T>() == 0 {
+ // See above for why 'ptr.offset' isn't used
+ self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
+
+ // Make up a value of this ZST.
+ Some(unsafe { mem::zeroed() })
+ } else {
+ self.end = unsafe { self.end.offset(-1) };
+
+ Some(unsafe { ptr::read(self.end) })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
+ fn is_empty(&self) -> bool {
+ self.ptr == self.end
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
+
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "std_internals")]
+// T: Copy as approximation for !Drop since get_unchecked does not advance self.ptr
+// and thus we can't implement drop-handling
+unsafe impl<T, A: Allocator> TrustedRandomAccess for IntoIter<T, A>
+where
+ T: Copy,
+{
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_into_iter_clone", since = "1.8.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> {
+ #[cfg(not(test))]
+ fn clone(&self) -> Self {
+ self.as_slice().to_vec_in(self.alloc.clone()).into_iter()
+ }
+ #[cfg(test)]
+ fn clone(&self) -> Self {
+ crate::slice::to_vec(self.as_slice(), self.alloc.clone()).into_iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
+ fn drop(&mut self) {
+ struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>);
+
+ impl<T, A: Allocator> Drop for DropGuard<'_, T, A> {
+ fn drop(&mut self) {
+ unsafe {
+ // `IntoIter::alloc` is not used anymore after this
+ let alloc = ptr::read(&self.0.alloc);
+ // RawVec handles deallocation
+ let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc);
+ }
+ }
+ }
+
+ let guard = DropGuard(self);
+ // destroy the remaining elements
+ unsafe {
+ ptr::drop_in_place(guard.0.as_raw_mut_slice());
+ }
+ // now `guard` will be dropped and do the rest
+ }
+}
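+
+// Note on the guard above: if `drop_in_place` panics while dropping the
+// remaining elements, `guard` is still dropped during unwinding, so the
+// backing buffer is returned to the allocator exactly once and is never
+// leaked.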
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
+ type Source = Self;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut Self::Source {
+ self
+ }
+}
+
+// internal helper trait for in-place iteration specialization.
+#[rustc_specialization_trait]
+pub(crate) trait AsIntoIter {
+ type Item;
+ fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item>;
+}
+
+impl<T> AsIntoIter for IntoIter<T> {
+ type Item = T;
+
+ fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item> {
+ self
+ }
+}
diff --git a/rust/alloc/vec/is_zero.rs b/rust/alloc/vec/is_zero.rs
new file mode 100644
index 000000000000..40e1e667c9fb
--- /dev/null
+++ b/rust/alloc/vec/is_zero.rs
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::boxed::Box;
+
+#[rustc_specialization_trait]
+pub(super) unsafe trait IsZero {
+ /// Whether this value is zero.
+ fn is_zero(&self) -> bool;
+}
+
+macro_rules! impl_is_zero {
+ ($t:ty, $is_zero:expr) => {
+ unsafe impl IsZero for $t {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ $is_zero(*self)
+ }
+ }
+ };
+}
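+
+// For reference, a sketch of what one invocation below expands to:
+// `impl_is_zero!(i32, |x| x == 0)` becomes, roughly,
+//
+//     unsafe impl IsZero for i32 {
+//         #[inline]
+//         fn is_zero(&self) -> bool {
+//             (|x| x == 0)(*self)
+//         }
+//     }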
+
+impl_is_zero!(i16, |x| x == 0);
+impl_is_zero!(i32, |x| x == 0);
+impl_is_zero!(i64, |x| x == 0);
+impl_is_zero!(i128, |x| x == 0);
+impl_is_zero!(isize, |x| x == 0);
+
+impl_is_zero!(u16, |x| x == 0);
+impl_is_zero!(u32, |x| x == 0);
+impl_is_zero!(u64, |x| x == 0);
+impl_is_zero!(u128, |x| x == 0);
+impl_is_zero!(usize, |x| x == 0);
+
+impl_is_zero!(bool, |x| x == false);
+impl_is_zero!(char, |x| x == '\0');
+
+impl_is_zero!(f32, |x: f32| x.to_bits() == 0);
+impl_is_zero!(f64, |x: f64| x.to_bits() == 0);
+
+unsafe impl<T> IsZero for *const T {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ (*self).is_null()
+ }
+}
+
+unsafe impl<T> IsZero for *mut T {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ (*self).is_null()
+ }
+}
+
+// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
+// For fat pointers, the bytes that would be the pointer metadata in the `Some`
+// variant are padding in the `None` variant, so ignoring them and
+// zero-initializing instead is ok.
+// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
+// `SpecFromElem`.
+
+unsafe impl<T: ?Sized> IsZero for Option<&T> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.is_none()
+ }
+}
+
+unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.is_none()
+ }
+}
+
+// `Option<num::NonZeroU32>` and similar have a representation guarantee that
+// they're the same size as the corresponding `u32` type, as well as a guarantee
+// that transmuting between `NonZeroU32` and `Option<num::NonZeroU32>` works.
+// While the documentation officially makes it UB to transmute from `None`,
+// we're the standard library so we can make extra inferences, and we know that
+// the only niche available to represent `None` is the one that's all zeros.
+
+macro_rules! impl_is_zero_option_of_nonzero {
+ ($($t:ident,)+) => {$(
+ unsafe impl IsZero for Option<core::num::$t> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.is_none()
+ }
+ }
+ )+};
+}
+
+impl_is_zero_option_of_nonzero!(
+ NonZeroU8,
+ NonZeroU16,
+ NonZeroU32,
+ NonZeroU64,
+ NonZeroU128,
+ NonZeroI8,
+ NonZeroI16,
+ NonZeroI32,
+ NonZeroI64,
+ NonZeroI128,
+ NonZeroUsize,
+ NonZeroIsize,
+);
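+
+// A minimal sketch of what `IsZero` enables, using `std` types: constructors
+// like `vec![elem; n]` can request zeroed memory from the allocator instead of
+// writing every element, whenever `elem.is_zero()` holds. `Option<&T>` and
+// `Option<NonZero*>` take the same fast path via the impls above.
+//
+//     let zeros = vec![0u64; 4]; // may be served by a zeroed allocation
+//     assert_eq!(zeros, [0, 0, 0, 0]);
+//     let nulls: Vec<Option<&u8>> = vec![None; 4]; // `None` is all-zero here
+//     assert!(nulls.iter().all(|x| x.is_none()));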
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
new file mode 100644
index 000000000000..2abffb93e498
--- /dev/null
+++ b/rust/alloc/vec/mod.rs
@@ -0,0 +1,3255 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! A contiguous growable array type with heap-allocated contents, written
+//! `Vec<T>`.
+//!
+//! Vectors have `O(1)` indexing, amortized `O(1)` push (to the end) and
+//! `O(1)` pop (from the end).
+//!
+//! Vectors ensure they never allocate more than `isize::MAX` bytes.
+//!
+//! # Examples
+//!
+//! You can explicitly create a [`Vec`] with [`Vec::new`]:
+//!
+//! ```
+//! let v: Vec<i32> = Vec::new();
+//! ```
+//!
+//! ...or by using the [`vec!`] macro:
+//!
+//! ```
+//! let v: Vec<i32> = vec![];
+//!
+//! let v = vec![1, 2, 3, 4, 5];
+//!
+//! let v = vec![0; 10]; // ten zeroes
+//! ```
+//!
+//! You can [`push`] values onto the end of a vector (which will grow the vector
+//! as needed):
+//!
+//! ```
+//! let mut v = vec![1, 2];
+//!
+//! v.push(3);
+//! ```
+//!
+//! Popping values works in much the same way:
+//!
+//! ```
+//! let mut v = vec![1, 2];
+//!
+//! let two = v.pop();
+//! ```
+//!
+//! Vectors also support indexing (through the [`Index`] and [`IndexMut`] traits):
+//!
+//! ```
+//! let mut v = vec![1, 2, 3];
+//! let three = v[2];
+//! v[1] = v[1] + 5;
+//! ```
+//!
+//! [`push`]: Vec::push
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(no_global_oom_handling))]
+use core::cmp;
+use core::cmp::Ordering;
+use core::convert::TryFrom;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::intrinsics::{arith_offset, assume};
+use core::iter;
+#[cfg(not(no_global_oom_handling))]
+use core::iter::FromIterator;
+use core::marker::PhantomData;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::{self, Index, IndexMut, Range, RangeBounds};
+use core::ptr::{self, NonNull};
+use core::slice::{self, SliceIndex};
+
+use crate::alloc::{Allocator, Global};
+use crate::borrow::{Cow, ToOwned};
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::raw_vec::RawVec;
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+pub use self::drain_filter::DrainFilter;
+
+mod drain_filter;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_splice", since = "1.21.0")]
+pub use self::splice::Splice;
+
+#[cfg(not(no_global_oom_handling))]
+mod splice;
+
+#[stable(feature = "drain", since = "1.6.0")]
+pub use self::drain::Drain;
+
+mod drain;
+
+#[cfg(not(no_global_oom_handling))]
+mod cow;
+
+#[cfg(not(no_global_oom_handling))]
+pub(crate) use self::into_iter::AsIntoIter;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::into_iter::IntoIter;
+
+mod into_iter;
+
+#[cfg(not(no_global_oom_handling))]
+use self::is_zero::IsZero;
+
+mod is_zero;
+
+#[cfg(not(no_global_oom_handling))]
+mod source_iter_marker;
+
+mod partial_eq;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_from_elem::SpecFromElem;
+
+#[cfg(not(no_global_oom_handling))]
+mod spec_from_elem;
+
+use self::set_len_on_drop::SetLenOnDrop;
+
+mod set_len_on_drop;
+
+#[cfg(not(no_global_oom_handling))]
+use self::in_place_drop::InPlaceDrop;
+
+#[cfg(not(no_global_oom_handling))]
+mod in_place_drop;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_from_iter_nested::SpecFromIterNested;
+
+#[cfg(not(no_global_oom_handling))]
+mod spec_from_iter_nested;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_from_iter::SpecFromIter;
+
+#[cfg(not(no_global_oom_handling))]
+mod spec_from_iter;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_extend::SpecExtend;
+
+use self::spec_extend::TrySpecExtend;
+
+mod spec_extend;
+
+/// A contiguous growable array type, written as `Vec<T>` and pronounced 'vector'.
+///
+/// # Examples
+///
+/// ```
+/// let mut vec = Vec::new();
+/// vec.push(1);
+/// vec.push(2);
+///
+/// assert_eq!(vec.len(), 2);
+/// assert_eq!(vec[0], 1);
+///
+/// assert_eq!(vec.pop(), Some(2));
+/// assert_eq!(vec.len(), 1);
+///
+/// vec[0] = 7;
+/// assert_eq!(vec[0], 7);
+///
+/// vec.extend([1, 2, 3].iter().copied());
+///
+/// for x in &vec {
+/// println!("{}", x);
+/// }
+/// assert_eq!(vec, [7, 1, 2, 3]);
+/// ```
+///
+/// The [`vec!`] macro is provided to make initialization more convenient:
+///
+/// ```
+/// let mut vec = vec![1, 2, 3];
+/// vec.push(4);
+/// assert_eq!(vec, [1, 2, 3, 4]);
+/// ```
+///
+/// It can also initialize each element of a `Vec<T>` with a given value.
+/// This may be more efficient than performing allocation and initialization
+/// in separate steps, especially when initializing a vector of zeros:
+///
+/// ```
+/// let vec = vec![0; 5];
+/// assert_eq!(vec, [0, 0, 0, 0, 0]);
+///
+/// // The following is equivalent, but potentially slower:
+/// let mut vec = Vec::with_capacity(5);
+/// vec.resize(5, 0);
+/// assert_eq!(vec, [0, 0, 0, 0, 0]);
+/// ```
+///
+/// For more information, see
+/// [Capacity and Reallocation](#capacity-and-reallocation).
+///
+/// Use a `Vec<T>` as an efficient stack:
+///
+/// ```
+/// let mut stack = Vec::new();
+///
+/// stack.push(1);
+/// stack.push(2);
+/// stack.push(3);
+///
+/// while let Some(top) = stack.pop() {
+/// // Prints 3, 2, 1
+/// println!("{}", top);
+/// }
+/// ```
+///
+/// # Indexing
+///
+/// The `Vec` type allows access to values by index, because it implements the
+/// [`Index`] trait. An example will make this more explicit:
+///
+/// ```
+/// let v = vec![0, 2, 4, 6];
+/// println!("{}", v[1]); // it will display '2'
+/// ```
+///
+/// However be careful: if you try to access an index which isn't in the `Vec`,
+/// your software will panic! You cannot do this:
+///
+/// ```should_panic
+/// let v = vec![0, 2, 4, 6];
+/// println!("{}", v[6]); // it will panic!
+/// ```
+///
+/// Use [`get`] and [`get_mut`] if you want to check whether the index is in
+/// the `Vec`.
+///
+/// # Slicing
+///
+/// A `Vec` can be mutable, whereas a shared slice (`&[T]`) is a read-only view
+/// into it. To get a [slice][prim@slice], use [`&`]. Example:
+///
+/// ```
+/// fn read_slice(slice: &[usize]) {
+/// // ...
+/// }
+///
+/// let v = vec![0, 1];
+/// read_slice(&v);
+///
+/// // ... and that's all!
+/// // you can also do it like this:
+/// let u: &[usize] = &v;
+/// // or like this:
+/// let u: &[_] = &v;
+/// ```
+///
+/// In Rust, it's more common to pass slices as arguments rather than vectors
+/// when you just want to provide read access. The same goes for [`String`] and
+/// [`&str`].
+///
+/// # Capacity and reallocation
+///
+/// The capacity of a vector is the amount of space allocated for any future
+/// elements that will be added onto the vector. This is not to be confused with
+/// the *length* of a vector, which specifies the number of actual elements
+/// within the vector. If a vector's length exceeds its capacity, its capacity
+/// will automatically be increased, but its elements will have to be
+/// reallocated.
+///
+/// For example, a vector with capacity 10 and length 0 would be an empty vector
+/// with space for 10 more elements. Pushing 10 or fewer elements onto the
+/// vector will not change its capacity or cause reallocation to occur. However,
+/// if the vector's length is increased to 11, it will have to reallocate, which
+/// can be slow. For this reason, it is recommended to use [`Vec::with_capacity`]
+/// whenever possible to specify how big the vector is expected to get.
+///
+/// # Guarantees
+///
+/// Due to its incredibly fundamental nature, `Vec` makes a lot of guarantees
+/// about its design. This ensures that it's as low-overhead as possible in
+/// the general case, and can be correctly manipulated in primitive ways
+/// by unsafe code. Note that these guarantees refer to an unqualified `Vec<T>`.
+/// If additional type parameters are added (e.g., to support custom allocators),
+/// overriding their defaults may change the behavior.
+///
+/// Most fundamentally, `Vec` is and always will be a (pointer, capacity, length)
+/// triplet. No more, no less. The order of these fields is completely
+/// unspecified, and you should use the appropriate methods to modify these.
+/// The pointer will never be null, so this type is null-pointer-optimized.
+///
+/// However, the pointer might not actually point to allocated memory. In particular,
+/// if you construct a `Vec` with capacity 0 via [`Vec::new`], [`vec![]`][`vec!`],
+/// [`Vec::with_capacity(0)`][`Vec::with_capacity`], or by calling [`shrink_to_fit`]
+/// on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized
+/// types inside a `Vec`, it will not allocate space for them. *Note that in this case
+/// the `Vec` might not report a [`capacity`] of 0*. `Vec` will allocate if and only
+/// if [`mem::size_of::<T>`]`() * capacity() > 0`. In general, `Vec`'s allocation
+/// details are very subtle &mdash; if you intend to allocate memory using a `Vec`
+/// and use it for something else (either to pass to unsafe code, or to build your
+/// own memory-backed collection), be sure to deallocate this memory by using
+/// `from_raw_parts` to recover the `Vec` and then dropping it.
+///
+/// If a `Vec` *has* allocated memory, then the memory it points to is on the heap
+/// (as defined by the allocator Rust is configured to use by default), and its
+/// pointer points to [`len`] initialized, contiguous elements in order (what
+/// you would see if you coerced it to a slice), followed by [`capacity`]` -
+/// `[`len`] logically uninitialized, contiguous elements.
+///
+/// A vector containing the elements `'a'` and `'b'` with capacity 4 can be
+/// visualized as below. The top part is the `Vec` struct, it contains a
+/// pointer to the head of the allocation in the heap, length and capacity.
+/// The bottom part is the allocation on the heap, a contiguous memory block.
+///
+/// ```text
+/// ptr len capacity
+/// +--------+--------+--------+
+/// | 0x0123 | 2 | 4 |
+/// +--------+--------+--------+
+/// |
+/// v
+/// Heap +--------+--------+--------+--------+
+/// | 'a' | 'b' | uninit | uninit |
+/// +--------+--------+--------+--------+
+/// ```
+///
+/// - **uninit** represents memory that is not initialized, see [`MaybeUninit`].
+/// - Note: the ABI is not stable and `Vec` makes no guarantees about its memory
+/// layout (including the order of fields).
+///
+/// `Vec` will never perform a "small optimization" where elements are actually
+/// stored on the stack for two reasons:
+///
+/// * It would make it more difficult for unsafe code to correctly manipulate
+/// a `Vec`. The contents of a `Vec` wouldn't have a stable address if it were
+/// only moved, and it would be more difficult to determine if a `Vec` had
+/// actually allocated memory.
+///
+/// * It would penalize the general case, incurring an additional branch
+/// on every access.
+///
+/// `Vec` will never automatically shrink itself, even if completely empty. This
+/// ensures no unnecessary allocations or deallocations occur. Emptying a `Vec`
+/// and then filling it back up to the same [`len`] should incur no calls to
+/// the allocator. If you wish to free up unused memory, use
+/// [`shrink_to_fit`] or [`shrink_to`].
+///
+/// [`push`] and [`insert`] will never (re)allocate if the reported capacity is
+/// sufficient. [`push`] and [`insert`] *will* (re)allocate if
+/// [`len`]` == `[`capacity`]. That is, the reported capacity is completely
+/// accurate, and can be relied on. It can even be used to manually free the memory
+/// allocated by a `Vec` if desired. Bulk insertion methods *may* reallocate, even
+/// when not necessary.
+///
+/// `Vec` does not guarantee any particular growth strategy when reallocating
+/// when full, nor when [`reserve`] is called. The current strategy is basic
+/// and it may prove desirable to use a non-constant growth factor. Whatever
+/// strategy is used will of course guarantee *O*(1) amortized [`push`].
+///
+/// `vec![x; n]`, `vec![a, b, c, d]`, and
+/// [`Vec::with_capacity(n)`][`Vec::with_capacity`], will all produce a `Vec`
+/// with exactly the requested capacity. If [`len`]` == `[`capacity`],
+/// (as is the case for the [`vec!`] macro), then a `Vec<T>` can be converted to
+/// and from a [`Box<[T]>`][owned slice] without reallocating or moving the elements.
+///
+/// `Vec` will not specifically overwrite any data that is removed from it,
+/// but also won't specifically preserve it. Its uninitialized memory is
+/// scratch space that it may use however it wants. It will generally just do
+/// whatever is most efficient or otherwise easy to implement. Do not rely on
+/// removed data to be erased for security purposes. Even if you drop a `Vec`, its
+/// buffer may simply be reused by another `Vec`. Even if you zero a `Vec`'s memory
+/// first, that might not actually happen because the optimizer does not consider
+/// this a side-effect that must be preserved. There is one case which we will
+/// not break, however: using `unsafe` code to write to the excess capacity,
+/// and then increasing the length to match, is always valid.
+///
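+/// As a minimal sketch of that escape hatch:
+///
+/// ```
+/// let mut v: Vec<u8> = Vec::with_capacity(4);
+/// unsafe {
+///     let p = v.as_mut_ptr();
+///     for i in 0..4 {
+///         p.add(i).write(i as u8); // write into the excess capacity...
+///     }
+///     v.set_len(4); // ...then increase the length to match
+/// }
+/// assert_eq!(v, [0, 1, 2, 3]);
+/// ```
+///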
+/// Currently, `Vec` does not guarantee the order in which elements are dropped.
+/// The order has changed in the past and may change again.
+///
+/// [`get`]: ../../std/vec/struct.Vec.html#method.get
+/// [`get_mut`]: ../../std/vec/struct.Vec.html#method.get_mut
+/// [`String`]: crate::string::String
+/// [`&str`]: type@str
+/// [`shrink_to_fit`]: Vec::shrink_to_fit
+/// [`shrink_to`]: Vec::shrink_to
+/// [`capacity`]: Vec::capacity
+/// [`mem::size_of::<T>`]: core::mem::size_of
+/// [`len`]: Vec::len
+/// [`push`]: Vec::push
+/// [`insert`]: Vec::insert
+/// [`reserve`]: Vec::reserve
+/// [`MaybeUninit`]: core::mem::MaybeUninit
+/// [owned slice]: Box
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "vec_type")]
+pub struct Vec<T, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global> {
+ buf: RawVec<T, A>,
+ len: usize,
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Inherent methods
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T> Vec<T> {
+ /// Constructs a new, empty `Vec<T>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(unused_mut)]
+ /// let mut vec: Vec<i32> = Vec::new();
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_vec_new", since = "1.39.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn new() -> Self {
+ Vec { buf: RawVec::NEW, len: 0 }
+ }
+
+ /// Constructs a new, empty `Vec<T>` with the specified capacity.
+ ///
+ /// The vector will be able to hold exactly `capacity` elements without
+ /// reallocating. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// *capacity* specified, the vector will have a zero *length*. For an
+ /// explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[doc(alias = "malloc")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self::with_capacity_in(capacity, Global)
+ }
+
+ /// Tries to construct a new, empty `Vec<T>` with the specified capacity.
+ ///
+ /// The vector will be able to hold exactly `capacity` elements without
+ /// reallocating. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// *capacity* specified, the vector will have a zero *length*. For an
+ /// explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::try_with_capacity(10).unwrap();
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ ///
+ /// let mut result = Vec::try_with_capacity(usize::MAX);
+ /// assert!(result.is_err());
+ /// ```
+ #[inline]
+ #[doc(alias = "malloc")]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
+ Self::try_with_capacity_in(capacity, Global)
+ }
+
+ /// Creates a `Vec<T>` directly from the raw components of another vector.
+ ///
+ /// # Safety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
+ /// (at least, it's highly likely to be incorrect if it wasn't).
+ /// * `T` needs to have the same size and alignment as what `ptr` was allocated with.
+ /// (`T` having a less strict alignment is not sufficient, the alignment really
+ /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
+ /// allocated and deallocated with the same layout.)
+ /// * `length` needs to be less than or equal to `capacity`.
+ /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures. For example it is **not** safe
+ /// to build a `Vec<u8>` from a pointer to a C `char` array with length `size_t`.
+ /// It's also not safe to build one from a `Vec<u16>` and its length, because
+ /// the allocator cares about the alignment, and these two types have different
+ /// alignments. The buffer was allocated with alignment 2 (for `u16`), but after
+ /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1.
+ ///
+ /// The ownership of `ptr` is effectively transferred to the
+ /// `Vec<T>` which may then deallocate, reallocate or change the
+ /// contents of memory pointed to by the pointer at will. Ensure
+ /// that nothing else uses the pointer after calling this
+ /// function.
+ ///
+ /// [`String`]: crate::string::String
+ /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr;
+ /// use std::mem;
+ ///
+ /// let v = vec![1, 2, 3];
+ ///
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Prevent running `v`'s destructor so we are in complete control
+ /// // of the allocation.
+ /// let mut v = mem::ManuallyDrop::new(v);
+ ///
+ /// // Pull out the various important pieces of information about `v`
+ /// let p = v.as_mut_ptr();
+ /// let len = v.len();
+ /// let cap = v.capacity();
+ ///
+ /// unsafe {
+ /// // Overwrite memory with 4, 5, 6
+ /// for i in 0..len as isize {
+ /// ptr::write(p.offset(i), 4 + i);
+ /// }
+ ///
+ /// // Put everything back together into a Vec
+ /// let rebuilt = Vec::from_raw_parts(p, len, cap);
+ /// assert_eq!(rebuilt, [4, 5, 6]);
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self {
+ unsafe { Self::from_raw_parts_in(ptr, length, capacity, Global) }
+ }
+}
+
+impl<T, A: Allocator> Vec<T, A> {
+ /// Constructs a new, empty `Vec<T, A>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// # #[allow(unused_mut)]
+ /// let mut vec: Vec<i32, _> = Vec::new_in(System);
+ /// ```
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub const fn new_in(alloc: A) -> Self {
+ Vec { buf: RawVec::new_in(alloc), len: 0 }
+ }
+
+ /// Constructs a new, empty `Vec<T, A>` with the specified capacity with the provided
+ /// allocator.
+ ///
+ /// The vector will be able to hold exactly `capacity` elements without
+ /// reallocating. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// *capacity* specified, the vector will have a zero *length*. For an
+ /// explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut vec = Vec::with_capacity_in(10, System);
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+ Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
+ }
+
+ /// Tries to construct a new, empty `Vec<T, A>` with the specified capacity
+ /// with the provided allocator.
+ ///
+ /// The vector will be able to hold exactly `capacity` elements without
+ /// reallocating. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// *capacity* specified, the vector will have a zero *length*. For an
+ /// explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut vec = Vec::try_with_capacity_in(10, System).unwrap();
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ ///
+ /// let mut result = Vec::try_with_capacity_in(usize::MAX, System);
+ /// assert!(result.is_err());
+ /// ```
+ #[inline]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
+ Ok(Vec { buf: RawVec::try_with_capacity_in(capacity, alloc)?, len: 0 })
+ }
+
+ /// Creates a `Vec<T, A>` directly from the raw components of another vector.
+ ///
+ /// # Safety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
+ /// (at least, it's highly likely to be incorrect if it wasn't).
+ /// * `T` needs to have the same size and alignment as what `ptr` was allocated with.
+ /// (`T` having a less strict alignment is not sufficient, the alignment really
+ /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
+ /// allocated and deallocated with the same layout.)
+ /// * `length` needs to be less than or equal to `capacity`.
+ /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures. For example it is **not** safe
+ /// to build a `Vec<u8>` from a pointer to a C `char` array with length `size_t`.
+ /// It's also not safe to build one from a `Vec<u16>` and its length, because
+ /// the allocator cares about the alignment, and these two types have different
+ /// alignments. The buffer was allocated with alignment 2 (for `u16`), but after
+ /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1.
+ ///
+ /// The ownership of `ptr` is effectively transferred to the
+ /// `Vec<T>` which may then deallocate, reallocate or change the
+ /// contents of memory pointed to by the pointer at will. Ensure
+ /// that nothing else uses the pointer after calling this
+ /// function.
+ ///
+ /// [`String`]: crate::string::String
+ /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// use std::ptr;
+ /// use std::mem;
+ ///
+ /// let mut v = Vec::with_capacity_in(3, System);
+ /// v.push(1);
+ /// v.push(2);
+ /// v.push(3);
+ ///
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Prevent running `v`'s destructor so we are in complete control
+ /// // of the allocation.
+ /// let mut v = mem::ManuallyDrop::new(v);
+ ///
+ /// // Pull out the various important pieces of information about `v`
+ /// let p = v.as_mut_ptr();
+ /// let len = v.len();
+ /// let cap = v.capacity();
+ /// let alloc = v.allocator();
+ ///
+ /// unsafe {
+ /// // Overwrite memory with 4, 5, 6
+ /// for i in 0..len as isize {
+ /// ptr::write(p.offset(i), 4 + i);
+ /// }
+ ///
+ /// // Put everything back together into a Vec
+ /// let rebuilt = Vec::from_raw_parts_in(p, len, cap, alloc.clone());
+ /// assert_eq!(rebuilt, [4, 5, 6]);
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self {
+ unsafe { Vec { buf: RawVec::from_raw_parts_in(ptr, capacity, alloc), len: length } }
+ }
+
+ /// Decomposes a `Vec<T>` into its raw components.
+ ///
+ /// Returns the raw pointer to the underlying data, the length of
+ /// the vector (in elements), and the allocated capacity of the
+ /// data (in elements). These are the same arguments in the same
+ /// order as the arguments to [`from_raw_parts`].
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Vec`. The only way to do
+ /// this is to convert the raw pointer, length, and capacity back
+ /// into a `Vec` with the [`from_raw_parts`] function, allowing
+ /// the destructor to perform the cleanup.
+ ///
+ /// [`from_raw_parts`]: Vec::from_raw_parts
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_into_raw_parts)]
+ /// let v: Vec<i32> = vec![-1, 0, 1];
+ ///
+ /// let (ptr, len, cap) = v.into_raw_parts();
+ ///
+ /// let rebuilt = unsafe {
+ /// // We can now make changes to the components, such as
+ /// // transmuting the raw pointer to a compatible type.
+ /// let ptr = ptr as *mut u32;
+ ///
+ /// Vec::from_raw_parts(ptr, len, cap)
+ /// };
+ /// assert_eq!(rebuilt, [4294967295, 0, 1]);
+ /// ```
+ #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
+ pub fn into_raw_parts(self) -> (*mut T, usize, usize) {
+ let mut me = ManuallyDrop::new(self);
+ (me.as_mut_ptr(), me.len(), me.capacity())
+ }
+
+ /// Decomposes a `Vec<T>` into its raw components.
+ ///
+ /// Returns the raw pointer to the underlying data, the length of the vector (in elements),
+ /// the allocated capacity of the data (in elements), and the allocator. These are the same
+ /// arguments in the same order as the arguments to [`from_raw_parts_in`].
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Vec`. The only way to do
+ /// this is to convert the raw pointer, length, and capacity back
+ /// into a `Vec` with the [`from_raw_parts_in`] function, allowing
+ /// the destructor to perform the cleanup.
+ ///
+ /// [`from_raw_parts_in`]: Vec::from_raw_parts_in
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, vec_into_raw_parts)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut v: Vec<i32, System> = Vec::new_in(System);
+ /// v.push(-1);
+ /// v.push(0);
+ /// v.push(1);
+ ///
+ /// let (ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
+ ///
+ /// let rebuilt = unsafe {
+ /// // We can now make changes to the components, such as
+ /// // transmuting the raw pointer to a compatible type.
+ /// let ptr = ptr as *mut u32;
+ ///
+ /// Vec::from_raw_parts_in(ptr, len, cap, alloc)
+ /// };
+ /// assert_eq!(rebuilt, [4294967295, 0, 1]);
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
+ pub fn into_raw_parts_with_alloc(self) -> (*mut T, usize, usize, A) {
+ let mut me = ManuallyDrop::new(self);
+ let len = me.len();
+ let capacity = me.capacity();
+ let ptr = me.as_mut_ptr();
+ let alloc = unsafe { ptr::read(me.allocator()) };
+ (ptr, len, capacity, alloc)
+ }
+
+ /// Returns the number of elements the vector can hold without
+ /// reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let vec: Vec<i32> = Vec::with_capacity(10);
+ /// assert_eq!(vec.capacity(), 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.buf.capacity()
+ }
+
+ /// Reserves capacity for at least `additional` more elements to be inserted
+ /// in the given `Vec<T>`. The collection may reserve more space to avoid
+ /// frequent reallocations. After calling `reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional`. Does nothing if
+ /// capacity is already sufficient.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.reserve(10);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[doc(alias = "realloc")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.buf.reserve(self.len, additional);
+ }
+
+ /// Reserves the minimum capacity for exactly `additional` more elements to
+ /// be inserted in the given `Vec<T>`. After calling `reserve_exact`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity cannot be relied upon to be precisely
+ /// minimal. Prefer `reserve` if future insertions are expected.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows `usize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.reserve_exact(10);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[doc(alias = "realloc")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.buf.reserve_exact(self.len, additional);
+ }
+
+ /// Tries to reserve capacity for at least `additional` more elements to be inserted
+ /// in the given `Vec<T>`. The collection may reserve more space to avoid
+ /// frequent reallocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional`. Does nothing if
+ /// capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(try_reserve)]
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<Vec<u32>, TryReserveError> {
+ /// let mut output = Vec::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[doc(alias = "realloc")]
+ #[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.buf.try_reserve(self.len, additional)
+ }
+
+ /// Tries to reserve the minimum capacity for exactly `additional`
+ /// elements to be inserted in the given `Vec<T>`. After calling
+ /// `try_reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity cannot be relied upon to be precisely
+ /// minimal. Prefer `try_reserve` if future insertions are expected.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(try_reserve)]
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<Vec<u32>, TryReserveError> {
+ /// let mut output = Vec::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[doc(alias = "realloc")]
+ #[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.buf.try_reserve_exact(self.len, additional)
+ }
+
+ /// Shrinks the capacity of the vector as much as possible.
+ ///
+ /// It will drop down as close as possible to the length but the allocator
+ /// may still inform the vector that there is space for a few more elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ /// assert_eq!(vec.capacity(), 10);
+ /// vec.shrink_to_fit();
+ /// assert!(vec.capacity() >= 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[doc(alias = "realloc")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ // The capacity is never less than the length, and there's nothing to do when
+ // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit`
+ // by only calling it with a greater capacity.
+ if self.capacity() > self.len {
+ self.buf.shrink_to_fit(self.len);
+ }
+ }
+
+ /// Tries to shrink the capacity of the vector as much as possible.
+ ///
+ /// It will drop down as close as possible to the length but the allocator
+ /// may still inform the vector that there is space for a few more elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ /// assert_eq!(vec.capacity(), 10);
+ /// vec.try_shrink_to_fit().unwrap();
+ /// assert!(vec.capacity() >= 3);
+ /// ```
+ #[doc(alias = "realloc")]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_shrink_to_fit(&mut self) -> Result<(), TryReserveError> {
+ // The capacity is never less than the length, and there's nothing to do when
+ // they are equal, so we can avoid the panic case in `RawVec::try_shrink_to_fit`
+ // by only calling it with a greater capacity.
+ if self.capacity() <= self.len {
+ return Ok(());
+ }
+
+ self.buf.try_shrink_to_fit(self.len)
+ }
+
+ /// Shrinks the capacity of the vector with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(shrink_to)]
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ /// assert_eq!(vec.capacity(), 10);
+ /// vec.shrink_to(4);
+ /// assert!(vec.capacity() >= 4);
+ /// vec.shrink_to(0);
+ /// assert!(vec.capacity() >= 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[doc(alias = "realloc")]
+ #[unstable(feature = "shrink_to", reason = "new API", issue = "56431")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ if self.capacity() > min_capacity {
+ self.buf.shrink_to_fit(cmp::max(self.len, min_capacity));
+ }
+ }
+
+ /// Converts the vector into [`Box<[T]>`][owned slice].
+ ///
+ /// Note that this will drop any excess capacity.
+ ///
+ /// [owned slice]: Box
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = vec![1, 2, 3];
+ ///
+ /// let slice = v.into_boxed_slice();
+ /// ```
+ ///
+ /// Any excess capacity is removed:
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ ///
+ /// assert_eq!(vec.capacity(), 10);
+ /// let slice = vec.into_boxed_slice();
+ /// assert_eq!(slice.into_vec().capacity(), 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_boxed_slice(mut self) -> Box<[T], A> {
+ unsafe {
+ self.shrink_to_fit();
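+ // `ManuallyDrop` prevents the buffer from being freed here; ownership
+ // of it is handed over to the returned `Box`.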
+ let me = ManuallyDrop::new(self);
+ let buf = ptr::read(&me.buf);
+ let len = me.len();
+ buf.into_box(len).assume_init()
+ }
+ }
+
+ /// Tries to convert the vector into [`Box<[T]>`][owned slice].
+ ///
+ /// Note that this will drop any excess capacity.
+ ///
+ /// [owned slice]: Box
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = vec![1, 2, 3];
+ ///
+ /// let slice = v.try_into_boxed_slice().unwrap();
+ /// ```
+ ///
+ /// Any excess capacity is removed:
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ ///
+ /// assert_eq!(vec.capacity(), 10);
+ /// let slice = vec.try_into_boxed_slice().unwrap();
+ /// assert_eq!(slice.into_vec().capacity(), 3);
+ /// ```
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_into_boxed_slice(mut self) -> Result<Box<[T], A>, TryReserveError> {
+ unsafe {
+ self.try_shrink_to_fit()?;
+ let me = ManuallyDrop::new(self);
+ let buf = ptr::read(&me.buf);
+ let len = me.len();
+ Ok(buf.into_box(len).assume_init())
+ }
+ }
+
+ /// Shortens the vector, keeping the first `len` elements and dropping
+ /// the rest.
+ ///
+ /// If `len` is greater than the vector's current length, this has no
+ /// effect.
+ ///
+ /// The [`drain`] method can emulate `truncate`, but causes the excess
+ /// elements to be returned instead of dropped.
+ ///
+ /// Note that this method has no effect on the allocated capacity
+ /// of the vector.
+ ///
+ /// # Examples
+ ///
+ /// Truncating a five element vector to two elements:
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4, 5];
+ /// vec.truncate(2);
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ ///
+ /// No truncation occurs when `len` is greater than the vector's current
+ /// length:
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.truncate(8);
+ /// assert_eq!(vec, [1, 2, 3]);
+ /// ```
+ ///
+ /// Truncating when `len == 0` is equivalent to calling the [`clear`]
+ /// method.
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.truncate(0);
+ /// assert_eq!(vec, []);
+ /// ```
+ ///
+ /// [`clear`]: Vec::clear
+ /// [`drain`]: Vec::drain
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn truncate(&mut self, len: usize) {
+ // This is safe because:
+ //
+ // * the slice passed to `drop_in_place` is valid; the `len > self.len`
+ // case avoids creating an invalid slice, and
+ // * the `len` of the vector is shrunk before calling `drop_in_place`,
+ // such that no value will be dropped twice in case `drop_in_place`
+ // were to panic once (if it panics twice, the program aborts).
+ unsafe {
+ // Note: It's intentional that this is `>` and not `>=`.
+ // Changing it to `>=` has negative performance
+ // implications in some cases. See #78884 for more.
+ if len > self.len {
+ return;
+ }
+ let remaining_len = self.len - len;
+ let s = ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
+ self.len = len;
+ ptr::drop_in_place(s);
+ }
+ }
+
+ /// Extracts a slice containing the entire vector.
+ ///
+ /// Equivalent to `&s[..]`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{self, Write};
+ /// let buffer = vec![1, 2, 3, 5, 8];
+ /// io::sink().write(buffer.as_slice()).unwrap();
+ /// ```
+ #[inline]
+ #[stable(feature = "vec_as_slice", since = "1.7.0")]
+ pub fn as_slice(&self) -> &[T] {
+ self
+ }
+
+ /// Extracts a mutable slice of the entire vector.
+ ///
+ /// Equivalent to `&mut s[..]`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{self, Read};
+ /// let mut buffer = vec![0; 3];
+ /// io::repeat(0b101).read_exact(buffer.as_mut_slice()).unwrap();
+ /// ```
+ #[inline]
+ #[stable(feature = "vec_as_slice", since = "1.7.0")]
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ self
+ }
+
+ /// Returns a raw pointer to the vector's buffer.
+ ///
+ /// The caller must ensure that the vector outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ /// Modifying the vector may cause its buffer to be reallocated,
+ /// which would also make any pointers to it invalid.
+ ///
+ /// The caller must also ensure that the memory the pointer (non-transitively) points to
+ /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
+ /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = vec![1, 2, 4];
+ /// let x_ptr = x.as_ptr();
+ ///
+ /// unsafe {
+ /// for i in 0..x.len() {
+ /// assert_eq!(*x_ptr.add(i), 1 << i);
+ /// }
+ /// }
+ /// ```
+ ///
+ /// [`as_mut_ptr`]: Vec::as_mut_ptr
+ #[stable(feature = "vec_as_ptr", since = "1.37.0")]
+ #[inline]
+ pub fn as_ptr(&self) -> *const T {
+ // We shadow the slice method of the same name to avoid going through
+ // `deref`, which creates an intermediate reference.
+ let ptr = self.buf.ptr();
+ unsafe {
+ assume(!ptr.is_null());
+ }
+ ptr
+ }
+
+ /// Returns an unsafe mutable pointer to the vector's buffer.
+ ///
+ /// The caller must ensure that the vector outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ /// Modifying the vector may cause its buffer to be reallocated,
+ /// which would also make any pointers to it invalid.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // Allocate vector big enough for 4 elements.
+ /// let size = 4;
+ /// let mut x: Vec<i32> = Vec::with_capacity(size);
+ /// let x_ptr = x.as_mut_ptr();
+ ///
+ /// // Initialize elements via raw pointer writes, then set length.
+ /// unsafe {
+ /// for i in 0..size {
+ /// *x_ptr.add(i) = i as i32;
+ /// }
+ /// x.set_len(size);
+ /// }
+ /// assert_eq!(&*x, &[0, 1, 2, 3]);
+ /// ```
+ #[stable(feature = "vec_as_ptr", since = "1.37.0")]
+ #[inline]
+ pub fn as_mut_ptr(&mut self) -> *mut T {
+ // We shadow the slice method of the same name to avoid going through
+ // `deref_mut`, which creates an intermediate reference.
+ let ptr = self.buf.ptr();
+ unsafe {
+ assume(!ptr.is_null());
+ }
+ ptr
+ }
+
+ /// Returns a reference to the underlying allocator.
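+ ///
+ /// # Examples
+ ///
+ /// A minimal usage sketch (using `std`'s `System` allocator, as the
+ /// allocator-aware examples above do):
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let v: Vec<i32, System> = Vec::new_in(System);
+ /// let _alloc: &System = v.allocator();
+ /// ```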
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ self.buf.allocator()
+ }
+
+ /// Forces the length of the vector to `new_len`.
+ ///
+ /// This is a low-level operation that maintains none of the normal
+ /// invariants of the type. Normally changing the length of a vector
+ /// is done using one of the safe operations instead, such as
+ /// [`truncate`], [`resize`], [`extend`], or [`clear`].
+ ///
+ /// [`truncate`]: Vec::truncate
+ /// [`resize`]: Vec::resize
+ /// [`extend`]: Extend::extend
+ /// [`clear`]: Vec::clear
+ ///
+ /// # Safety
+ ///
+ /// - `new_len` must be less than or equal to [`capacity()`].
+ /// - The elements at `old_len..new_len` must be initialized.
+ ///
+ /// [`capacity()`]: Vec::capacity
+ ///
+ /// # Examples
+ ///
+ /// This method can be useful for situations in which the vector
+ /// is serving as a buffer for other code, particularly over FFI:
+ ///
+ /// ```no_run
+ /// # #![allow(dead_code)]
+ /// # // This is just a minimal skeleton for the doc example;
+ /// # // don't use this as a starting point for a real library.
+ /// # pub struct StreamWrapper { strm: *mut std::ffi::c_void }
+ /// # const Z_OK: i32 = 0;
+ /// # extern "C" {
+ /// # fn deflateGetDictionary(
+ /// # strm: *mut std::ffi::c_void,
+ /// # dictionary: *mut u8,
+ /// # dictLength: *mut usize,
+ /// # ) -> i32;
+ /// # }
+ /// # impl StreamWrapper {
+ /// pub fn get_dictionary(&self) -> Option<Vec<u8>> {
+ /// // Per the FFI method's docs, "32768 bytes is always enough".
+ /// let mut dict = Vec::with_capacity(32_768);
+ /// let mut dict_length = 0;
+ /// // SAFETY: When `deflateGetDictionary` returns `Z_OK`, it holds that:
+ /// // 1. `dict_length` elements were initialized.
+ /// // 2. `dict_length` <= the capacity (32_768)
+ /// // which makes `set_len` safe to call.
+ /// unsafe {
+ /// // Make the FFI call...
+ /// let r = deflateGetDictionary(self.strm, dict.as_mut_ptr(), &mut dict_length);
+ /// if r == Z_OK {
+ /// // ...and update the length to what was initialized.
+ /// dict.set_len(dict_length);
+ /// Some(dict)
+ /// } else {
+ /// None
+ /// }
+ /// }
+ /// }
+ /// # }
+ /// ```
+ ///
+ /// While the following example is sound, there is a memory leak since
+ /// the inner vectors were not freed prior to the `set_len` call:
+ ///
+ /// ```
+ /// let mut vec = vec![vec![1, 0, 0],
+ /// vec![0, 1, 0],
+ /// vec![0, 0, 1]];
+ /// // SAFETY:
+ /// // 1. `old_len..0` is empty so no elements need to be initialized.
+ /// // 2. `0 <= capacity` always holds whatever `capacity` is.
+ /// unsafe {
+ /// vec.set_len(0);
+ /// }
+ /// ```
+ ///
+ /// Normally, here, one would use [`clear`] instead to correctly drop
+ /// the contents and thus not leak memory.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn set_len(&mut self, new_len: usize) {
+ debug_assert!(new_len <= self.capacity());
+
+ self.len = new_len;
+ }
+
+ /// Removes an element from the vector and returns it.
+ ///
+ /// The removed element is replaced by the last element of the vector.
+ ///
+ /// This does not preserve ordering, but is O(1).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec!["foo", "bar", "baz", "qux"];
+ ///
+ /// assert_eq!(v.swap_remove(1), "bar");
+ /// assert_eq!(v, ["foo", "qux", "baz"]);
+ ///
+ /// assert_eq!(v.swap_remove(0), "foo");
+ /// assert_eq!(v, ["baz", "qux"]);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn swap_remove(&mut self, index: usize) -> T {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("swap_remove index (is {}) should be < len (is {})", index, len);
+ }
+
+ let len = self.len();
+ if index >= len {
+ assert_failed(index, len);
+ }
+ unsafe {
+ // We replace self[index] with the last element. Note that if the
+ // bounds check above succeeds there must be a last element (which
+ // can be self[index] itself).
+ let last = ptr::read(self.as_ptr().add(len - 1));
+ let hole = self.as_mut_ptr().add(index);
+ self.set_len(len - 1);
+ ptr::replace(hole, last)
+ }
+ }
+
+ /// Inserts an element at position `index` within the vector, shifting all
+ /// elements after it to the right.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.insert(1, 4);
+ /// assert_eq!(vec, [1, 4, 2, 3]);
+ /// vec.insert(4, 5);
+ /// assert_eq!(vec, [1, 4, 2, 3, 5]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, index: usize, element: T) {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("insertion index (is {}) should be <= len (is {})", index, len);
+ }
+
+ let len = self.len();
+ if index > len {
+ assert_failed(index, len);
+ }
+
+ // space for the new element
+ if len == self.buf.capacity() {
+ self.reserve(1);
+ }
+
+ unsafe {
+ // infallible
+ // The spot to put the new value
+ {
+ let p = self.as_mut_ptr().add(index);
+ // Shift everything over to make space. (Duplicating the
+ // `index`th element into two consecutive places.)
+ ptr::copy(p, p.offset(1), len - index);
+ // Write it in, overwriting the first copy of the `index`th
+ // element.
+ ptr::write(p, element);
+ }
+ self.set_len(len + 1);
+ }
+ }
+
+ /// Removes and returns the element at position `index` within the vector,
+ /// shifting all elements after it to the left.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ /// assert_eq!(v.remove(1), 2);
+ /// assert_eq!(v, [1, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove(&mut self, index: usize) -> T {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("removal index (is {}) should be < len (is {})", index, len);
+ }
+
+ let len = self.len();
+ if index >= len {
+ assert_failed(index, len);
+ }
+ unsafe {
+ // infallible
+ let ret;
+ {
+ // the place we are taking from.
+ let ptr = self.as_mut_ptr().add(index);
+ // copy it out, unsafely having a copy of the value on
+ // the stack and in the vector at the same time.
+ ret = ptr::read(ptr);
+
+ // Shift everything down to fill in that spot.
+ ptr::copy(ptr.offset(1), ptr, len - index - 1);
+ }
+ self.set_len(len - 1);
+ ret
+ }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` such that `f(&e)` returns `false`.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.retain(|&x| x % 2 == 0);
+ /// assert_eq!(vec, [2, 4]);
+ /// ```
+ ///
+ /// Because the elements are visited exactly once in the original order,
+ /// external state may be used to decide which elements to keep.
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4, 5];
+ /// let keep = [false, true, true, false, true];
+ /// let mut iter = keep.iter();
+ /// vec.retain(|_| *iter.next().unwrap());
+ /// assert_eq!(vec, [2, 3, 5]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ let original_len = self.len();
+ // Avoid double drop if the drop guard is not executed,
+ // since we may make some holes during the process.
+ unsafe { self.set_len(0) };
+
+ // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked]
+ // |<- processed len ->| ^- next to check
+ // |<- deleted cnt ->|
+ // |<- original_len ->|
+ // Kept: Elements which predicate returns true on.
+ // Hole: Moved or dropped element slot.
+ // Unchecked: Unchecked valid elements.
+ //
+ // This drop guard is invoked if the predicate or an element's `drop` panics.
+ // It shifts the unchecked elements down to cover the holes and calls
+ // `set_len` with the correct length.
+ // When the predicate and `drop` never panic, it is optimized out.
+ struct BackshiftOnDrop<'a, T, A: Allocator> {
+ v: &'a mut Vec<T, A>,
+ processed_len: usize,
+ deleted_cnt: usize,
+ original_len: usize,
+ }
+
+ impl<T, A: Allocator> Drop for BackshiftOnDrop<'_, T, A> {
+ fn drop(&mut self) {
+ if self.deleted_cnt > 0 {
+ // SAFETY: Trailing unchecked items must be valid since we never touch them.
+ unsafe {
+ ptr::copy(
+ self.v.as_ptr().add(self.processed_len),
+ self.v.as_mut_ptr().add(self.processed_len - self.deleted_cnt),
+ self.original_len - self.processed_len,
+ );
+ }
+ }
+ // SAFETY: After filling holes, all items are in contiguous memory.
+ unsafe {
+ self.v.set_len(self.original_len - self.deleted_cnt);
+ }
+ }
+ }
+
+ let mut g = BackshiftOnDrop { v: self, processed_len: 0, deleted_cnt: 0, original_len };
+
+ while g.processed_len < original_len {
+ // SAFETY: Unchecked element must be valid.
+ let cur = unsafe { &mut *g.v.as_mut_ptr().add(g.processed_len) };
+ if !f(cur) {
+ // Advance early to avoid double drop if `drop_in_place` panicked.
+ g.processed_len += 1;
+ g.deleted_cnt += 1;
+ // SAFETY: We never touch this element again after it is dropped.
+ unsafe { ptr::drop_in_place(cur) };
+ // We already advanced the counter.
+ continue;
+ }
+ if g.deleted_cnt > 0 {
+ // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with current element.
+ // We use copy for move, and never touch this element again.
+ unsafe {
+ let hole_slot = g.v.as_mut_ptr().add(g.processed_len - g.deleted_cnt);
+ ptr::copy_nonoverlapping(cur, hole_slot, 1);
+ }
+ }
+ g.processed_len += 1;
+ }
+
+ // All items are processed. This can be optimized to `set_len` by LLVM.
+ drop(g);
+ }
+
+ /// Removes all but the first of consecutive elements in the vector that resolve to the same
+ /// key.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![10, 20, 21, 30, 20];
+ ///
+ /// vec.dedup_by_key(|i| *i / 10);
+ ///
+ /// assert_eq!(vec, [10, 20, 30, 20]);
+ /// ```
+ #[stable(feature = "dedup_by", since = "1.16.0")]
+ #[inline]
+ pub fn dedup_by_key<F, K>(&mut self, mut key: F)
+ where
+ F: FnMut(&mut T) -> K,
+ K: PartialEq,
+ {
+ self.dedup_by(|a, b| key(a) == key(b))
+ }
+
+ /// Removes all but the first of consecutive elements in the vector satisfying a given equality
+ /// relation.
+ ///
+ /// The `same_bucket` function is passed references to two elements from the vector and
+ /// must determine if the elements compare equal. The elements are passed in opposite order
+ /// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is removed.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["foo", "bar", "Bar", "baz", "bar"];
+ ///
+ /// vec.dedup_by(|a, b| a.eq_ignore_ascii_case(b));
+ ///
+ /// assert_eq!(vec, ["foo", "bar", "baz", "bar"]);
+ /// ```
+ #[stable(feature = "dedup_by", since = "1.16.0")]
+ pub fn dedup_by<F>(&mut self, mut same_bucket: F)
+ where
+ F: FnMut(&mut T, &mut T) -> bool,
+ {
+ let len = self.len();
+ if len <= 1 {
+ return;
+ }
+
+ /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+ struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
+ /* Offset of the element we want to check if it is a duplicate */
+ read: usize,
+
+ /* Offset of the place where we want to place the non-duplicate
+ * when we find it. */
+ write: usize,
+
+ /* The Vec that would need correction if `same_bucket` panicked */
+ vec: &'a mut Vec<T, A>,
+ }
+
+ impl<'a, T, A: core::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> {
+ fn drop(&mut self) {
+ /* This code gets executed when `same_bucket` panics */
+
+ /* SAFETY: invariant guarantees that `read - write`
+ * and `len - read` never overflow and that the copy is always
+ * in-bounds. */
+ unsafe {
+ let ptr = self.vec.as_mut_ptr();
+ let len = self.vec.len();
+
+ /* How many items were left when `same_bucket` panicked.
+ * Basically vec[read..].len() */
+ let items_left = len.wrapping_sub(self.read);
+
+ /* Pointer to first item in vec[write..write+items_left] slice */
+ let dropped_ptr = ptr.add(self.write);
+ /* Pointer to first item in vec[read..] slice */
+ let valid_ptr = ptr.add(self.read);
+
+ /* Copy `vec[read..]` to `vec[write..write+items_left]`.
+ * The slices can overlap, so `copy_nonoverlapping` cannot be used */
+ ptr::copy(valid_ptr, dropped_ptr, items_left);
+
+ /* How many items have been already dropped
+ * Basically vec[read..write].len() */
+ let dropped = self.read.wrapping_sub(self.write);
+
+ self.vec.set_len(len - dropped);
+ }
+ }
+ }
+
+ let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
+ let ptr = gap.vec.as_mut_ptr();
+
+ /* Drop items while going through the Vec; it should be more efficient
+ * than doing slice partition_dedup + truncate */
+
+ /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
+ * are always in-bounds and read_ptr never aliases prev_ptr */
+ unsafe {
+ while gap.read < len {
+ let read_ptr = ptr.add(gap.read);
+ let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+
+ if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+ // Increase `gap.read` now since the drop may panic.
+ gap.read += 1;
+ /* We have found a duplicate; drop it in place */
+ ptr::drop_in_place(read_ptr);
+ } else {
+ let write_ptr = ptr.add(gap.write);
+
+ /* Because `read_ptr` can be equal to `write_ptr`, we either
+ * have to use `copy` or conditional `copy_nonoverlapping`.
+ * Looks like the first option is faster. */
+ ptr::copy(read_ptr, write_ptr, 1);
+
+ /* We have filled that place, so go further */
+ gap.write += 1;
+ gap.read += 1;
+ }
+ }
+
+ /* Technically we could let `gap` clean up with its Drop, but
+ * when `same_bucket` is guaranteed not to panic, that bloats the
+ * codegen a little, so we just do it manually */
+ gap.vec.set_len(gap.write);
+ mem::forget(gap);
+ }
+ }
+
+ /// Appends an element to the back of a collection.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2];
+ /// vec.push(3);
+ /// assert_eq!(vec, [1, 2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push(&mut self, value: T) {
+ // This will panic or abort if we would allocate > isize::MAX bytes
+ // or if the length increment would overflow for zero-sized types.
+ if self.len == self.buf.capacity() {
+ self.reserve(1);
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+ }
+
+ /// Tries to append an element to the back of a collection.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2];
+ /// vec.try_push(3).unwrap();
+ /// assert_eq!(vec, [1, 2, 3]);
+ /// ```
+ #[inline]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_push(&mut self, value: T) -> Result<(), TryReserveError> {
+ if self.len == self.buf.capacity() {
+ self.try_reserve(1)?;
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+ Ok(())
+ }
+
+ /// Removes the last element from a vector and returns it, or [`None`] if it
+ /// is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// assert_eq!(vec.pop(), Some(3));
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop(&mut self) -> Option<T> {
+ if self.len == 0 {
+ None
+ } else {
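+ // SAFETY: `self.len > 0` here, so after the decrement `self.len`
+ // indexes the last initialized element, which is read out exactly once.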
+ unsafe {
+ self.len -= 1;
+ Some(ptr::read(self.as_ptr().add(self.len())))
+ }
+ }
+ }
+
+ /// Moves all the elements of `other` into `Self`, leaving `other` empty.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the number of elements in the vector overflows a `usize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// let mut vec2 = vec![4, 5, 6];
+ /// vec.append(&mut vec2);
+ /// assert_eq!(vec, [1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(vec2, []);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "append", since = "1.4.0")]
+ pub fn append(&mut self, other: &mut Self) {
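+ // SAFETY: `append_elements` copies `other`'s elements into `self`, and
+ // `other.set_len(0)` then forgets them so they are not dropped twice.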
+ unsafe {
+ self.append_elements(other.as_slice() as _);
+ other.set_len(0);
+ }
+ }
+
+ /// Appends elements to `Self` from another buffer.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ unsafe fn append_elements(&mut self, other: *const [T]) {
+ let count = unsafe { (*other).len() };
+ self.reserve(count);
+ let len = self.len();
+ unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
+ self.len += count;
+ }
+
+ /// Tries to append elements to `Self` from another buffer.
+ #[inline]
+ unsafe fn try_append_elements(&mut self, other: *const [T]) -> Result<(), TryReserveError> {
+ let count = unsafe { (*other).len() };
+ self.try_reserve(count)?;
+ let len = self.len();
+ unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
+ self.len += count;
+ Ok(())
+ }
+
+ /// Creates a draining iterator that removes the specified range in the vector
+ /// and yields the removed items.
+ ///
+ /// When the iterator **is** dropped, all elements in the range are removed
+ /// from the vector, even if the iterator was not fully consumed. If the
+ /// iterator **is not** dropped (with [`mem::forget`] for example), it is
+ /// unspecified how many elements are removed.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ /// let u: Vec<_> = v.drain(1..).collect();
+ /// assert_eq!(v, &[1]);
+ /// assert_eq!(u, &[2, 3]);
+ ///
+ /// // A full range clears the vector
+ /// v.drain(..);
+ /// assert_eq!(v, &[]);
+ /// ```
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, A>
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // When the Drain is first created, it shortens the length of
+ // the source vector to make sure no uninitialized or moved-from elements
+ // are accessible at all if the Drain's destructor never gets to run.
+ //
+ // Drain will ptr::read out the values to remove.
+ // When finished, remaining tail of the vec is copied back to cover
+ // the hole, and the vector length is restored to the new length.
+ //
+ let len = self.len();
+ let Range { start, end } = slice::range(range, ..len);
+
+ unsafe {
+ // Set self's length to `start`, to be safe in case the Drain is leaked.
+ self.set_len(start);
+ // Use the borrow in the IterMut to indicate borrowing behavior of the
+ // whole Drain iterator (like &mut T).
+ let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(start), end - start);
+ Drain {
+ tail_start: end,
+ tail_len: len - end,
+ iter: range_slice.iter(),
+ vec: NonNull::from(self),
+ }
+ }
+ }
+
+ /// Clears the vector, removing all values.
+ ///
+ /// Note that this method has no effect on the allocated capacity
+ /// of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ ///
+ /// v.clear();
+ ///
+ /// assert!(v.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ self.truncate(0)
+ }
+
+ /// Returns the number of elements in the vector, also referred to
+ /// as its 'length'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = vec![1, 2, 3];
+ /// assert_eq!(a.len(), 3);
+ /// ```
+ #[doc(alias = "length")]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Returns `true` if the vector contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = Vec::new();
+ /// assert!(v.is_empty());
+ ///
+ /// v.push(1);
+ /// assert!(!v.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Splits the collection into two at the given index.
+ ///
+ /// Returns a newly allocated vector containing the elements in the range
+ /// `[at, len)`. After the call, the original vector will be left containing
+ /// the elements `[0, at)` with its previous capacity unchanged.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// let vec2 = vec.split_off(1);
+ /// assert_eq!(vec, [1]);
+ /// assert_eq!(vec2, [2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[must_use = "use `.truncate()` if you don't need the other half"]
+ #[stable(feature = "split_off", since = "1.4.0")]
+ pub fn split_off(&mut self, at: usize) -> Self
+ where
+ A: Clone,
+ {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(at: usize, len: usize) -> ! {
+ panic!("`at` split index (is {}) should be <= len (is {})", at, len);
+ }
+
+ if at > self.len() {
+ assert_failed(at, self.len());
+ }
+
+ if at == 0 {
+ // the new vector can take over the original buffer and avoid the copy
+ return mem::replace(
+ self,
+ Vec::with_capacity_in(self.capacity(), self.allocator().clone()),
+ );
+ }
+
+ let other_len = self.len - at;
+ let mut other = Vec::with_capacity_in(other_len, self.allocator().clone());
+
+ // Unsafely `set_len` and copy items to `other`.
+ unsafe {
+ self.set_len(at);
+ other.set_len(other_len);
+
+ ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len());
+ }
+ other
+ }
+
+ /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with the result of
+ /// calling the closure `f`. The return values from `f` will end up
+ /// in the `Vec` in the order they have been generated.
+ ///
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method uses a closure to create new values on every push. If
+ /// you'd rather [`Clone`] a given value, use [`Vec::resize`]. If you
+ /// want to use the [`Default`] trait to generate values, you can
+ /// pass [`Default::default`] as the second argument.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.resize_with(5, Default::default);
+ /// assert_eq!(vec, [1, 2, 3, 0, 0]);
+ ///
+ /// let mut vec = vec![];
+ /// let mut p = 1;
+ /// vec.resize_with(4, || { p *= 2; p });
+ /// assert_eq!(vec, [2, 4, 8, 16]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_resize_with", since = "1.33.0")]
+ pub fn resize_with<F>(&mut self, new_len: usize, f: F)
+ where
+ F: FnMut() -> T,
+ {
+ let len = self.len();
+ if new_len > len {
+ self.extend_with(new_len - len, ExtendFunc(f));
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Consumes and leaks the `Vec`, returning a mutable reference to the contents,
+ /// `&'a mut [T]`. Note that the type `T` must outlive the chosen lifetime
+ /// `'a`. If the type has only static references, or none at all, then this
+ /// may be chosen to be `'static`.
+ ///
+ /// This function is similar to the [`leak`][Box::leak] function on [`Box`]
+ /// except that there is no way to recover the leaked memory.
+ ///
+ /// This function is mainly useful for data that lives for the remainder of
+ /// the program's life. Dropping the returned reference will cause a memory
+ /// leak.
+ ///
+ /// # Examples
+ ///
+ /// Simple usage:
+ ///
+ /// ```
+ /// let x = vec![1, 2, 3];
+ /// let static_ref: &'static mut [usize] = x.leak();
+ /// static_ref[0] += 1;
+ /// assert_eq!(static_ref, &[2, 2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_leak", since = "1.47.0")]
+ #[inline]
+ pub fn leak<'a>(self) -> &'a mut [T]
+ where
+ A: 'a,
+ {
+ Box::leak(self.into_boxed_slice())
+ }
+
+ /// Returns the remaining spare capacity of the vector as a slice of
+ /// `MaybeUninit<T>`.
+ ///
+ /// The returned slice can be used to fill the vector with data (e.g. by
+ /// reading from a file) before marking the data as initialized using the
+ /// [`set_len`] method.
+ ///
+ /// [`set_len`]: Vec::set_len
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_spare_capacity, maybe_uninit_extra)]
+ ///
+ /// // Allocate vector big enough for 10 elements.
+ /// let mut v = Vec::with_capacity(10);
+ ///
+ /// // Fill in the first 3 elements.
+ /// let uninit = v.spare_capacity_mut();
+ /// uninit[0].write(0);
+ /// uninit[1].write(1);
+ /// uninit[2].write(2);
+ ///
+ /// // Mark the first 3 elements of the vector as being initialized.
+ /// unsafe {
+ /// v.set_len(3);
+ /// }
+ ///
+ /// assert_eq!(&v, &[0, 1, 2]);
+ /// ```
+ #[unstable(feature = "vec_spare_capacity", issue = "75017")]
+ #[inline]
+ pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<T>] {
+ // Note:
+ // This method is not implemented in terms of `split_at_spare_mut`,
+ // to prevent invalidation of pointers to the buffer.
+ unsafe {
+ slice::from_raw_parts_mut(
+ self.as_mut_ptr().add(self.len) as *mut MaybeUninit<T>,
+ self.buf.capacity() - self.len,
+ )
+ }
+ }
+
+ /// Returns vector content as a slice of `T`, along with the remaining spare
+ /// capacity of the vector as a slice of `MaybeUninit<T>`.
+ ///
+ /// The returned spare capacity slice can be used to fill the vector with data
+ /// (e.g. by reading from a file) before marking the data as initialized using
+ /// the [`set_len`] method.
+ ///
+ /// [`set_len`]: Vec::set_len
+ ///
+ /// Note that this is a low-level API, which should be used with care for
+ /// optimization purposes. If you need to append data to a `Vec`
+ /// you can use [`push`], [`extend`], [`extend_from_slice`],
+ /// [`extend_from_within`], [`insert`], [`append`], [`resize`] or
+ /// [`resize_with`], depending on your exact needs.
+ ///
+ /// [`push`]: Vec::push
+ /// [`extend`]: Vec::extend
+ /// [`extend_from_slice`]: Vec::extend_from_slice
+ /// [`extend_from_within`]: Vec::extend_from_within
+ /// [`insert`]: Vec::insert
+ /// [`append`]: Vec::append
+ /// [`resize`]: Vec::resize
+ /// [`resize_with`]: Vec::resize_with
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_split_at_spare, maybe_uninit_extra)]
+ ///
+ /// let mut v = vec![1, 1, 2];
+ ///
+ /// // Reserve additional space big enough for 10 elements.
+ /// v.reserve(10);
+ ///
+ /// let (init, uninit) = v.split_at_spare_mut();
+ /// let sum = init.iter().copied().sum::<u32>();
+ ///
+ /// // Fill in the next 4 elements.
+ /// uninit[0].write(sum);
+ /// uninit[1].write(sum * 2);
+ /// uninit[2].write(sum * 3);
+ /// uninit[3].write(sum * 4);
+ ///
+ /// // Mark the 4 elements of the vector as being initialized.
+ /// unsafe {
+ /// let len = v.len();
+ /// v.set_len(len + 4);
+ /// }
+ ///
+ /// assert_eq!(&v, &[1, 1, 2, 4, 8, 12, 16]);
+ /// ```
+ #[unstable(feature = "vec_split_at_spare", issue = "81944")]
+ #[inline]
+ pub fn split_at_spare_mut(&mut self) -> (&mut [T], &mut [MaybeUninit<T>]) {
+ // SAFETY:
+ // - len is ignored and so never changed
+ let (init, spare, _) = unsafe { self.split_at_spare_mut_with_len() };
+ (init, spare)
+ }
+
+ /// Safety: changing the returned `.2` (`&mut usize`) is considered the same as calling `.set_len(_)`.
+ ///
+ /// This method provides unique access to all vec parts at once in `extend_from_within`.
+ unsafe fn split_at_spare_mut_with_len(
+ &mut self,
+ ) -> (&mut [T], &mut [MaybeUninit<T>], &mut usize) {
+ let Range { start: ptr, end: spare_ptr } = self.as_mut_ptr_range();
+ let spare_ptr = spare_ptr.cast::<MaybeUninit<T>>();
+ let spare_len = self.buf.capacity() - self.len;
+
+ // SAFETY:
+ // - `ptr` is guaranteed to be valid for `len` elements
+ // - `spare_ptr` points one element past the initialized part of the buffer, so it doesn't overlap with `initialized`
+ unsafe {
+ let initialized = slice::from_raw_parts_mut(ptr, self.len);
+ let spare = slice::from_raw_parts_mut(spare_ptr, spare_len);
+
+ (initialized, spare, &mut self.len)
+ }
+ }
+}
+
+impl<T: Clone, A: Allocator> Vec<T, A> {
+ /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with `value`.
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method requires `T` to implement [`Clone`],
+ /// in order to be able to clone the passed value.
+ /// If you need more flexibility (or want to rely on [`Default`] instead of
+ /// [`Clone`]), use [`Vec::resize_with`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["hello"];
+ /// vec.resize(3, "world");
+ /// assert_eq!(vec, ["hello", "world", "world"]);
+ ///
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.resize(2, 0);
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_resize", since = "1.5.0")]
+ pub fn resize(&mut self, new_len: usize, value: T) {
+ let len = self.len();
+
+ if new_len > len {
+ self.extend_with(new_len - len, ExtendElement(value))
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Tries to resize the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with `value`.
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method requires `T` to implement [`Clone`],
+ /// in order to be able to clone the passed value.
+ /// If you need more flexibility (or want to rely on [`Default`] instead of
+ /// [`Clone`]), use [`Vec::resize_with`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["hello"];
+ /// vec.try_resize(3, "world").unwrap();
+ /// assert_eq!(vec, ["hello", "world", "world"]);
+ ///
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.try_resize(2, 0).unwrap();
+ /// assert_eq!(vec, [1, 2]);
+ ///
+ /// let mut vec = vec![42];
+ /// let result = vec.try_resize(usize::MAX, 0);
+ /// assert!(result.is_err());
+ /// ```
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_resize(&mut self, new_len: usize, value: T) -> Result<(), TryReserveError> {
+ let len = self.len();
+
+ if new_len > len {
+ self.try_extend_with(new_len - len, ExtendElement(value))
+ } else {
+ self.truncate(new_len);
+ Ok(())
+ }
+ }
+
+ /// Clones and appends all elements in a slice to the `Vec`.
+ ///
+ /// Iterates over the slice `other`, clones each element, and then appends
+ /// it to this `Vec`. The `other` vector is traversed in-order.
+ ///
+ /// Note that this function is the same as [`extend`] except that it is
+ /// specialized to work with slices instead. If and when Rust gets
+ /// specialization this function will likely be deprecated (but still
+ /// available).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.extend_from_slice(&[2, 3, 4]);
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// [`extend`]: Vec::extend
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_extend_from_slice", since = "1.6.0")]
+ pub fn extend_from_slice(&mut self, other: &[T]) {
+ self.spec_extend(other.iter())
+ }
+
+ /// Tries to clone and append all elements in a slice to the `Vec`.
+ ///
+ /// Iterates over the slice `other`, clones each element, and then appends
+ /// it to this `Vec`. The `other` vector is traversed in-order.
+ ///
+ /// Note that this function is the same as [`extend`] except that it is
+ /// specialized to work with slices instead. If and when Rust gets
+ /// specialization this function will likely be deprecated (but still
+ /// available).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.try_extend_from_slice(&[2, 3, 4]).unwrap();
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// [`extend`]: Vec::extend
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_extend_from_slice(&mut self, other: &[T]) -> Result<(), TryReserveError> {
+ self.try_spec_extend(other.iter())
+ }
+
+ /// Copies elements from `src` range to the end of the vector.
+ ///
+ /// ## Examples
+ ///
+ /// ```
+ /// let mut vec = vec![0, 1, 2, 3, 4];
+ ///
+ /// vec.extend_from_within(2..);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4]);
+ ///
+ /// vec.extend_from_within(..2);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4, 0, 1]);
+ ///
+ /// vec.extend_from_within(4..8);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4, 0, 1, 4, 2, 3, 4]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_extend_from_within", since = "1.53.0")]
+ pub fn extend_from_within<R>(&mut self, src: R)
+ where
+ R: RangeBounds<usize>,
+ {
+ let range = slice::range(src, ..self.len());
+ self.reserve(range.len());
+
+ // SAFETY:
+ // - `slice::range` guarantees that the given range is valid for indexing self
+ unsafe {
+ self.spec_extend_from_within(range);
+ }
+ }
+}
+
+// This code generalizes `extend_with_{element,default}`.
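+// `next` produces each intermediate element, while `last` consumes the
+// generator so the final element can be produced without an extra clone.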
+trait ExtendWith<T> {
+ fn next(&mut self) -> T;
+ fn last(self) -> T;
+}
+
+struct ExtendElement<T>(T);
+impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
+ fn next(&mut self) -> T {
+ self.0.clone()
+ }
+ fn last(self) -> T {
+ self.0
+ }
+}
+
+struct ExtendDefault;
+impl<T: Default> ExtendWith<T> for ExtendDefault {
+ fn next(&mut self) -> T {
+ Default::default()
+ }
+ fn last(self) -> T {
+ Default::default()
+ }
+}
+
+struct ExtendFunc<F>(F);
+impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
+ fn next(&mut self) -> T {
+ (self.0)()
+ }
+ fn last(mut self) -> T {
+ (self.0)()
+ }
+}
+
+impl<T, A: Allocator> Vec<T, A> {
+ #[cfg(not(no_global_oom_handling))]
+ /// Extend the vector by `n` values, using the given generator.
+ fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
+ self.reserve(n);
+
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ // Use SetLenOnDrop to work around a compiler bug: it may not realize
+ // that the stores through `ptr` and the length update performed by
+ // `set_len` don't alias.
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+
+ // Write all elements except the last one
+ for _ in 1..n {
+ ptr::write(ptr, value.next());
+ ptr = ptr.offset(1);
+ // Increment the length in every step in case next() panics
+ local_len.increment_len(1);
+ }
+
+ if n > 0 {
+ // We can write the last element directly without cloning needlessly
+ ptr::write(ptr, value.last());
+ local_len.increment_len(1);
+ }
+
+ // len set by scope guard
+ }
+ }
+
+ /// Try to extend the vector by `n` values, using the given generator.
+ fn try_extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) -> Result<(), TryReserveError> {
+ self.try_reserve(n)?;
+
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ // Use SetLenOnDrop to work around a compiler bug: it may not realize
+ // that the stores through `ptr` and the length update performed by
+ // `set_len` don't alias.
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+
+ // Write all elements except the last one
+ for _ in 1..n {
+ ptr::write(ptr, value.next());
+ ptr = ptr.offset(1);
+ // Increment the length in every step in case next() panics
+ local_len.increment_len(1);
+ }
+
+ if n > 0 {
+ // We can write the last element directly without cloning needlessly
+ ptr::write(ptr, value.last());
+ local_len.increment_len(1);
+ }
+
+ // len set by scope guard
+ Ok(())
+ }
+ }
+}
+
+impl<T: PartialEq, A: Allocator> Vec<T, A> {
+ /// Removes consecutive repeated elements in the vector according to the
+ /// [`PartialEq`] trait implementation.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 2, 3, 2];
+ ///
+ /// vec.dedup();
+ ///
+ /// assert_eq!(vec, [1, 2, 3, 2]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn dedup(&mut self) {
+ self.dedup_by(|a, b| a == b)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Internal methods and functions
+////////////////////////////////////////////////////////////////////////////////
+
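+// `from_elem` backs the `vec![elem; n]` macro; the `SpecFromElem` trait lets
+// common element types (e.g. zero-valued integers) take optimized paths.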
+#[doc(hidden)]
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
+ <T as SpecFromElem>::from_elem(elem, n, Global)
+}
+
+#[doc(hidden)]
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "allocator_api", issue = "32838")]
+pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
+ <T as SpecFromElem>::from_elem(elem, n, alloc)
+}
+
+trait ExtendFromWithinSpec {
+ /// # Safety
+ ///
+ /// - `src` needs to be valid index
+ /// - `self.capacity() - self.len()` must be `>= src.len()`
+ unsafe fn spec_extend_from_within(&mut self, src: Range<usize>);
+}
+
+impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
+ default unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
+ // SAFETY:
+ // - len is increased only after initializing elements
+ let (this, spare, len) = unsafe { self.split_at_spare_mut_with_len() };
+
+ // SAFETY:
+ // - the caller guarantees that `src` is a valid index
+ let to_clone = unsafe { this.get_unchecked(src) };
+
+ iter::zip(to_clone, spare)
+ .map(|(src, dst)| dst.write(src.clone()))
+ // Note:
+ // - Element was just initialized with `MaybeUninit::write`, so it's ok to increase len
+ // - len is increased after each element to prevent leaks (see issue #82533)
+ .for_each(|_| *len += 1);
+ }
+}
+
+impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
+ unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
+ let count = src.len();
+ {
+ let (init, spare) = self.split_at_spare_mut();
+
+ // SAFETY:
+ // - the caller guarantees that `src` is a valid index
+ let source = unsafe { init.get_unchecked(src) };
+
+ // SAFETY:
+ // - Both pointers are created from unique slice references (`&mut [_]`)
+ // so they are valid and do not overlap.
+ // - Elements are `Copy`, so it's OK to copy them without doing
+ // anything with the original values
+ // - `count` is equal to the len of `source`, so source is valid for
+ // `count` reads
+ // - `.reserve(count)` guarantees that `spare.len() >= count` so spare
+ // is valid for `count` writes
+ unsafe { ptr::copy_nonoverlapping(source.as_ptr(), spare.as_mut_ptr() as _, count) };
+ }
+
+ // SAFETY:
+ // - The elements were just initialized by `copy_nonoverlapping`
+ self.len += count;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Common trait implementations for Vec
+////////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> ops::Deref for Vec<T, A> {
+ type Target = [T];
+
+ fn deref(&self) -> &[T] {
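+ // SAFETY: the first `self.len` elements are initialized, and `as_ptr`
+ // returns a valid, properly aligned pointer to the buffer.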
+ unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
+ fn deref_mut(&mut self) -> &mut [T] {
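+ // SAFETY: same justification as `deref`; unique access is guaranteed
+ // by `&mut self`.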
+ unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
+ #[cfg(not(test))]
+ fn clone(&self) -> Self {
+ let alloc = self.allocator().clone();
+ <[T]>::to_vec_in(&**self, alloc)
+ }
+
+ // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
+ // required for this method definition, is not available. Instead use the
+ // `slice::to_vec` function which is only available with cfg(test)
+ // NB see the slice::hack module in slice.rs for more information
+ #[cfg(test)]
+ fn clone(&self) -> Self {
+ let alloc = self.allocator().clone();
+ crate::slice::to_vec(&**self, alloc)
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ // drop anything that will not be overwritten
+ self.truncate(other.len());
+
+ // self.len <= other.len due to the truncate above, so the
+ // slices here are always in-bounds.
+ let (init, tail) = other.split_at(self.len());
+
+ // reuse the contained values' allocations/resources.
+ self.clone_from_slice(init);
+ self.extend_from_slice(tail);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Hash, A: Allocator> Hash for Vec<T, A> {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ Hash::hash(&**self, state)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "vector indices are of type `usize` or ranges of `usize`",
+ label = "vector indices are of type `usize` or ranges of `usize`"
+)]
+impl<T, I: SliceIndex<[T]>, A: Allocator> Index<I> for Vec<T, A> {
+ type Output = I::Output;
+
+ #[inline]
+ fn index(&self, index: I) -> &Self::Output {
+ Index::index(&**self, index)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "vector indices are of type `usize` or ranges of `usize`",
+ label = "vector indices are of type `usize` or ranges of `usize`"
+)]
+impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
+ #[inline]
+ fn index_mut(&mut self, index: I) -> &mut Self::Output {
+ IndexMut::index_mut(&mut **self, index)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> FromIterator<T> for Vec<T> {
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Vec<T> {
+ <Self as SpecFromIter<T, I::IntoIter>>::from_iter(iter.into_iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> IntoIterator for Vec<T, A> {
+ type Item = T;
+ type IntoIter = IntoIter<T, A>;
+
+ /// Creates a consuming iterator, that is, one that moves each value out of
+ /// the vector (from start to end). The vector cannot be used after calling
+ /// this.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = vec!["a".to_string(), "b".to_string()];
+ /// for s in v.into_iter() {
+ /// // s has type String, not &String
+ /// println!("{}", s);
+ /// }
+ /// ```
+ #[inline]
+ fn into_iter(self) -> IntoIter<T, A> {
+ unsafe {
+ let mut me = ManuallyDrop::new(self);
+ let alloc = ptr::read(me.allocator());
+ let begin = me.as_mut_ptr();
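+ // For zero-sized types the buffer pointer never moves, so `end` is
+ // computed by offsetting `len` bytes via `arith_offset`; for sized
+ // types it is one past the last element.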
+ let end = if mem::size_of::<T>() == 0 {
+ arith_offset(begin as *const i8, me.len() as isize) as *const T
+ } else {
+ begin.add(me.len()) as *const T
+ };
+ let cap = me.buf.capacity();
+ IntoIter {
+ buf: NonNull::new_unchecked(begin),
+ phantom: PhantomData,
+ cap,
+ alloc,
+ ptr: begin,
+ end,
+ }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator> IntoIterator for &'a Vec<T, A> {
+ type Item = &'a T;
+ type IntoIter = slice::Iter<'a, T>;
+
+ fn into_iter(self) -> slice::Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
+ type Item = &'a mut T;
+ type IntoIter = slice::IterMut<'a, T>;
+
+ fn into_iter(self) -> slice::IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Extend<T> for Vec<T, A> {
+ #[inline]
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter())
+ }
+
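+ // `extend_one` and `extend_reserve` are unstable `Extend` trait hooks
+ // (feature `extend_one`) that let single-item extension skip the
+ // iterator machinery.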
+ #[inline]
+ fn extend_one(&mut self, item: T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+impl<T, A: Allocator> Vec<T, A> {
+ // leaf method to which various SpecFrom/SpecExtend implementations delegate when
+ // they have no further optimizations to apply
+ #[cfg(not(no_global_oom_handling))]
+ fn extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) {
+ // This is the case for a general iterator.
+ //
+ // This function should be the moral equivalent of:
+ //
+ // for item in iterator {
+ // self.push(item);
+ // }
+ while let Some(element) = iterator.next() {
+ let len = self.len();
+ if len == self.capacity() {
+ let (lower, _) = iterator.size_hint();
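+ // Reserve at least one more slot, using the iterator's lower
+ // size-hint bound; `saturating_add` guards against a `usize::MAX` hint.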
+ self.reserve(lower.saturating_add(1));
+ }
+ unsafe {
+ ptr::write(self.as_mut_ptr().add(len), element);
+ // NB can't overflow since we would have had to alloc the address space
+ self.set_len(len + 1);
+ }
+ }
+ }
+
+ // leaf method to which various SpecFrom/SpecExtend implementations delegate when
+ // they have no further optimizations to apply
+ fn try_extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) -> Result<(), TryReserveError> {
+ // This is the case for a general iterator.
+ //
+ // This function should be the moral equivalent of:
+ //
+ // for item in iterator {
+ // self.push(item);
+ // }
+ while let Some(element) = iterator.next() {
+ let len = self.len();
+ if len == self.capacity() {
+ let (lower, _) = iterator.size_hint();
+ self.try_reserve(lower.saturating_add(1))?;
+ }
+ unsafe {
+ ptr::write(self.as_mut_ptr().add(len), element);
+ // NB can't overflow since we would have had to alloc the address space
+ self.set_len(len + 1);
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Creates a splicing iterator that replaces the specified range in the vector
+ /// with the given `replace_with` iterator and yields the removed items.
+ /// `replace_with` does not need to be the same length as `range`.
+ ///
+ /// `range` is removed even if the iterator is not consumed until the end.
+ ///
+ /// It is unspecified how many elements are removed from the vector
+ /// if the `Splice` value is leaked.
+ ///
+ /// The input iterator `replace_with` is only consumed when the `Splice` value is dropped.
+ ///
+ /// This is optimal if:
+ ///
+ /// * The tail (elements in the vector after `range`) is empty,
+ /// * or `replace_with` yields no more elements than `range`’s length,
+ /// * or the lower bound of its `size_hint()` is exact.
+ ///
+ /// Otherwise, a temporary vector is allocated and the tail is moved twice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ /// let new = [7, 8];
+ /// let u: Vec<_> = v.splice(..2, new).collect();
+ /// assert_eq!(v, &[7, 8, 3]);
+ /// assert_eq!(u, &[1, 2]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "vec_splice", since = "1.21.0")]
+ pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter, A>
+ where
+ R: RangeBounds<usize>,
+ I: IntoIterator<Item = T>,
+ {
+ Splice { drain: self.drain(range), replace_with: replace_with.into_iter() }
+ }
+
+ /// Creates an iterator which uses a closure to determine if an element should be removed.
+ ///
+ /// If the closure returns true, then the element is removed and yielded.
+ /// If the closure returns false, the element will remain in the vector and will not be yielded
+ /// by the iterator.
+ ///
+ /// Using this method is equivalent to the following code:
+ ///
+ /// ```
+ /// # let some_predicate = |x: &mut i32| { *x == 2 || *x == 3 || *x == 6 };
+ /// # let mut vec = vec![1, 2, 3, 4, 5, 6];
+ /// let mut i = 0;
+ /// while i < vec.len() {
+ /// if some_predicate(&mut vec[i]) {
+ /// let val = vec.remove(i);
+ /// // your code here
+ /// } else {
+ /// i += 1;
+ /// }
+ /// }
+ ///
+ /// # assert_eq!(vec, vec![1, 4, 5]);
+ /// ```
+ ///
+ /// But `drain_filter` is easier to use. `drain_filter` is also more efficient,
+ /// because it can backshift the elements of the array in bulk.
+ ///
+ /// Note that `drain_filter` also lets you mutate every element in the filter closure,
+ /// regardless of whether you choose to keep or remove it.
+ ///
+ /// # Examples
+ ///
+ /// Splitting an array into evens and odds, reusing the original allocation:
+ ///
+ /// ```
+ /// #![feature(drain_filter)]
+ /// let mut numbers = vec![1, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15];
+ ///
+ /// let evens = numbers.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ /// let odds = numbers;
+ ///
+ /// assert_eq!(evens, vec![2, 4, 6, 8, 14]);
+ /// assert_eq!(odds, vec![1, 3, 5, 9, 11, 13, 15]);
+ /// ```
+ #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+ pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F, A>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ let old_len = self.len();
+
+ // Guard against us getting leaked (leak amplification)
+ unsafe {
+ self.set_len(0);
+ }
+
+ DrainFilter { vec: self, idx: 0, del: 0, old_len, pred: filter, panic_flag: false }
+ }
+}
+
+/// Extend implementation that copies elements out of references before pushing them onto the Vec.
+///
+/// This implementation is specialized for slice iterators, where it uses [`copy_from_slice`] to
+/// append the entire slice at once.
+///
+/// [`copy_from_slice`]: slice::copy_from_slice
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.spec_extend(iter.into_iter())
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &item: &'a T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+/// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
+
+/// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
+ fn drop(&mut self) {
+ unsafe {
+ // use drop for [T]
+ // use a raw slice to refer to the elements of the vector as the weakest
+ // necessary type; this avoids questions of validity in certain cases
+ ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.as_mut_ptr(), self.len))
+ }
+ // RawVec handles deallocation
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for Vec<T> {
+ /// Creates an empty `Vec<T>`.
+ fn default() -> Vec<T> {
+ Vec::new()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Vec<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> AsRef<Vec<T, A>> for Vec<T, A> {
+ fn as_ref(&self) -> &Vec<T, A> {
+ self
+ }
+}
+
+#[stable(feature = "vec_as_mut", since = "1.5.0")]
+impl<T, A: Allocator> AsMut<Vec<T, A>> for Vec<T, A> {
+ fn as_mut(&mut self) -> &mut Vec<T, A> {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> AsRef<[T]> for Vec<T, A> {
+ fn as_ref(&self) -> &[T] {
+ self
+ }
+}
+
+#[stable(feature = "vec_as_mut", since = "1.5.0")]
+impl<T, A: Allocator> AsMut<[T]> for Vec<T, A> {
+ fn as_mut(&mut self) -> &mut [T] {
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> From<&[T]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(&[1, 2, 3][..]), vec![1, 2, 3]);
+ /// ```
+ #[cfg(not(test))]
+ fn from(s: &[T]) -> Vec<T> {
+ s.to_vec()
+ }
+ #[cfg(test)]
+ fn from(s: &[T]) -> Vec<T> {
+ crate::slice::to_vec(s, Global)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_from_mut", since = "1.19.0")]
+impl<T: Clone> From<&mut [T]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(&mut [1, 2, 3][..]), vec![1, 2, 3]);
+ /// ```
+ #[cfg(not(test))]
+ fn from(s: &mut [T]) -> Vec<T> {
+ s.to_vec()
+ }
+ #[cfg(test)]
+ fn from(s: &mut [T]) -> Vec<T> {
+ crate::slice::to_vec(s, Global)
+ }
+}
+
+#[stable(feature = "vec_from_array", since = "1.44.0")]
+impl<T, const N: usize> From<[T; N]> for Vec<T> {
+ #[cfg(not(test))]
+ fn from(s: [T; N]) -> Vec<T> {
+ <[T]>::into_vec(box s)
+ }
+ /// Allocate a `Vec<T>` and move `s`'s items into it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from([1, 2, 3]), vec![1, 2, 3]);
+ /// ```
+ #[cfg(test)]
+ fn from(s: [T; N]) -> Vec<T> {
+ crate::slice::into_vec(box s)
+ }
+}
+
+#[stable(feature = "vec_from_cow_slice", since = "1.14.0")]
+impl<'a, T> From<Cow<'a, [T]>> for Vec<T>
+where
+ [T]: ToOwned<Owned = Vec<T>>,
+{
+ /// Convert a clone-on-write slice into a vector.
+ ///
+ /// If `s` already owns a `Vec<T>`, it will be returned directly.
+ /// If `s` is borrowing a slice, a new `Vec<T>` will be allocated and
+ /// filled by cloning `s`'s items into it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// let o: Cow<[i32]> = Cow::Owned(vec![1, 2, 3]);
+ /// let b: Cow<[i32]> = Cow::Borrowed(&[1, 2, 3]);
+ /// assert_eq!(Vec::from(o), Vec::from(b));
+ /// ```
+ fn from(s: Cow<'a, [T]>) -> Vec<T> {
+ s.into_owned()
+ }
+}
+
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(test))]
+#[stable(feature = "vec_from_box", since = "1.18.0")]
+impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
+ /// Convert a boxed slice into a vector by transferring ownership of
+ /// the existing heap allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let b: Box<[i32]> = vec![1, 2, 3].into_boxed_slice();
+ /// assert_eq!(Vec::from(b), vec![1, 2, 3]);
+ /// ```
+ fn from(s: Box<[T], A>) -> Self {
+ s.into_vec()
+ }
+}
+
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(no_global_oom_handling))]
+#[cfg(not(test))]
+#[stable(feature = "box_from_vec", since = "1.20.0")]
+impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
+ /// Convert a vector into a boxed slice.
+ ///
+ /// If `v` has excess capacity, its items will be moved into a
+ /// newly-allocated buffer with exactly the right capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Box::from(vec![1, 2, 3]), vec![1, 2, 3].into_boxed_slice());
+ /// ```
+ fn from(v: Vec<T, A>) -> Self {
+ v.into_boxed_slice()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<&str> for Vec<u8> {
+ /// Allocate a `Vec<u8>` and fill it with a UTF-8 string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from("123"), vec![b'1', b'2', b'3']);
+ /// ```
+ fn from(s: &str) -> Vec<u8> {
+ From::from(s.as_bytes())
+ }
+}
+
+#[stable(feature = "array_try_from_vec", since = "1.48.0")]
+impl<T, A: Allocator, const N: usize> TryFrom<Vec<T, A>> for [T; N] {
+ type Error = Vec<T, A>;
+
+ /// Gets the entire contents of the `Vec<T>` as an array,
+ /// if its size exactly matches that of the requested array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::convert::TryInto;
+ /// assert_eq!(vec![1, 2, 3].try_into(), Ok([1, 2, 3]));
+ /// assert_eq!(<Vec<i32>>::new().try_into(), Ok([]));
+ /// ```
+ ///
+ /// If the length doesn't match, the input comes back in `Err`:
+ /// ```
+ /// use std::convert::TryInto;
+ /// let r: Result<[i32; 4], _> = (0..10).collect::<Vec<_>>().try_into();
+ /// assert_eq!(r, Err(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]));
+ /// ```
+ ///
+ /// If you're fine with just getting a prefix of the `Vec<T>`,
+ /// you can call [`.truncate(N)`](Vec::truncate) first.
+ /// ```
+ /// use std::convert::TryInto;
+ /// let mut v = String::from("hello world").into_bytes();
+ /// v.sort();
+ /// v.truncate(2);
+ /// let [a, b]: [_; 2] = v.try_into().unwrap();
+ /// assert_eq!(a, b' ');
+ /// assert_eq!(b, b'd');
+ /// ```
+ fn try_from(mut vec: Vec<T, A>) -> Result<[T; N], Vec<T, A>> {
+ if vec.len() != N {
+ return Err(vec);
+ }
+
+ // SAFETY: `.set_len(0)` is always sound.
+ unsafe { vec.set_len(0) };
+
+ // SAFETY: A `Vec`'s pointer is always aligned properly, and
+ // the alignment the array needs is the same as the items.
+ // We checked earlier that we have sufficient items.
+ // The items will not double-drop as the `set_len`
+ // tells the `Vec` not to also drop them.
+ let array = unsafe { ptr::read(vec.as_ptr() as *const [T; N]) };
+ Ok(array)
+ }
+}
diff --git a/rust/alloc/vec/partial_eq.rs b/rust/alloc/vec/partial_eq.rs
new file mode 100644
index 000000000000..273e99bed488
--- /dev/null
+++ b/rust/alloc/vec/partial_eq.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::alloc::Allocator;
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::Cow;
+
+use super::Vec;
+
+macro_rules! __impl_slice_eq1 {
+ ([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?, #[$stability:meta]) => {
+ #[$stability]
+ impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
+ where
+ T: PartialEq<U>,
+ $($ty: $bound)?
+ {
+ #[inline]
+ fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
+ #[inline]
+ fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] }
+ }
+ }
+}
+
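+// For reference, the first `__impl_slice_eq1!` invocation below expands to
+// (roughly; the `#[inline]` attributes are elided here):
+//
+//     #[stable(feature = "rust1", since = "1.0.0")]
+//     impl<T, U, A: Allocator> PartialEq<Vec<U, A>> for Vec<T, A>
+//     where
+//         T: PartialEq<U>,
+//     {
+//         fn eq(&self, other: &Vec<U, A>) -> bool { self[..] == other[..] }
+//         fn ne(&self, other: &Vec<U, A>) -> bool { self[..] != other[..] }
+//     }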
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, Vec<U, A>, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
+__impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U], #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
+__impl_slice_eq1! { [A: Allocator] [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
+#[cfg(not(no_global_oom_handling))]
+__impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+#[cfg(not(no_global_oom_handling))]
+__impl_slice_eq1! { [] Cow<'_, [T]>, &[U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+#[cfg(not(no_global_oom_handling))]
+__impl_slice_eq1! { [] Cow<'_, [T]>, &mut [U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N], #[stable(feature = "rust1", since = "1.0.0")] }
+
+// NOTE: some less important impls are omitted to reduce code bloat
+// FIXME(Centril): Reconsider this?
+//__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], }
+//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], }
diff --git a/rust/alloc/vec/set_len_on_drop.rs b/rust/alloc/vec/set_len_on_drop.rs
new file mode 100644
index 000000000000..448bf5076a0b
--- /dev/null
+++ b/rust/alloc/vec/set_len_on_drop.rs
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
+//
+// The idea is that the length field copied into `SetLenOnDrop` is a local
+// variable which the optimizer can see does not alias with any stores through
+// the `Vec`'s data pointer. This is a workaround for alias analysis issue #32155.
+pub(super) struct SetLenOnDrop<'a> {
+ len: &'a mut usize,
+ local_len: usize,
+}
+
+impl<'a> SetLenOnDrop<'a> {
+ #[inline]
+ pub(super) fn new(len: &'a mut usize) -> Self {
+ SetLenOnDrop { local_len: *len, len }
+ }
+
+ #[inline]
+ pub(super) fn increment_len(&mut self, increment: usize) {
+ self.local_len += increment;
+ }
+}
+
+impl Drop for SetLenOnDrop<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ *self.len = self.local_len;
+ }
+}
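+
+// A typical usage pattern (illustrative sketch, mirroring the use in
+// `spec_extend.rs`):
+//
+//     let mut local_len = SetLenOnDrop::new(&mut self.len);
+//     iterator.for_each(move |element| {
+//         ptr::write(ptr, element);
+//         ptr = ptr.offset(1);
+//         local_len.increment_len(1);
+//     });
+//     // When `local_len` goes out of scope, `self.len` is written back once,
+//     // even if the iterator panics.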
diff --git a/rust/alloc/vec/spec_extend.rs b/rust/alloc/vec/spec_extend.rs
new file mode 100644
index 000000000000..5a64c7ce2393
--- /dev/null
+++ b/rust/alloc/vec/spec_extend.rs
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::alloc::Allocator;
+use crate::vec::TryReserveError;
+use core::iter::TrustedLen;
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::{IntoIter, SetLenOnDrop, Vec};
+
+// Specialization trait used for Vec::extend
+#[cfg(not(no_global_oom_handling))]
+pub(super) trait SpecExtend<T, I> {
+ fn spec_extend(&mut self, iter: I);
+}
+
+// Specialization trait used for Vec::try_extend
+pub(super) trait TrySpecExtend<T, I> {
+ fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError>;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+ I: Iterator<Item = T>,
+{
+ default fn spec_extend(&mut self, iter: I) {
+ self.extend_desugared(iter)
+ }
+}
+
+impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
+where
+ I: Iterator<Item = T>,
+{
+ default fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError> {
+ self.try_extend_desugared(iter)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+ I: TrustedLen<Item = T>,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ // This is the case for a TrustedLen iterator.
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.reserve(additional);
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr, element);
+ ptr = ptr.offset(1);
+ // NB can't overflow since we would have had to alloc the address space
+ local_len.increment_len(1);
+ });
+ }
+ } else {
+ // Per TrustedLen contract a `None` upper bound means that the iterator length
+ // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
+ // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
+ // This avoids additional codegen for a fallback code path which would eventually
+ // panic anyway.
+ panic!("capacity overflow");
+ }
+ }
+}
+
+impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
+where
+ I: TrustedLen<Item = T>,
+{
+ default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
+ // This is the case for a TrustedLen iterator.
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.try_reserve(additional)?;
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr, element);
+ ptr = ptr.offset(1);
+ // NB can't overflow since we would have had to alloc the address space
+ local_len.increment_len(1);
+ });
+ }
+ Ok(())
+ } else {
+ Err(TryReserveError::CapacityOverflow)
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
+ fn spec_extend(&mut self, mut iterator: IntoIter<T>) {
+ unsafe {
+ self.append_elements(iterator.as_slice() as _);
+ }
+ iterator.ptr = iterator.end;
+ }
+}
+
+impl<T, A: Allocator> TrySpecExtend<T, IntoIter<T>> for Vec<T, A> {
+ fn try_spec_extend(&mut self, mut iterator: IntoIter<T>) -> Result<(), TryReserveError> {
+ unsafe {
+ self.try_append_elements(iterator.as_slice() as _)?;
+ }
+ iterator.ptr = iterator.end;
+ Ok(())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
+where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ self.spec_extend(iterator.cloned())
+ }
+}
+
+impl<'a, T: 'a, I, A: Allocator + 'a> TrySpecExtend<&'a T, I> for Vec<T, A>
+where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+{
+ default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
+ self.try_spec_extend(iterator.cloned())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+where
+ T: Copy,
+{
+ fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
+ let slice = iterator.as_slice();
+ unsafe { self.append_elements(slice) };
+ }
+}
+
+impl<'a, T: 'a, A: Allocator + 'a> TrySpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+where
+ T: Copy,
+{
+ fn try_spec_extend(&mut self, iterator: slice::Iter<'a, T>) -> Result<(), TryReserveError> {
+ let slice = iterator.as_slice();
+ unsafe { self.try_append_elements(slice) }
+ }
+}
diff --git a/rust/bindgen_parameters b/rust/bindgen_parameters
new file mode 100644
index 000000000000..c2cc4a88234e
--- /dev/null
+++ b/rust/bindgen_parameters
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+--opaque-type xregs_state
+--opaque-type desc_struct
+--opaque-type arch_lbr_state
+--opaque-type local_apic
+
+# If SMP is disabled, `arch_spinlock_t` is defined as a zero-sized type (ZST),
+# which triggers a Rust warning. We don't need to peek into it anyway.
+--opaque-type spinlock
+
+# `seccomp`'s comment gets understood as a doctest
+--no-doc-comments
diff --git a/rust/build_error.rs b/rust/build_error.rs
new file mode 100644
index 000000000000..d47fa8393cbc
--- /dev/null
+++ b/rust/build_error.rs
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Build-time error.
+//!
+//! This crate provides a function, `build_error`, which will panic at
+//! compile time if executed in const context, and will cause a build error
+//! if not executed at compile time and the optimizer does not optimize away
+//! the call.
+//!
+//! It is used by `build_assert!` in the kernel crate, allowing checking of
+//! conditions that could be checked statically, but cannot be enforced in
+//! Rust yet (e.g. some checks can be performed in `const` functions, but
+//! those functions may still be called at runtime).
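+//!
+//! For example (illustrative), in a function the optimizer can fully evaluate:
+//!
+//! ```ignore
+//! #[inline(always)]
+//! fn check(x: usize) {
+//!     if x > 10 {
+//!         build_error("`x` must not exceed 10");
+//!     }
+//! }
+//! ```
+//!
+//! If the optimizer can prove `x <= 10` at every call site, the call is
+//! removed; otherwise the `build_error` symbol survives in the object file
+//! and the build is rejected (or merely warned about, under
+//! `CONFIG_RUST_BUILD_ASSERT_WARN`).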
+
+#![no_std]
+#![feature(const_panic, core_panic)]
+
+/// Panics if executed in const context, or triggers a build error if not.
+#[inline(never)]
+#[cold]
+#[no_mangle]
+#[track_caller]
+pub const fn build_error(msg: &'static str) -> ! {
+ // Could also be `panic!(msg)` to avoid using unstable feature `core_panic`,
+ // but it is not allowed in Rust 2021, while `panic!("{}", msg)` could not
+ // yet be used in const context.
+ core::panicking::panic(msg);
+}
+
+#[cfg(CONFIG_RUST_BUILD_ASSERT_WARN)]
+#[link_section = ".gnu.warning.build_error"]
+#[used]
+static BUILD_ERROR_WARNING: [u8; 45] = *b"call to build_error present after compilation";
diff --git a/rust/compiler_builtins.rs b/rust/compiler_builtins.rs
new file mode 100644
index 000000000000..cb4bbf7be4e3
--- /dev/null
+++ b/rust/compiler_builtins.rs
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Our own `compiler_builtins`.
+//!
+//! Rust provides [`compiler_builtins`] as a port of LLVM's [`compiler-rt`].
+//! Since we do not need the vast majority of them, we avoid the dependency
+//! by providing this file.
+//!
+//! At the moment, some builtins are required that should not be. For instance,
+//! [`core`] has floating-point functionality which we should not be compiling
+//! in. We will work with upstream [`core`] to provide feature flags to disable
+//! the parts we do not need. For the moment, for simplicity, we define them
+//! to [`panic!`] at runtime in order to catch mistakes, instead of performing
+//! surgery on `core.o`.
+//!
+//! In any case, all these symbols are weakened to ensure we do not override
+//! those that may be provided by the rest of the kernel.
+//!
+//! [`compiler_builtins`]: https://github.com/rust-lang/compiler-builtins
+//! [`compiler-rt`]: https://compiler-rt.llvm.org/
+
+#![feature(compiler_builtins)]
+#![compiler_builtins]
+#![no_builtins]
+#![no_std]
+
+macro_rules! define_panicking_intrinsics(
+ ($reason: tt, { $($ident: ident, )* }) => {
+ $(
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern "C" fn $ident() {
+ panic!($reason);
+ }
+ )*
+ }
+);
+
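+// Each identifier listed below expands to a panicking stub, roughly:
+//
+//     #[doc(hidden)]
+//     #[no_mangle]
+//     pub extern "C" fn __addsf3() {
+//         panic!("`f32` should not be used");
+//     }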
+define_panicking_intrinsics!("`f32` should not be used", {
+ __addsf3,
+ __addsf3vfp,
+ __aeabi_fcmpeq,
+ __aeabi_ul2f,
+ __divsf3,
+ __divsf3vfp,
+ __eqsf2,
+ __eqsf2vfp,
+ __fixsfdi,
+ __fixsfsi,
+ __fixsfti,
+ __fixunssfdi,
+ __fixunssfsi,
+ __fixunssfti,
+ __floatdisf,
+ __floatsisf,
+ __floattisf,
+ __floatundisf,
+ __floatunsisf,
+ __floatuntisf,
+ __gesf2,
+ __gesf2vfp,
+ __gtsf2,
+ __gtsf2vfp,
+ __lesf2,
+ __lesf2vfp,
+ __ltsf2,
+ __ltsf2vfp,
+ __mulsf3,
+ __mulsf3vfp,
+ __nesf2,
+ __nesf2vfp,
+ __powisf2,
+ __subsf3,
+ __subsf3vfp,
+ __unordsf2,
+});
+
+define_panicking_intrinsics!("`f64` should not be used", {
+ __adddf3,
+ __adddf3vfp,
+ __aeabi_dcmpeq,
+ __aeabi_ul2d,
+ __divdf3,
+ __divdf3vfp,
+ __eqdf2,
+ __eqdf2vfp,
+ __fixdfdi,
+ __fixdfsi,
+ __fixdfti,
+ __fixunsdfdi,
+ __fixunsdfsi,
+ __fixunsdfti,
+ __floatdidf,
+ __floatsidf,
+ __floattidf,
+ __floatundidf,
+ __floatunsidf,
+ __floatuntidf,
+ __gedf2,
+ __gedf2vfp,
+ __gtdf2,
+ __gtdf2vfp,
+ __ledf2,
+ __ledf2vfp,
+ __ltdf2,
+ __ltdf2vfp,
+ __muldf3,
+ __muldf3vfp,
+ __nedf2,
+ __nedf2vfp,
+ __powidf2,
+ __subdf3,
+ __subdf3vfp,
+ __unorddf2,
+});
+
+define_panicking_intrinsics!("`i128` should not be used", {
+ __ashrti3,
+ __muloti4,
+ __multi3,
+});
+
+define_panicking_intrinsics!("`u128` should not be used", {
+ __ashlti3,
+ __lshrti3,
+ __udivmodti4,
+ __udivti3,
+ __umodti3,
+});
+
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`u64` division/modulo should not be used", {
+ __aeabi_uldivmod,
+ __mulodi4,
+});
+
+extern "C" {
+ fn rust_helper_BUG() -> !;
+}
+
+#[panic_handler]
+fn panic(_info: &core::panic::PanicInfo<'_>) -> ! {
+ unsafe {
+ rust_helper_BUG();
+ }
+}
diff --git a/rust/exports.c b/rust/exports.c
new file mode 100644
index 000000000000..d7dff1b3b919
--- /dev/null
+++ b/rust/exports.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// A hack to export Rust symbols for loadable modules without having to redo
+// the entire `include/linux/export.h` logic in Rust.
+//
+// This requires Rust's new/future `v0` mangling scheme because the default
+// one ("legacy") uses invalid characters for C identifiers (thus we cannot use
+// the `EXPORT_SYMBOL_*` macros).
+
+#include <linux/module.h>
+
+#define EXPORT_SYMBOL_RUST_GPL(sym) extern int sym; EXPORT_SYMBOL_GPL(sym);
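+
+// Each generated header below is expected to consist of lines that expand
+// this macro, e.g. (illustrative; the symbol is a made-up `v0`-mangled name):
+//
+//   EXPORT_SYMBOL_RUST_GPL(_RNvNtCs1234_6kernel5print10rust_fmt);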
+
+#include "exports_core_generated.h"
+#include "exports_alloc_generated.h"
+#include "exports_kernel_generated.h"
diff --git a/rust/helpers.c b/rust/helpers.c
new file mode 100644
index 000000000000..a6e98abb13ef
--- /dev/null
+++ b/rust/helpers.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+
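+// Non-trivial C macros and inline functions cannot be bound by `bindgen`
+// (there is no out-of-line symbol to link against), so this file provides
+// small out-of-line wrappers that Rust code can call instead.
+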
+#include <linux/bug.h>
+#include <linux/build_bug.h>
+#include <linux/uaccess.h>
+#include <linux/sched/signal.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/uio.h>
+#include <linux/errname.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/security.h>
+
+void rust_helper_BUG(void)
+{
+ BUG();
+}
+
+unsigned long rust_helper_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return copy_from_user(to, from, n);
+}
+
+unsigned long rust_helper_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return copy_to_user(to, from, n);
+}
+
+unsigned long rust_helper_clear_user(void __user *to, unsigned long n)
+{
+ return clear_user(to, n);
+}
+
+void rust_helper_spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __spin_lock_init(lock, name, key);
+#else
+ spin_lock_init(lock);
+#endif
+}
+EXPORT_SYMBOL_GPL(rust_helper_spin_lock_init);
+
+void rust_helper_spin_lock(spinlock_t *lock)
+{
+ spin_lock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_spin_lock);
+
+void rust_helper_spin_unlock(spinlock_t *lock)
+{
+ spin_unlock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_spin_unlock);
+
+void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
+{
+ init_wait(wq_entry);
+}
+EXPORT_SYMBOL_GPL(rust_helper_init_wait);
+
+int rust_helper_signal_pending(struct task_struct *t)
+{
+ return signal_pending(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
+
+struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+ return alloc_pages(gfp_mask, order);
+}
+EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);
+
+void *rust_helper_kmap(struct page *page)
+{
+ return kmap(page);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kmap);
+
+void rust_helper_kunmap(struct page *page)
+{
+ kunmap(page);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kunmap);
+
+int rust_helper_cond_resched(void)
+{
+ return cond_resched();
+}
+EXPORT_SYMBOL_GPL(rust_helper_cond_resched);
+
+size_t rust_helper_copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+ return copy_from_iter(addr, bytes, i);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_from_iter);
+
+size_t rust_helper_copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+{
+ return copy_to_iter(addr, bytes, i);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_to_iter);
+
+bool rust_helper_is_err(__force const void *ptr)
+{
+ return IS_ERR(ptr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_is_err);
+
+long rust_helper_ptr_err(__force const void *ptr)
+{
+ return PTR_ERR(ptr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_ptr_err);
+
+const char *rust_helper_errname(int err)
+{
+ return errname(err);
+}
+
+void rust_helper_mutex_lock(struct mutex *lock)
+{
+ mutex_lock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mutex_lock);
+
+void *
+rust_helper_platform_get_drvdata(const struct platform_device *pdev)
+{
+ return platform_get_drvdata(pdev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_platform_get_drvdata);
+
+void
+rust_helper_platform_set_drvdata(struct platform_device *pdev,
+ void *data)
+{
+ platform_set_drvdata(pdev, data);
+}
+EXPORT_SYMBOL_GPL(rust_helper_platform_set_drvdata);
+
+refcount_t rust_helper_refcount_new(void)
+{
+ return (refcount_t)REFCOUNT_INIT(1);
+}
+EXPORT_SYMBOL_GPL(rust_helper_refcount_new);
+
+void rust_helper_refcount_inc(refcount_t *r)
+{
+ refcount_inc(r);
+}
+EXPORT_SYMBOL_GPL(rust_helper_refcount_inc);
+
+bool rust_helper_refcount_dec_and_test(refcount_t *r)
+{
+ return refcount_dec_and_test(r);
+}
+EXPORT_SYMBOL_GPL(rust_helper_refcount_dec_and_test);
+
+void rust_helper_rb_link_node(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link)
+{
+ rb_link_node(node, parent, rb_link);
+}
+EXPORT_SYMBOL_GPL(rust_helper_rb_link_node);
+
+struct task_struct *rust_helper_get_current(void)
+{
+ return current;
+}
+EXPORT_SYMBOL_GPL(rust_helper_get_current);
+
+void rust_helper_get_task_struct(struct task_struct *t)
+{
+ get_task_struct(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_get_task_struct);
+
+void rust_helper_put_task_struct(struct task_struct *t)
+{
+ put_task_struct(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_put_task_struct);
+
+int rust_helper_security_binder_set_context_mgr(struct task_struct *mgr)
+{
+ return security_binder_set_context_mgr(mgr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_security_binder_set_context_mgr);
+
+int rust_helper_security_binder_transaction(struct task_struct *from,
+ struct task_struct *to)
+{
+ return security_binder_transaction(from, to);
+}
+EXPORT_SYMBOL_GPL(rust_helper_security_binder_transaction);
+
+int rust_helper_security_binder_transfer_binder(struct task_struct *from,
+ struct task_struct *to)
+{
+ return security_binder_transfer_binder(from, to);
+}
+EXPORT_SYMBOL_GPL(rust_helper_security_binder_transfer_binder);
+
+int rust_helper_security_binder_transfer_file(struct task_struct *from,
+ struct task_struct *to,
+ struct file *file)
+{
+ return security_binder_transfer_file(from, to, file);
+}
+EXPORT_SYMBOL_GPL(rust_helper_security_binder_transfer_file);
+
+/*
+ * We use bindgen's --size_t-is-usize option to bind the C size_t type
+ * as the Rust usize type, so we can use it in contexts where Rust
+ * expects a usize like slice (array) indices. usize is defined to be
+ * the same as C's uintptr_t type (can hold any pointer) but not
+ * necessarily the same as size_t (can hold the size of any single
+ * object). Most modern platforms use the same concrete integer type for
+ * both of them, but in case we find ourselves on a platform where
+ * that's not true, fail early instead of risking ABI or
+ * integer-overflow issues.
+ *
+ * If your platform fails this assertion, it means that you are in
+ * danger of integer-overflow bugs (even if you attempt to remove
+ * --size_t-is-usize). It may be easiest to change the kernel ABI on
+ * your platform such that size_t matches uintptr_t (i.e., to increase
+ * size_t, because uintptr_t has to be at least as big as size_t).
+ */
+static_assert(
+ sizeof(size_t) == sizeof(uintptr_t) &&
+ __alignof__(size_t) == __alignof__(uintptr_t),
+ "Rust code expects C size_t to match Rust usize"
+);
diff --git a/rust/kernel/allocator.rs b/rust/kernel/allocator.rs
new file mode 100644
index 000000000000..759cec47de2b
--- /dev/null
+++ b/rust/kernel/allocator.rs
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Allocator support.
+
+use core::alloc::{GlobalAlloc, Layout};
+use core::ptr;
+
+use crate::bindings;
+use crate::c_types;
+
+pub struct KernelAllocator;
+
+unsafe impl GlobalAlloc for KernelAllocator {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ // `krealloc()` is used instead of `kmalloc()` because the latter is
+ // an inline function and therefore cannot be bound to by `bindgen`.
+ unsafe { bindings::krealloc(ptr::null(), layout.size(), bindings::GFP_KERNEL) as *mut u8 }
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+ unsafe {
+ bindings::kfree(ptr as *const c_types::c_void);
+ }
+ }
+}
+
+#[global_allocator]
+static ALLOCATOR: KernelAllocator = KernelAllocator;
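+
+// With `ALLOCATOR` in place, ordinary `alloc` types go through
+// `krealloc()`/`kfree()` transparently, e.g. (illustrative sketch):
+//
+//     let b = Box::try_new(42)?; // allocates via `krealloc(NULL, ...)`
+//     drop(b);                   // frees via `kfree()`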
+
+// `rustc` only generates these for some crate types. Even then, we would need
+// to extract the object file that has them from the archive. For the moment,
+// let's generate them ourselves instead.
+#[no_mangle]
+pub fn __rust_alloc(size: usize, _align: usize) -> *mut u8 {
+ unsafe { bindings::krealloc(core::ptr::null(), size, bindings::GFP_KERNEL) as *mut u8 }
+}
+
+#[no_mangle]
+pub fn __rust_dealloc(ptr: *mut u8, _size: usize, _align: usize) {
+ unsafe { bindings::kfree(ptr as *const c_types::c_void) };
+}
+
+#[no_mangle]
+pub fn __rust_realloc(ptr: *mut u8, _old_size: usize, _align: usize, new_size: usize) -> *mut u8 {
+ unsafe {
+ bindings::krealloc(
+ ptr as *const c_types::c_void,
+ new_size,
+ bindings::GFP_KERNEL,
+ ) as *mut u8
+ }
+}
+
+#[no_mangle]
+pub fn __rust_alloc_zeroed(size: usize, _align: usize) -> *mut u8 {
+ unsafe {
+ bindings::krealloc(
+ core::ptr::null(),
+ size,
+ bindings::GFP_KERNEL | bindings::__GFP_ZERO,
+ ) as *mut u8
+ }
+}
diff --git a/rust/kernel/bindings.rs b/rust/kernel/bindings.rs
new file mode 100644
index 000000000000..93290926ceca
--- /dev/null
+++ b/rust/kernel/bindings.rs
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Bindings
+//!
+//! Imports the generated bindings by `bindgen`.
+
+// See https://github.com/rust-lang/rust-bindgen/issues/1651.
+#![cfg_attr(test, allow(deref_nullptr))]
+#![cfg_attr(test, allow(unaligned_references))]
+#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
+
+#[allow(
+ clippy::all,
+ non_camel_case_types,
+ non_upper_case_globals,
+ non_snake_case,
+ improper_ctypes,
+ unsafe_op_in_unsafe_fn
+)]
+mod bindings_raw {
+ use crate::c_types;
+ include!(env!("RUST_BINDINGS_FILE"));
+}
+pub use bindings_raw::*;
+
+pub const GFP_KERNEL: gfp_t = BINDINGS_GFP_KERNEL;
+pub const __GFP_ZERO: gfp_t = BINDINGS___GFP_ZERO;
+pub const __GFP_HIGHMEM: gfp_t = ___GFP_HIGHMEM;
diff --git a/rust/kernel/bindings_helper.h b/rust/kernel/bindings_helper.h
new file mode 100644
index 000000000000..c64a6307da37
--- /dev/null
+++ b/rust/kernel/bindings_helper.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/cdev.h>
+#include <linux/errname.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/version.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <uapi/linux/android/binder.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/security.h>
+
+// `bindgen` gets confused at certain things
+const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
+const gfp_t BINDINGS___GFP_ZERO = __GFP_ZERO;
diff --git a/rust/kernel/buffer.rs b/rust/kernel/buffer.rs
new file mode 100644
index 000000000000..b2502fa968fe
--- /dev/null
+++ b/rust/kernel/buffer.rs
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Struct for writing to a pre-allocated buffer with the [`write!`] macro.
+
+use core::fmt;
+
+/// A pre-allocated buffer that implements [`core::fmt::Write`].
+///
+/// Consecutive writes will append to what has already been written.
+/// Writes that don't fit in the buffer will fail.
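+///
+/// # Examples
+///
+/// A minimal sketch (assuming this module is reachable as `kernel::buffer`):
+///
+/// ```ignore
+/// use core::fmt::Write;
+///
+/// let mut storage = [0u8; 16];
+/// let mut buf = kernel::buffer::Buffer::new(&mut storage);
+/// // Writes that fit are appended; `write!` returns `Err` once full.
+/// write!(buf, "{}-{}", 1, 2).unwrap();
+/// assert_eq!(buf.bytes_written(), 3);
+/// ```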
+pub struct Buffer<'a> {
+ slice: &'a mut [u8],
+ pos: usize,
+}
+
+impl<'a> Buffer<'a> {
+ /// Create a new buffer from an existing array.
+ pub fn new(slice: &'a mut [u8]) -> Self {
+ Buffer { slice, pos: 0 }
+ }
+
+ /// Number of bytes that have already been written to the buffer.
+ /// This will never exceed the length of the original array.
+ pub fn bytes_written(&self) -> usize {
+ self.pos
+ }
+}
+
+impl<'a> fmt::Write for Buffer<'a> {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ if s.len() > self.slice.len() - self.pos {
+ Err(fmt::Error)
+ } else {
+ self.slice[self.pos..self.pos + s.len()].copy_from_slice(s.as_bytes());
+ self.pos += s.len();
+ Ok(())
+ }
+ }
+}
diff --git a/rust/kernel/build_assert.rs b/rust/kernel/build_assert.rs
new file mode 100644
index 000000000000..f726927185c0
--- /dev/null
+++ b/rust/kernel/build_assert.rs
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Build-time assert.
+
+/// Fails the build if the code path calling `build_error!` can possibly be executed.
+///
+/// If the macro is executed in const context, `build_error!` will panic.
+/// If the compiler or optimizer cannot guarantee that `build_error!` can never
+/// be called, a build error will be triggered.
+///
+/// # Examples
+/// ```
+/// # use kernel::build_error;
+/// #[inline]
+/// fn foo(a: usize) -> usize {
+/// a.checked_add(1).unwrap_or_else(|| build_error!("overflow"))
+/// }
+/// ```
+#[macro_export]
+macro_rules! build_error {
+ () => {{
+ $crate::build_error("")
+ }};
+ ($msg:expr) => {{
+ $crate::build_error($msg)
+ }};
+}
+
+/// Asserts that a boolean expression is `true` at compile time.
+///
+/// If the condition evaluates to `false` in const context, `build_assert!`
+/// will panic. If the compiler or optimizer cannot guarantee the condition
+/// will evaluate to `true`, a build error will be triggered.
+///
+/// [`static_assert!`] should be preferred to `build_assert!` whenever possible.
+///
+/// # Examples
+///
+/// These examples show that different types of [`assert!`] will trigger errors
+/// at different stages of compilation. It is preferred to err as early as
+/// possible, so [`static_assert!`] should be used whenever possible.
+/// ```compile_fail
+/// # use kernel::prelude::*;
+/// fn foo() {
+/// static_assert!(1 > 1); // Compile-time error
+/// build_assert!(1 > 1); // Build-time error
+/// assert!(1 > 1); // Run-time error
+/// }
+/// ```
+///
+/// When the condition refers to generic parameters or parameters of an inline function,
+/// [`static_assert!`] cannot be used. Use `build_assert!` in this scenario.
+/// ```no_run
+/// # use kernel::prelude::*;
+/// fn foo<const N: usize>() {
+/// // `static_assert!(N > 1);` is not allowed
+/// build_assert!(N > 1); // Build-time check
+/// assert!(N > 1); // Run-time check
+/// }
+///
+/// #[inline]
+/// fn bar(n: usize) {
+/// // `static_assert!(n > 1);` is not allowed
+/// build_assert!(n > 1); // Build-time check
+/// assert!(n > 1); // Run-time check
+/// }
+/// ```
+#[macro_export]
+macro_rules! build_assert {
+ ($cond:expr $(,)?) => {{
+ if !$cond {
+ $crate::build_error(concat!("assertion failed: ", stringify!($cond)));
+ }
+ }};
+ ($cond:expr, $msg:expr) => {{
+ if !$cond {
+ $crate::build_error($msg);
+ }
+ }};
+}
diff --git a/rust/kernel/c_types.rs b/rust/kernel/c_types.rs
new file mode 100644
index 000000000000..07593a3ba8be
--- /dev/null
+++ b/rust/kernel/c_types.rs
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! C types for the bindings.
+//!
+//! The bindings generated by `bindgen` use these types to map to the C ones.
+//!
+//! C's standard integer types may differ in width depending on
+//! the architecture, thus we need to conditionally compile those.
+
+#![allow(non_camel_case_types)]
+
+#[cfg(any(target_arch = "arm", target_arch = "x86", target_arch = "riscv32",))]
+mod c {
+ /// C `void` type.
+ pub type c_void = core::ffi::c_void;
+
+ /// C `char` type.
+ pub type c_char = i8;
+
+ /// C `signed char` type.
+ pub type c_schar = i8;
+
+ /// C `unsigned char` type.
+ pub type c_uchar = u8;
+
+ /// C `short` type.
+ pub type c_short = i16;
+
+ /// C `unsigned short` type.
+ pub type c_ushort = u16;
+
+ /// C `int` type.
+ pub type c_int = i32;
+
+ /// C `unsigned int` type.
+ pub type c_uint = u32;
+
+ /// C `long` type.
+ pub type c_long = i32;
+
+ /// C `unsigned long` type.
+ pub type c_ulong = u32;
+
+ /// C `long long` type.
+ pub type c_longlong = i64;
+
+ /// C `unsigned long long` type.
+ pub type c_ulonglong = u64;
+
+ /// C `ssize_t` type (typically defined in `<sys/types.h>` by POSIX).
+ ///
+ /// For some 32-bit architectures like this one, the kernel defines it as
+ /// `int`, i.e. it is an [`i32`].
+ pub type c_ssize_t = isize;
+
+ /// C `size_t` type (typically defined in `<stddef.h>`).
+ ///
+ /// For some 32-bit architectures like this one, the kernel defines it as
+ /// `unsigned int`, i.e. it is a [`u32`].
+ pub type c_size_t = usize;
+}
+
+#[cfg(any(
+ target_arch = "aarch64",
+ target_arch = "x86_64",
+ target_arch = "powerpc64",
+ target_arch = "riscv64",
+))]
+mod c {
+ /// C `void` type.
+ pub type c_void = core::ffi::c_void;
+
+ /// C `char` type.
+ pub type c_char = i8;
+
+ /// C `signed char` type.
+ pub type c_schar = i8;
+
+ /// C `unsigned char` type.
+ pub type c_uchar = u8;
+
+ /// C `short` type.
+ pub type c_short = i16;
+
+ /// C `unsigned short` type.
+ pub type c_ushort = u16;
+
+ /// C `int` type.
+ pub type c_int = i32;
+
+ /// C `unsigned int` type.
+ pub type c_uint = u32;
+
+ /// C `long` type.
+ pub type c_long = i64;
+
+ /// C `unsigned long` type.
+ pub type c_ulong = u64;
+
+ /// C `long long` type.
+ pub type c_longlong = i64;
+
+ /// C `unsigned long long` type.
+ pub type c_ulonglong = u64;
+
+ /// C `ssize_t` type (typically defined in `<sys/types.h>` by POSIX).
+ ///
+ /// For 64-bit architectures like this one, the kernel defines it as
+ /// `long`, i.e. it is an [`i64`].
+ pub type c_ssize_t = isize;
+
+ /// C `size_t` type (typically defined in `<stddef.h>`).
+ ///
+ /// For 64-bit architectures like this one, the kernel defines it as
+ /// `unsigned long`, i.e. it is a [`u64`].
+ pub type c_size_t = usize;
+}
+
+pub use c::*;
diff --git a/rust/kernel/chrdev.rs b/rust/kernel/chrdev.rs
new file mode 100644
index 000000000000..20e93ec05def
--- /dev/null
+++ b/rust/kernel/chrdev.rs
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Character devices.
+//!
+//! Also called "char devices", `chrdev`, `cdev`.
+//!
+//! C header: [`include/linux/cdev.h`](../../../../include/linux/cdev.h)
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#char-devices>
+
+use alloc::boxed::Box;
+use core::convert::TryInto;
+use core::marker::PhantomPinned;
+use core::pin::Pin;
+
+use crate::bindings;
+use crate::c_types;
+use crate::error::{Error, Result};
+use crate::file_operations;
+use crate::str::CStr;
+
+/// Character device.
+///
+/// # Invariants
+///
+/// - [`self.0`] is valid and non-null.
+/// - [`(*self.0).ops`] is valid, non-null and has static lifetime.
+/// - [`(*self.0).owner`] is valid and, if non-null, has module lifetime.
+struct Cdev(*mut bindings::cdev);
+
+impl Cdev {
+ fn alloc(
+ fops: &'static bindings::file_operations,
+ module: &'static crate::ThisModule,
+ ) -> Result<Self> {
+ // SAFETY: FFI call.
+ let cdev = unsafe { bindings::cdev_alloc() };
+ if cdev.is_null() {
+ return Err(Error::ENOMEM);
+ }
+ // SAFETY: `cdev` is valid and non-null since `cdev_alloc()`
+ // returned a valid pointer which was null-checked.
+ unsafe {
+ (*cdev).ops = fops;
+ (*cdev).owner = module.0;
+ }
+ // INVARIANTS:
+ // - [`self.0`] is valid and non-null.
+ // - [`(*self.0).ops`] is valid, non-null and has static lifetime,
+ // because it was coerced from a reference with static lifetime.
+ // - [`(*self.0).owner`] is valid and, if non-null, has module lifetime,
+ // guaranteed by the [`ThisModule`] invariant.
+ Ok(Self(cdev))
+ }
+
+ fn add(&mut self, dev: bindings::dev_t, count: c_types::c_uint) -> Result {
+ // SAFETY: according to the type invariants:
+ // - [`self.0`] can be safely passed to [`bindings::cdev_add`].
+ // - [`(*self.0).ops`] will live at least as long as [`self.0`].
+ // - [`(*self.0).owner`] will live at least as long as the
+ // module, which is an implicit requirement.
+ let rc = unsafe { bindings::cdev_add(self.0, dev, count) };
+ if rc != 0 {
+ return Err(Error::from_kernel_errno(rc));
+ }
+ Ok(())
+ }
+}
+
+impl Drop for Cdev {
+ fn drop(&mut self) {
+ // SAFETY: [`self.0`] is valid and non-null by the type invariants.
+ unsafe {
+ bindings::cdev_del(self.0);
+ }
+ }
+}
+
+struct RegistrationInner<const N: usize> {
+ dev: bindings::dev_t,
+ used: usize,
+ cdevs: [Option<Cdev>; N],
+ _pin: PhantomPinned,
+}
+
+/// Character device registration.
+///
+/// May contain up to a fixed number (`N`) of devices. Must be pinned.
+pub struct Registration<const N: usize> {
+ name: &'static CStr,
+ minors_start: u16,
+ this_module: &'static crate::ThisModule,
+ inner: Option<RegistrationInner<N>>,
+}
+
+impl<const N: usize> Registration<{ N }> {
+ /// Creates a [`Registration`] object for a character device.
+ ///
+ /// This does *not* register the device: see [`Self::register()`].
+ ///
+ /// This associated function is intended to be used when you need to avoid
+ /// a memory allocation, e.g. when the [`Registration`] is a member of
+ /// a bigger structure inside your [`crate::KernelModule`] instance. If you
+ /// are going to pin the registration right away, call
+ /// [`Self::new_pinned()`] instead.
+ pub fn new(
+ name: &'static CStr,
+ minors_start: u16,
+ this_module: &'static crate::ThisModule,
+ ) -> Self {
+ Registration {
+ name,
+ minors_start,
+ this_module,
+ inner: None,
+ }
+ }
+
+ /// Creates a pinned [`Registration`] object for a character device.
+ ///
+ /// This does *not* register the device: see [`Self::register()`].
+ pub fn new_pinned(
+ name: &'static CStr,
+ minors_start: u16,
+ this_module: &'static crate::ThisModule,
+ ) -> Result<Pin<Box<Self>>> {
+ Ok(Pin::from(Box::try_new(Self::new(
+ name,
+ minors_start,
+ this_module,
+ ))?))
+ }
+
+ /// Registers a character device.
+ ///
+ /// You may call this once per device type, up to `N` times.
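+ ///
+ /// A typical sequence (illustrative; `MyFile` and `MyOtherFile` stand for
+ /// hypothetical [`file_operations::FileOpener<()>`] implementations):
+ ///
+ /// ```ignore
+ /// let mut reg = Registration::<2>::new_pinned(name, 0, module)?;
+ /// reg.as_mut().register::<MyFile>()?;
+ /// reg.as_mut().register::<MyOtherFile>()?;
+ /// ```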
+ pub fn register<T: file_operations::FileOpener<()>>(self: Pin<&mut Self>) -> Result {
+ // SAFETY: We must ensure that we never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ if this.inner.is_none() {
+ let mut dev: bindings::dev_t = 0;
+ // SAFETY: Calling unsafe function. `this.name` has `'static`
+ // lifetime.
+ let res = unsafe {
+ bindings::alloc_chrdev_region(
+ &mut dev,
+ this.minors_start.into(),
+ N.try_into()?,
+ this.name.as_char_ptr(),
+ )
+ };
+ if res != 0 {
+ return Err(Error::from_kernel_errno(res));
+ }
+ const NONE: Option<Cdev> = None;
+ this.inner = Some(RegistrationInner {
+ dev,
+ used: 0,
+ cdevs: [NONE; N],
+ _pin: PhantomPinned,
+ });
+ }
+
+ let mut inner = this.inner.as_mut().unwrap();
+ if inner.used == N {
+ return Err(Error::EINVAL);
+ }
+
+ // SAFETY: The adapter doesn't retrieve any state yet, so it's compatible with any
+ // registration.
+ let fops = unsafe { file_operations::FileOperationsVtable::<Self, T>::build() };
+ let mut cdev = Cdev::alloc(fops, this.this_module)?;
+ cdev.add(inner.dev + inner.used as bindings::dev_t, 1)?;
+ inner.cdevs[inner.used].replace(cdev);
+ inner.used += 1;
+ Ok(())
+ }
+}
+
+impl<const N: usize> file_operations::FileOpenAdapter for Registration<{ N }> {
+ type Arg = ();
+
+ unsafe fn convert(
+ _inode: *mut bindings::inode,
+ _file: *mut bindings::file,
+ ) -> *const Self::Arg {
+ // TODO: Update the SAFETY comment on the call to `FileOperationsVtable::build` above once
+ // this is updated to retrieve state.
+ &()
+ }
+}
+
+// SAFETY: `Registration` does not expose any of its state across threads
+// (it is fine for multiple threads to have a shared reference to it).
+unsafe impl<const N: usize> Sync for Registration<{ N }> {}
+
+impl<const N: usize> Drop for Registration<{ N }> {
+ fn drop(&mut self) {
+ if let Some(inner) = self.inner.as_mut() {
+ // Replicate kernel C behaviour: drop [`Cdev`]s before calling
+ // [`bindings::unregister_chrdev_region`].
+ for i in 0..inner.used {
+ inner.cdevs[i].take();
+ }
+ // SAFETY: [`self.inner`] is Some, so [`inner.dev`] was previously
+ // created using [`bindings::alloc_chrdev_region`].
+ unsafe {
+ bindings::unregister_chrdev_region(inner.dev, N.try_into().unwrap());
+ }
+ }
+ }
+}
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
new file mode 100644
index 000000000000..df7ba6a36610
--- /dev/null
+++ b/rust/kernel/error.rs
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel errors.
+//!
+//! C header: [`include/uapi/asm-generic/errno-base.h`](../../../include/uapi/asm-generic/errno-base.h)
+
+use crate::str::CStr;
+use crate::{bindings, c_types};
+use alloc::{alloc::AllocError, collections::TryReserveError};
+use core::convert::From;
+use core::fmt;
+use core::num::TryFromIntError;
+use core::str::{self, Utf8Error};
+
+/// Generic integer kernel error.
+///
+/// The kernel defines a set of integer generic error codes based on C and
+/// POSIX ones. These codes may have a more specific meaning in some contexts.
+///
+/// # Invariants
+///
+/// The value is a valid `errno` (i.e. `>= -MAX_ERRNO && < 0`).
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct Error(c_types::c_int);
+
+impl Error {
+ /// Invalid argument.
+ pub const EINVAL: Self = Error(-(bindings::EINVAL as i32));
+
+ /// Out of memory.
+ pub const ENOMEM: Self = Error(-(bindings::ENOMEM as i32));
+
+ /// Bad address.
+ pub const EFAULT: Self = Error(-(bindings::EFAULT as i32));
+
+ /// Illegal seek.
+ pub const ESPIPE: Self = Error(-(bindings::ESPIPE as i32));
+
+ /// Try again.
+ pub const EAGAIN: Self = Error(-(bindings::EAGAIN as i32));
+
+ /// Device or resource busy.
+ pub const EBUSY: Self = Error(-(bindings::EBUSY as i32));
+
+ /// Restart the system call.
+ pub const ERESTARTSYS: Self = Error(-(bindings::ERESTARTSYS as i32));
+
+ /// Operation not permitted.
+ pub const EPERM: Self = Error(-(bindings::EPERM as i32));
+
+ /// No such process.
+ pub const ESRCH: Self = Error(-(bindings::ESRCH as i32));
+
+ /// No such file or directory.
+ pub const ENOENT: Self = Error(-(bindings::ENOENT as i32));
+
+ /// Interrupted system call.
+ pub const EINTR: Self = Error(-(bindings::EINTR as i32));
+
+ /// Bad file number.
+ pub const EBADF: Self = Error(-(bindings::EBADF as i32));
+
+ /// Creates an [`Error`] from a kernel error code.
+ ///
+ /// It is a bug to pass an out-of-range `errno`; `EINVAL` is
+ /// returned in such a case.
+ pub(crate) fn from_kernel_errno(errno: c_types::c_int) -> Error {
+ if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 {
+ // TODO: make it a `WARN_ONCE` once available.
+ crate::pr_warn!(
+ "attempted to create `Error` with out of range `errno`: {}",
+ errno
+ );
+ return Error::EINVAL;
+ }
+
+ // INVARIANT: the check above ensures the type invariant
+ // will hold.
+ Error(errno)
+ }
+
+ /// Creates an [`Error`] from a kernel error code.
+ ///
+ /// # Safety
+ ///
+ /// `errno` must be within error code range (i.e. `>= -MAX_ERRNO && < 0`).
+ pub(crate) unsafe fn from_kernel_errno_unchecked(errno: c_types::c_int) -> Error {
+ // INVARIANT: the contract ensures the type invariant
+ // will hold.
+ Error(errno)
+ }
+
+ /// Returns the kernel error code.
+ pub fn to_kernel_errno(self) -> c_types::c_int {
+ self.0
+ }
+}
+
+impl fmt::Debug for Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ extern "C" {
+ fn rust_helper_errname(err: c_types::c_int) -> *const c_types::c_char;
+ }
+ // SAFETY: FFI call.
+ let name = unsafe { rust_helper_errname(-self.0) };
+
+ if name.is_null() {
+ // Print out the number if no name can be found.
+ return f.debug_tuple("Error").field(&-self.0).finish();
+ }
+
+ // SAFETY: `'static` string from C, and is not NULL.
+ let cstr = unsafe { CStr::from_char_ptr(name) };
+ // SAFETY: These strings are ASCII-only.
+ let str = unsafe { str::from_utf8_unchecked(cstr) };
+ f.debug_tuple(str).finish()
+ }
+}
+
+impl From<TryFromIntError> for Error {
+ fn from(_: TryFromIntError) -> Error {
+ Error::EINVAL
+ }
+}
+
+impl From<Utf8Error> for Error {
+ fn from(_: Utf8Error) -> Error {
+ Error::EINVAL
+ }
+}
+
+impl From<TryReserveError> for Error {
+ fn from(_: TryReserveError) -> Error {
+ Error::ENOMEM
+ }
+}
+
+/// A [`Result`] with an [`Error`] error type.
+///
+/// To be used as the return type for functions that may fail.
+///
+/// # Error codes in C and Rust
+///
+/// In C, it is common that functions indicate success or failure through
+/// their return value; modifying or returning extra data through non-`const`
+/// pointer parameters. In particular, in the kernel, functions that may fail
+/// typically return an `int` that represents a generic error code. We model
+/// those as [`Error`].
+///
+/// In Rust, it is idiomatic to model functions that may fail as returning
+/// a [`Result`]. Since in the kernel many functions return an error code,
+/// [`Result`] is a type alias for a [`core::result::Result`] that uses
+/// [`Error`] as its error type.
+///
+/// Note that even if a function does not return anything when it succeeds,
+/// it should still be modeled as returning a `Result` rather than
+/// just an [`Error`].
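+///
+/// # Examples
+///
+/// A minimal sketch (illustrative; `MAX_LEN` is a made-up constant):
+///
+/// ```ignore
+/// fn check_len(len: usize) -> Result {
+///     if len > MAX_LEN {
+///         return Err(Error::EINVAL);
+///     }
+///     Ok(())
+/// }
+/// ```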
+pub type Result<T = ()> = core::result::Result<T, Error>;
+
+impl From<AllocError> for Error {
+ fn from(_: AllocError) -> Error {
+ Error::ENOMEM
+ }
+}
+
+// # Invariant: `-bindings::MAX_ERRNO` fits in an `i16`.
+crate::static_assert!(bindings::MAX_ERRNO <= -(i16::MIN as i32) as u32);
+
+#[doc(hidden)]
+pub fn from_kernel_result_helper<T>(r: Result<T>) -> T
+where
+ T: From<i16>,
+{
+ match r {
+ Ok(v) => v,
+ // NO-OVERFLOW: negative `errno`s are no smaller than `-bindings::MAX_ERRNO`,
+ // `-bindings::MAX_ERRNO` fits in an `i16` as per invariant above,
+ // therefore a negative `errno` always fits in an `i16` and will not overflow.
+ Err(e) => T::from(e.to_kernel_errno() as i16),
+ }
+}
+
+/// Transforms a [`crate::error::Result<T>`] to a kernel C integer result.
+///
+/// This is useful when calling Rust functions that return [`crate::error::Result<T>`]
+/// from inside `extern "C"` functions that need to return an integer
+/// error result.
+///
+/// `T` must implement `From<i16>`, i.e. it must be constructible from an `i16`.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::from_kernel_result;
+/// # use kernel::c_types;
+/// # use kernel::bindings;
+/// unsafe extern "C" fn probe_callback(
+/// pdev: *mut bindings::platform_device,
+/// ) -> c_types::c_int {
+/// from_kernel_result! {
+/// let ptr = devm_alloc(pdev)?;
+/// rust_helper_platform_set_drvdata(pdev, ptr);
+/// Ok(0)
+/// }
+/// }
+/// ```
+#[macro_export]
+macro_rules! from_kernel_result {
+ ($($tt:tt)*) => {{
+ $crate::error::from_kernel_result_helper((|| {
+ $($tt)*
+ })())
+ }};
+}
+
+/// Transforms a kernel "error pointer" to a normal pointer.
+///
+/// Some kernel C API functions return an "error pointer" which optionally
+/// embeds an `errno`. Callers are supposed to check the returned pointer
+/// for errors. This function performs the check and converts the "error pointer"
+/// to a normal pointer in an idiomatic fashion.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::prelude::*;
+/// # use kernel::from_kernel_err_ptr;
+/// # use kernel::c_types;
+/// # use kernel::bindings;
+/// fn devm_platform_ioremap_resource(
+/// pdev: &mut PlatformDevice,
+/// index: u32,
+/// ) -> Result<*mut c_types::c_void> {
+/// // SAFETY: FFI call.
+/// unsafe {
+/// from_kernel_err_ptr(bindings::devm_platform_ioremap_resource(
+/// pdev.to_ptr(),
+/// index,
+/// ))
+/// }
+/// }
+/// ```
+// TODO: remove `dead_code` marker once an in-kernel client is available.
+#[allow(dead_code)]
+pub(crate) fn from_kernel_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
+ extern "C" {
+ #[allow(improper_ctypes)]
+ fn rust_helper_is_err(ptr: *const c_types::c_void) -> bool;
+
+ #[allow(improper_ctypes)]
+ fn rust_helper_ptr_err(ptr: *const c_types::c_void) -> c_types::c_long;
+ }
+
+ // CAST: casting a pointer to `*const c_types::c_void` is always valid.
+ let const_ptr: *const c_types::c_void = ptr.cast();
+ // SAFETY: the FFI function does not deref the pointer.
+ if unsafe { rust_helper_is_err(const_ptr) } {
+ // SAFETY: the FFI function does not deref the pointer.
+ let err = unsafe { rust_helper_ptr_err(const_ptr) };
+ // CAST: if `rust_helper_is_err()` returns `true`,
+ // then `rust_helper_ptr_err()` is guaranteed to return a
+ // negative value greater-or-equal to `-bindings::MAX_ERRNO`,
+ // which always fits in an `i16`, as per the invariant above.
+ // And an `i16` always fits in an `i32`. So casting `err` to
+ // an `i32` can never overflow, and is always valid.
+ //
+ // SAFETY: `rust_helper_is_err()` ensures `err` is a
+ // negative value greater-or-equal to `-bindings::MAX_ERRNO`
+ return Err(unsafe { Error::from_kernel_errno_unchecked(err as i32) });
+ }
+ Ok(ptr)
+}
diff --git a/rust/kernel/file.rs b/rust/kernel/file.rs
new file mode 100644
index 000000000000..091b3a4306c5
--- /dev/null
+++ b/rust/kernel/file.rs
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Files and file descriptors.
+//!
+//! C headers: [`include/linux/fs.h`](../../../../include/linux/fs.h) and
+//! [`include/linux/file.h`](../../../../include/linux/file.h)
+
+use crate::{bindings, error::Error, Result};
+use core::{mem::ManuallyDrop, ops::Deref};
+
+/// Wraps the kernel's `struct file`.
+///
+/// # Invariants
+///
+/// The pointer `File::ptr` is non-null and valid. Its reference count is also non-zero.
+pub struct File {
+ pub(crate) ptr: *mut bindings::file,
+}
+
+impl File {
+ /// Constructs a new [`struct file`] wrapper from a file descriptor.
+ ///
+ /// The file descriptor belongs to the current process.
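+    ///
+    /// # Examples
+    ///
+    /// A sketch of wrapping a file descriptor received from userspace:
+    ///
+    /// ```ignore
+    /// fn report_mode(fd: u32) -> Result {
+    ///     // Takes a new reference to the file; it is released when `file` is dropped.
+    ///     let file = File::from_fd(fd)?;
+    ///     pr_info!("blocking: {}\n", file.is_blocking());
+    ///     Ok(())
+    /// }
+    /// ```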
+ pub fn from_fd(fd: u32) -> Result<Self> {
+ // SAFETY: FFI call, there are no requirements on `fd`.
+ let ptr = unsafe { bindings::fget(fd) };
+ if ptr.is_null() {
+ return Err(Error::EBADF);
+ }
+
+ // INVARIANTS: We checked that `ptr` is non-null, so it is valid. `fget` increments the ref
+ // count before returning.
+ Ok(Self { ptr })
+ }
+
+ /// Returns the current seek/cursor/pointer position (`struct file::f_pos`).
+ pub fn pos(&self) -> u64 {
+ // SAFETY: `File::ptr` is guaranteed to be valid by the type invariants.
+ unsafe { (*self.ptr).f_pos as u64 }
+ }
+
+ /// Returns whether the file is in blocking mode.
+ pub fn is_blocking(&self) -> bool {
+ // SAFETY: `File::ptr` is guaranteed to be valid by the type invariants.
+ unsafe { (*self.ptr).f_flags & bindings::O_NONBLOCK == 0 }
+ }
+}
+
+impl Drop for File {
+ fn drop(&mut self) {
+ // SAFETY: The type invariants guarantee that `File::ptr` has a non-zero reference count.
+ unsafe { bindings::fput(self.ptr) };
+ }
+}
+
+/// A wrapper for [`File`] that doesn't automatically decrement the refcount when dropped.
+///
+/// We need the wrapper because [`ManuallyDrop`] alone would allow callers to call
+/// [`ManuallyDrop::into_inner`], which would result in an unbalanced call to `fput` being
+/// triggered without any `unsafe` blocks.
+///
+/// # Invariants
+///
+/// The wrapped [`File`] remains valid for the lifetime of the object.
+pub(crate) struct FileRef(ManuallyDrop<File>);
+
+impl FileRef {
+ /// Constructs a new [`struct file`] wrapper that doesn't change its reference count.
+ ///
+ /// # Safety
+ ///
+ /// The pointer `ptr` must be non-null and valid for the lifetime of the object.
+ pub(crate) unsafe fn from_ptr(ptr: *mut bindings::file) -> Self {
+ Self(ManuallyDrop::new(File { ptr }))
+ }
+}
+
+impl Deref for FileRef {
+ type Target = File;
+
+ fn deref(&self) -> &Self::Target {
+ self.0.deref()
+ }
+}
+
+/// A file descriptor reservation.
+///
+/// This allows the creation of a file descriptor in two steps: first, we reserve a slot for it,
+/// then we commit or drop the reservation. The first step may fail (e.g., the current process ran
+/// out of available slots), but commit and drop never fail (and are mutually exclusive).
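+///
+/// # Examples
+///
+/// A sketch of the two-step flow (`get_file_somehow` is a hypothetical source
+/// of a [`File`]):
+///
+/// ```ignore
+/// fn install_file() -> Result<u32> {
+///     // Step 1: reserve a slot; this may fail.
+///     let reservation = FileDescriptorReservation::new(bindings::O_CLOEXEC)?;
+///     let fd = reservation.reserved_fd();
+///     let file = get_file_somehow()?;
+///     // Step 2: commit never fails. Dropping the reservation instead would
+///     // have released the slot.
+///     reservation.commit(file);
+///     Ok(fd)
+/// }
+/// ```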
+pub struct FileDescriptorReservation {
+ fd: u32,
+}
+
+impl FileDescriptorReservation {
+ /// Creates a new file descriptor reservation.
+ pub fn new(flags: u32) -> Result<Self> {
+        // SAFETY: FFI call; there are no safety requirements on `flags`.
+        let fd = unsafe { bindings::get_unused_fd_flags(flags) };
+ if fd < 0 {
+ return Err(Error::from_kernel_errno(fd));
+ }
+ Ok(Self { fd: fd as _ })
+ }
+
+ /// Returns the file descriptor number that was reserved.
+ pub fn reserved_fd(&self) -> u32 {
+ self.fd
+ }
+
+ /// Commits the reservation.
+ ///
+ /// The previously reserved file descriptor is bound to `file`.
+ pub fn commit(self, file: File) {
+ // SAFETY: `self.fd` was previously returned by `get_unused_fd_flags`, and `file.ptr` is
+ // guaranteed to have an owned ref count by its type invariants.
+ unsafe { bindings::fd_install(self.fd, file.ptr) };
+
+ // `fd_install` consumes both the file descriptor and the file reference, so we cannot run
+ // the destructors.
+ core::mem::forget(self);
+ core::mem::forget(file);
+ }
+}
+
+impl Drop for FileDescriptorReservation {
+ fn drop(&mut self) {
+ // SAFETY: `self.fd` was returned by a previous call to `get_unused_fd_flags`.
+ unsafe { bindings::put_unused_fd(self.fd) };
+ }
+}
diff --git a/rust/kernel/file_operations.rs b/rust/kernel/file_operations.rs
new file mode 100644
index 000000000000..b866b6668561
--- /dev/null
+++ b/rust/kernel/file_operations.rs
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! File operations.
+//!
+//! C header: [`include/linux/fs.h`](../../../../include/linux/fs.h)
+
+use core::convert::{TryFrom, TryInto};
+use core::{marker, mem, ops::Deref, ptr};
+
+use alloc::boxed::Box;
+
+use crate::{
+ bindings, c_types,
+ error::{Error, Result},
+ file::{File, FileRef},
+ from_kernel_result,
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ iov_iter::IovIter,
+ sync::CondVar,
+ types::PointerWrapper,
+ user_ptr::{UserSlicePtr, UserSlicePtrReader, UserSlicePtrWriter},
+};
+
+/// Wraps the kernel's `struct poll_table_struct`.
+///
+/// # Invariants
+///
+/// The pointer `PollTable::ptr` is null or valid.
+pub struct PollTable {
+ ptr: *mut bindings::poll_table_struct,
+}
+
+impl PollTable {
+    /// Constructs a new `struct poll_table_struct` wrapper.
+ ///
+ /// # Safety
+ ///
+ /// The pointer `ptr` must be either null or a valid pointer for the lifetime of the object.
+ unsafe fn from_ptr(ptr: *mut bindings::poll_table_struct) -> Self {
+ Self { ptr }
+ }
+
+    /// Associates the given file and condition variable with this poll table. This means that
+    /// notifying the condition variable will notify the poll table as well; additionally, the
+    /// association between the condition variable and the file will automatically be undone by
+    /// the kernel when the file is destructed. To unilaterally remove the association before
+    /// then, one can call [`CondVar::free_waiters`].
+ ///
+ /// # Safety
+ ///
+ /// If the condition variable is destroyed before the file, then [`CondVar::free_waiters`] must
+ /// be called to ensure that all waiters are flushed out.
+ pub unsafe fn register_wait<'a>(&self, file: &'a File, cv: &'a CondVar) {
+ if self.ptr.is_null() {
+ return;
+ }
+
+ // SAFETY: `PollTable::ptr` is guaranteed to be valid by the type invariants and the null
+ // check above.
+ let table = unsafe { &*self.ptr };
+ if let Some(proc) = table._qproc {
+ // SAFETY: All pointers are known to be valid.
+ unsafe { proc(file.ptr as _, cv.wait_list.get(), self.ptr) }
+ }
+ }
+}
+
+/// Equivalent to [`std::io::SeekFrom`].
+///
+/// [`std::io::SeekFrom`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
+pub enum SeekFrom {
+ /// Equivalent to C's `SEEK_SET`.
+ Start(u64),
+
+ /// Equivalent to C's `SEEK_END`.
+ End(i64),
+
+ /// Equivalent to C's `SEEK_CUR`.
+ Current(i64),
+}
+
+unsafe extern "C" fn open_callback<A: FileOpenAdapter, T: FileOpener<A::Arg>>(
+ inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> c_types::c_int {
+ from_kernel_result! {
+        // SAFETY: This callback is only invoked for files registered through the adapter `A`,
+        // which is what `A::convert` requires; the pointer it returns is then valid to read.
+        let arg = unsafe { A::convert(inode, file) };
+        let ptr = T::open(unsafe { &*arg })?.into_pointer();
+        // SAFETY: `file` is valid during `open`, and `private_data` is ours to initialise.
+        unsafe { (*file).private_data = ptr as *mut c_types::c_void };
+ Ok(0)
+ }
+}
+
+unsafe extern "C" fn read_callback<T: FileOperations>(
+ file: *mut bindings::file,
+ buf: *mut c_types::c_char,
+ len: c_types::c_size_t,
+ offset: *mut bindings::loff_t,
+) -> c_types::c_ssize_t {
+ from_kernel_result! {
+        // SAFETY: `buf` and `len` describe a userspace buffer provided by the C caller.
+        let mut data = unsafe { UserSlicePtr::new(buf as *mut c_types::c_void, len).writer() };
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Wrapper::into_pointer`. `T::Wrapper::from_pointer` is only called by the `release`
+        // callback, which the C API guarantees will be called only when all references to
+ // `file` have been released, so we know it can't be called while this function is running.
+ let f = unsafe { T::Wrapper::borrow((*file).private_data) };
+ // No `FMODE_UNSIGNED_OFFSET` support, so `offset` must be in [0, 2^63).
+ // See discussion in https://github.com/fishinabarrel/linux-kernel-module-rust/pull/113
+ let read = T::read(&f, unsafe { &FileRef::from_ptr(file) }, &mut data, unsafe { *offset }.try_into()?)?;
+ unsafe { (*offset) += bindings::loff_t::try_from(read).unwrap() };
+ Ok(read as _)
+ }
+}
+
+unsafe extern "C" fn read_iter_callback<T: FileOperations>(
+ iocb: *mut bindings::kiocb,
+ raw_iter: *mut bindings::iov_iter,
+) -> isize {
+ from_kernel_result! {
+        // SAFETY: The C caller guarantees that `iocb` and `raw_iter` are valid for the
+        // duration of this call.
+        let mut iter = unsafe { IovIter::from_ptr(raw_iter) };
+        let file = unsafe { (*iocb).ki_filp };
+        let offset = unsafe { (*iocb).ki_pos };
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Wrapper::into_pointer`. `T::Wrapper::from_pointer` is only called by the `release`
+        // callback, which the C API guarantees will be called only when all references to
+ // `file` have been released, so we know it can't be called while this function is running.
+ let f = unsafe { T::Wrapper::borrow((*file).private_data) };
+ let read = T::read(&f, unsafe { &FileRef::from_ptr(file) }, &mut iter, offset.try_into()?)?;
+ unsafe { (*iocb).ki_pos += bindings::loff_t::try_from(read).unwrap() };
+ Ok(read as _)
+ }
+}
+
+unsafe extern "C" fn write_callback<T: FileOperations>(
+ file: *mut bindings::file,
+ buf: *const c_types::c_char,
+ len: c_types::c_size_t,
+ offset: *mut bindings::loff_t,
+) -> c_types::c_ssize_t {
+ from_kernel_result! {
+        // SAFETY: `buf` and `len` describe a userspace buffer provided by the C caller.
+        let mut data = unsafe { UserSlicePtr::new(buf as *mut c_types::c_void, len).reader() };
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Wrapper::into_pointer`. `T::Wrapper::from_pointer` is only called by the `release`
+        // callback, which the C API guarantees will be called only when all references to
+ // `file` have been released, so we know it can't be called while this function is running.
+ let f = unsafe { T::Wrapper::borrow((*file).private_data) };
+ // No `FMODE_UNSIGNED_OFFSET` support, so `offset` must be in [0, 2^63).
+ // See discussion in https://github.com/fishinabarrel/linux-kernel-module-rust/pull/113
+ let written = T::write(&f, unsafe { &FileRef::from_ptr(file) }, &mut data, unsafe { *offset }.try_into()?)?;
+ unsafe { (*offset) += bindings::loff_t::try_from(written).unwrap() };
+ Ok(written as _)
+ }
+}
+
+unsafe extern "C" fn write_iter_callback<T: FileOperations>(
+ iocb: *mut bindings::kiocb,
+ raw_iter: *mut bindings::iov_iter,
+) -> isize {
+ from_kernel_result! {
+        // SAFETY: The C caller guarantees that `iocb` and `raw_iter` are valid for the
+        // duration of this call.
+        let mut iter = unsafe { IovIter::from_ptr(raw_iter) };
+        let file = unsafe { (*iocb).ki_filp };
+        let offset = unsafe { (*iocb).ki_pos };
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Wrapper::into_pointer`. `T::Wrapper::from_pointer` is only called by the `release`
+        // callback, which the C API guarantees will be called only when all references to
+ // `file` have been released, so we know it can't be called while this function is running.
+ let f = unsafe { T::Wrapper::borrow((*file).private_data) };
+ let written = T::write(&f, unsafe { &FileRef::from_ptr(file) }, &mut iter, offset.try_into()?)?;
+ unsafe { (*iocb).ki_pos += bindings::loff_t::try_from(written).unwrap() };
+ Ok(written as _)
+ }
+}
+
+unsafe extern "C" fn release_callback<T: FileOperations>(
+ _inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> c_types::c_int {
+    // SAFETY: `file` is valid for the duration of this call, and `private_data` was
+    // initialised by `open_callback`, so taking the pointer back here is balanced.
+    let ptr = mem::replace(unsafe { &mut (*file).private_data }, ptr::null_mut());
+    T::release(unsafe { T::Wrapper::from_pointer(ptr as _) }, unsafe {
+        &FileRef::from_ptr(file)
+    });
+ 0
+}
+
+unsafe extern "C" fn llseek_callback<T: FileOperations>(
+ file: *mut bindings::file,
+ offset: bindings::loff_t,
+ whence: c_types::c_int,
+) -> bindings::loff_t {
+ from_kernel_result! {
+ let off = match whence as u32 {
+ bindings::SEEK_SET => SeekFrom::Start(offset.try_into()?),
+ bindings::SEEK_CUR => SeekFrom::Current(offset),
+ bindings::SEEK_END => SeekFrom::End(offset),
+ _ => return Err(Error::EINVAL),
+ };
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Wrapper::into_pointer`. `T::Wrapper::from_pointer` is only called by the `release`
+        // callback, which the C API guarantees will be called only when all references to
+ // `file` have been released, so we know it can't be called while this function is running.
+ let f = unsafe { T::Wrapper::borrow((*file).private_data) };
+ let off = T::seek(&f, unsafe { &FileRef::from_ptr(file) }, off)?;
+ Ok(off as bindings::loff_t)
+ }
+}
+
+unsafe extern "C" fn unlocked_ioctl_callback<T: FileOperations>(
+ file: *mut bindings::file,
+ cmd: c_types::c_uint,
+ arg: c_types::c_ulong,
+) -> c_types::c_long {
+ from_kernel_result! {
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Wrapper::into_pointer`. `T::Wrapper::from_pointer` is only called by the `release`
+        // callback, which the C API guarantees will be called only when all references to
+ // `file` have been released, so we know it can't be called while this function is running.
+ let f = unsafe { T::Wrapper::borrow((*file).private_data) };
+ let mut cmd = IoctlCommand::new(cmd as _, arg as _);
+ let ret = T::ioctl(&f, unsafe { &FileRef::from_ptr(file) }, &mut cmd)?;
+ Ok(ret as _)
+ }
+}
+
+unsafe extern "C" fn compat_ioctl_callback<T: FileOperations>(
+ file: *mut bindings::file,
+ cmd: c_types::c_uint,
+ arg: c_types::c_ulong,
+) -> c_types::c_long {
+ from_kernel_result! {
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Wrapper::into_pointer`. `T::Wrapper::from_pointer` is only called by the `release`
+        // callback, which the C API guarantees will be called only when all references to
+ // `file` have been released, so we know it can't be called while this function is running.
+ let f = unsafe { T::Wrapper::borrow((*file).private_data) };
+ let mut cmd = IoctlCommand::new(cmd as _, arg as _);
+ let ret = T::compat_ioctl(&f, unsafe { &FileRef::from_ptr(file) }, &mut cmd)?;
+ Ok(ret as _)
+ }
+}
+
+unsafe extern "C" fn mmap_callback<T: FileOperations>(
+ file: *mut bindings::file,
+ vma: *mut bindings::vm_area_struct,
+) -> c_types::c_int {
+ from_kernel_result! {
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Wrapper::into_pointer`. `T::Wrapper::from_pointer` is only called by the `release`
+        // callback, which the C API guarantees will be called only when all references to
+ // `file` have been released, so we know it can't be called while this function is running.
+ let f = unsafe { T::Wrapper::borrow((*file).private_data) };
+ T::mmap(&f, unsafe { &FileRef::from_ptr(file) }, unsafe { &mut *vma })?;
+ Ok(0)
+ }
+}
+
+unsafe extern "C" fn fsync_callback<T: FileOperations>(
+ file: *mut bindings::file,
+ start: bindings::loff_t,
+ end: bindings::loff_t,
+ datasync: c_types::c_int,
+) -> c_types::c_int {
+ from_kernel_result! {
+ let start = start.try_into()?;
+ let end = end.try_into()?;
+ let datasync = datasync != 0;
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Wrapper::into_pointer`. `T::Wrapper::from_pointer` is only called by the `release`
+        // callback, which the C API guarantees will be called only when all references to
+ // `file` have been released, so we know it can't be called while this function is running.
+ let f = unsafe { T::Wrapper::borrow((*file).private_data) };
+ let res = T::fsync(&f, unsafe { &FileRef::from_ptr(file) }, start, end, datasync)?;
+ Ok(res.try_into().unwrap())
+ }
+}
+
+unsafe extern "C" fn poll_callback<T: FileOperations>(
+ file: *mut bindings::file,
+ wait: *mut bindings::poll_table_struct,
+) -> bindings::__poll_t {
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Wrapper::into_pointer`. `T::Wrapper::from_pointer` is only called by the `release`
+    // callback, which the C API guarantees will be called only when all references to `file`
+ // have been released, so we know it can't be called while this function is running.
+ let f = unsafe { T::Wrapper::borrow((*file).private_data) };
+ match T::poll(&f, unsafe { &FileRef::from_ptr(file) }, unsafe {
+ &PollTable::from_ptr(wait)
+ }) {
+ Ok(v) => v,
+ Err(_) => bindings::POLLERR,
+ }
+}
+
+pub(crate) struct FileOperationsVtable<A, T>(marker::PhantomData<A>, marker::PhantomData<T>);
+
+impl<A: FileOpenAdapter, T: FileOpener<A::Arg>> FileOperationsVtable<A, T> {
+ const VTABLE: bindings::file_operations = bindings::file_operations {
+ open: Some(open_callback::<A, T>),
+ release: Some(release_callback::<T>),
+ read: if T::TO_USE.read {
+ Some(read_callback::<T>)
+ } else {
+ None
+ },
+ write: if T::TO_USE.write {
+ Some(write_callback::<T>)
+ } else {
+ None
+ },
+ llseek: if T::TO_USE.seek {
+ Some(llseek_callback::<T>)
+ } else {
+ None
+ },
+
+ check_flags: None,
+ compat_ioctl: if T::TO_USE.compat_ioctl {
+ Some(compat_ioctl_callback::<T>)
+ } else {
+ None
+ },
+ copy_file_range: None,
+ fallocate: None,
+ fadvise: None,
+ fasync: None,
+ flock: None,
+ flush: None,
+ fsync: if T::TO_USE.fsync {
+ Some(fsync_callback::<T>)
+ } else {
+ None
+ },
+ get_unmapped_area: None,
+ iterate: None,
+ iterate_shared: None,
+ iopoll: None,
+ lock: None,
+ mmap: if T::TO_USE.mmap {
+ Some(mmap_callback::<T>)
+ } else {
+ None
+ },
+ mmap_supported_flags: 0,
+ owner: ptr::null_mut(),
+ poll: if T::TO_USE.poll {
+ Some(poll_callback::<T>)
+ } else {
+ None
+ },
+ read_iter: if T::TO_USE.read_iter {
+ Some(read_iter_callback::<T>)
+ } else {
+ None
+ },
+ remap_file_range: None,
+ sendpage: None,
+ setlease: None,
+ show_fdinfo: None,
+ splice_read: None,
+ splice_write: None,
+ unlocked_ioctl: if T::TO_USE.ioctl {
+ Some(unlocked_ioctl_callback::<T>)
+ } else {
+ None
+ },
+ write_iter: if T::TO_USE.write_iter {
+ Some(write_iter_callback::<T>)
+ } else {
+ None
+ },
+ };
+
+ /// Builds an instance of [`struct file_operations`].
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the adapter is compatible with the way the device is registered.
+ pub(crate) const unsafe fn build() -> &'static bindings::file_operations {
+ &Self::VTABLE
+ }
+}
+
+/// Represents which fields of [`struct file_operations`] should be populated with pointers.
+pub struct ToUse {
+ /// The `read` field of [`struct file_operations`].
+ pub read: bool,
+
+ /// The `read_iter` field of [`struct file_operations`].
+ pub read_iter: bool,
+
+ /// The `write` field of [`struct file_operations`].
+ pub write: bool,
+
+ /// The `write_iter` field of [`struct file_operations`].
+ pub write_iter: bool,
+
+ /// The `llseek` field of [`struct file_operations`].
+ pub seek: bool,
+
+ /// The `unlocked_ioctl` field of [`struct file_operations`].
+ pub ioctl: bool,
+
+ /// The `compat_ioctl` field of [`struct file_operations`].
+ pub compat_ioctl: bool,
+
+ /// The `fsync` field of [`struct file_operations`].
+ pub fsync: bool,
+
+ /// The `mmap` field of [`struct file_operations`].
+ pub mmap: bool,
+
+ /// The `poll` field of [`struct file_operations`].
+ pub poll: bool,
+}
+
+/// A constant version where all values are set to `false`; that is, all supported fields will
+/// be set to null pointers.
+pub const USE_NONE: ToUse = ToUse {
+ read: false,
+ read_iter: false,
+ write: false,
+ write_iter: false,
+ seek: false,
+ ioctl: false,
+ compat_ioctl: false,
+ fsync: false,
+ mmap: false,
+ poll: false,
+};
+
+/// Defines the [`FileOperations::TO_USE`] field based on a list of fields to be populated.
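+///
+/// # Examples
+///
+/// A sketch of selecting only `read` and `write` inside an implementation of
+/// [`FileOperations`] for a hypothetical `MyFile` type:
+///
+/// ```ignore
+/// impl FileOperations for MyFile {
+///     // Populate only the `read` and `write` pointers; all others stay null.
+///     declare_file_operations!(read, write);
+/// }
+/// ```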
+#[macro_export]
+macro_rules! declare_file_operations {
+ () => {
+ const TO_USE: $crate::file_operations::ToUse = $crate::file_operations::USE_NONE;
+ };
+ ($($i:ident),+) => {
+        const TO_USE: $crate::file_operations::ToUse =
+ $crate::file_operations::ToUse {
+ $($i: true),+ ,
+ ..$crate::file_operations::USE_NONE
+ };
+ };
+}
+
+/// Allows the handling of ioctls defined with the `_IO`, `_IOR`, `_IOW`, and `_IOWR` macros.
+///
+/// For each macro, there is a handler function that takes the appropriate types as arguments.
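+///
+/// # Examples
+///
+/// A sketch of a handler that only accepts `_IO`-style (bufferless) commands;
+/// the `State` type and the command value are hypothetical:
+///
+/// ```ignore
+/// struct State;
+///
+/// impl IoctlHandler for State {
+///     type Target = Self;
+///
+///     fn pure(_this: &Self, _file: &File, cmd: u32, _arg: usize) -> Result<i32> {
+///         match cmd {
+///             // A hypothetical "ping" command.
+///             0x1234 => Ok(0),
+///             _ => Err(Error::EINVAL),
+///         }
+///     }
+/// }
+/// ```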
+pub trait IoctlHandler: Sync {
+ /// The type of the first argument to each associated function.
+ type Target;
+
+ /// Handles ioctls defined with the `_IO` macro, that is, with no buffer as argument.
+ fn pure(_this: &Self::Target, _file: &File, _cmd: u32, _arg: usize) -> Result<i32> {
+ Err(Error::EINVAL)
+ }
+
+ /// Handles ioctls defined with the `_IOR` macro, that is, with an output buffer provided as
+ /// argument.
+ fn read(
+ _this: &Self::Target,
+ _file: &File,
+ _cmd: u32,
+ _writer: &mut UserSlicePtrWriter,
+ ) -> Result<i32> {
+ Err(Error::EINVAL)
+ }
+
+ /// Handles ioctls defined with the `_IOW` macro, that is, with an input buffer provided as
+ /// argument.
+ fn write(
+ _this: &Self::Target,
+ _file: &File,
+ _cmd: u32,
+ _reader: &mut UserSlicePtrReader,
+ ) -> Result<i32> {
+ Err(Error::EINVAL)
+ }
+
+ /// Handles ioctls defined with the `_IOWR` macro, that is, with a buffer for both input and
+ /// output provided as argument.
+ fn read_write(
+ _this: &Self::Target,
+ _file: &File,
+ _cmd: u32,
+ _data: UserSlicePtr,
+ ) -> Result<i32> {
+ Err(Error::EINVAL)
+ }
+}
+
+/// Represents an ioctl command.
+///
+/// It can use the components of an ioctl command to dispatch ioctls using
+/// [`IoctlCommand::dispatch`].
+pub struct IoctlCommand {
+ cmd: u32,
+ arg: usize,
+ user_slice: Option<UserSlicePtr>,
+}
+
+impl IoctlCommand {
+ /// Constructs a new [`IoctlCommand`].
+ fn new(cmd: u32, arg: usize) -> Self {
+ let size = (cmd >> bindings::_IOC_SIZESHIFT) & bindings::_IOC_SIZEMASK;
+
+ // SAFETY: We only create one instance of the user slice per ioctl call, so TOCTOU issues
+ // are not possible.
+ let user_slice = Some(unsafe { UserSlicePtr::new(arg as _, size as _) });
+ Self {
+ cmd,
+ arg,
+ user_slice,
+ }
+ }
+
+ /// Dispatches the given ioctl to the appropriate handler based on the value of the command. It
+ /// also creates a [`UserSlicePtr`], [`UserSlicePtrReader`], or [`UserSlicePtrWriter`]
+ /// depending on the direction of the buffer of the command.
+ ///
+ /// It is meant to be used in implementations of [`FileOperations::ioctl`] and
+ /// [`FileOperations::compat_ioctl`].
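+    ///
+    /// # Examples
+    ///
+    /// A sketch of use from [`FileOperations::ioctl`], assuming `Self` also
+    /// implements [`IoctlHandler`] with `Target = Self`:
+    ///
+    /// ```ignore
+    /// fn ioctl(this: &Self, file: &File, cmd: &mut IoctlCommand) -> Result<i32> {
+    ///     cmd.dispatch::<Self>(this, file)
+    /// }
+    /// ```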
+ pub fn dispatch<T: IoctlHandler>(&mut self, handler: &T::Target, file: &File) -> Result<i32> {
+ let dir = (self.cmd >> bindings::_IOC_DIRSHIFT) & bindings::_IOC_DIRMASK;
+ if dir == bindings::_IOC_NONE {
+ return T::pure(handler, file, self.cmd, self.arg);
+ }
+
+ let data = self.user_slice.take().ok_or(Error::EINVAL)?;
+ const READ_WRITE: u32 = bindings::_IOC_READ | bindings::_IOC_WRITE;
+ match dir {
+ bindings::_IOC_WRITE => T::write(handler, file, self.cmd, &mut data.reader()),
+ bindings::_IOC_READ => T::read(handler, file, self.cmd, &mut data.writer()),
+ READ_WRITE => T::read_write(handler, file, self.cmd, data),
+ _ => Err(Error::EINVAL),
+ }
+ }
+
+ /// Returns the raw 32-bit value of the command and the ptr-sized argument.
+ pub fn raw(&self) -> (u32, usize) {
+ (self.cmd, self.arg)
+ }
+}
+
+/// Trait for extracting file open arguments from kernel data structures.
+///
+/// This is meant to be implemented by registration managers.
+pub trait FileOpenAdapter {
+ /// The type of argument this adapter extracts.
+ type Arg;
+
+ /// Converts untyped data stored in [`struct inode`] and [`struct file`] (when [`struct
+ /// file_operations::open`] is called) into the given type. For example, for `miscdev`
+ /// devices, a pointer to the registered [`struct miscdev`] is stored in [`struct
+ /// file::private_data`].
+ ///
+ /// # Safety
+ ///
+ /// This function must be called only when [`struct file_operations::open`] is being called for
+ /// a file that was registered by the implementer.
+ unsafe fn convert(_inode: *mut bindings::inode, _file: *mut bindings::file)
+ -> *const Self::Arg;
+}
+
+/// Trait for implementers of kernel files.
+///
+/// In addition to the methods in [`FileOperations`], implementers must also provide
+/// [`FileOpener::open`] with a customised argument. This allows a single implementation of
+/// [`FileOperations`] to be used for different types of registrations, for example, `miscdev` and
+/// `chrdev`.
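+///
+/// # Examples
+///
+/// A sketch of an opener whose registration context is a `u32`; the `MyFile`
+/// type (with a `Default` implementation) is hypothetical:
+///
+/// ```ignore
+/// impl FileOpener<u32> for MyFile {
+///     fn open(context: &u32) -> Result<Self::Wrapper> {
+///         pr_info!("opened with context {}\n", context);
+///         Ok(Box::try_new(MyFile::default())?)
+///     }
+/// }
+/// ```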
+pub trait FileOpener<T: ?Sized>: FileOperations {
+ /// Creates a new instance of this file.
+ ///
+ /// Corresponds to the `open` function pointer in `struct file_operations`.
+ fn open(context: &T) -> Result<Self::Wrapper>;
+}
+
+impl<T: FileOperations<Wrapper = Box<T>> + Default> FileOpener<()> for T {
+ fn open(_: &()) -> Result<Self::Wrapper> {
+ Ok(Box::try_new(T::default())?)
+ }
+}
+
+/// Corresponds to the kernel's `struct file_operations`.
+///
+/// You implement this trait whenever you would create a `struct file_operations`.
+///
+/// File descriptors may be used from multiple threads/processes concurrently, so your type must be
+/// [`Sync`]. It must also be [`Send`] because [`FileOperations::release`] will be called from the
+/// thread that decrements the associated file's refcount to zero.
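+///
+/// # Examples
+///
+/// A sketch of a minimal read-only implementation; the `Dummy` type is
+/// hypothetical and `Wrapper` is left at its default of `Box<Self>`:
+///
+/// ```ignore
+/// #[derive(Default)]
+/// struct Dummy;
+///
+/// impl FileOperations for Dummy {
+///     declare_file_operations!(read);
+///
+///     fn read<T: IoBufferWriter>(
+///         _this: &Self,
+///         _file: &File,
+///         data: &mut T,
+///         offset: u64,
+///     ) -> Result<usize> {
+///         // Serve a single line of content and report EOF afterwards.
+///         if offset != 0 {
+///             return Ok(0);
+///         }
+///         data.write_slice(b"hello\n")?;
+///         Ok(6)
+///     }
+/// }
+/// ```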
+pub trait FileOperations: Send + Sync + Sized {
+ /// The methods to use to populate [`struct file_operations`].
+ const TO_USE: ToUse;
+
+ /// The pointer type that will be used to hold ourselves.
+ type Wrapper: PointerWrapper = Box<Self>;
+
+ /// Cleans up after the last reference to the file goes away.
+ ///
+ /// Note that the object is moved, so it will be freed automatically unless the implementation
+ /// moves it elsewhere.
+ ///
+ /// Corresponds to the `release` function pointer in `struct file_operations`.
+ fn release(_obj: Self::Wrapper, _file: &File) {}
+
+ /// Reads data from this file to the caller's buffer.
+ ///
+ /// Corresponds to the `read` and `read_iter` function pointers in `struct file_operations`.
+ fn read<T: IoBufferWriter>(
+ _this: &<<Self::Wrapper as PointerWrapper>::Borrowed as Deref>::Target,
+ _file: &File,
+ _data: &mut T,
+ _offset: u64,
+ ) -> Result<usize> {
+ Err(Error::EINVAL)
+ }
+
+ /// Writes data from the caller's buffer to this file.
+ ///
+ /// Corresponds to the `write` and `write_iter` function pointers in `struct file_operations`.
+ fn write<T: IoBufferReader>(
+ _this: &<<Self::Wrapper as PointerWrapper>::Borrowed as Deref>::Target,
+ _file: &File,
+ _data: &mut T,
+ _offset: u64,
+ ) -> Result<usize> {
+ Err(Error::EINVAL)
+ }
+
+ /// Changes the position of the file.
+ ///
+ /// Corresponds to the `llseek` function pointer in `struct file_operations`.
+ fn seek(
+ _this: &<<Self::Wrapper as PointerWrapper>::Borrowed as Deref>::Target,
+ _file: &File,
+ _offset: SeekFrom,
+ ) -> Result<u64> {
+ Err(Error::EINVAL)
+ }
+
+ /// Performs IO control operations that are specific to the file.
+ ///
+ /// Corresponds to the `unlocked_ioctl` function pointer in `struct file_operations`.
+ fn ioctl(
+ _this: &<<Self::Wrapper as PointerWrapper>::Borrowed as Deref>::Target,
+ _file: &File,
+ _cmd: &mut IoctlCommand,
+ ) -> Result<i32> {
+ Err(Error::EINVAL)
+ }
+
+    /// Performs 32-bit IO control operations that are specific to the file, on 64-bit kernels.
+ ///
+ /// Corresponds to the `compat_ioctl` function pointer in `struct file_operations`.
+ fn compat_ioctl(
+ _this: &<<Self::Wrapper as PointerWrapper>::Borrowed as Deref>::Target,
+ _file: &File,
+ _cmd: &mut IoctlCommand,
+ ) -> Result<i32> {
+ Err(Error::EINVAL)
+ }
+
+ /// Syncs pending changes to this file.
+ ///
+ /// Corresponds to the `fsync` function pointer in `struct file_operations`.
+ fn fsync(
+ _this: &<<Self::Wrapper as PointerWrapper>::Borrowed as Deref>::Target,
+ _file: &File,
+ _start: u64,
+ _end: u64,
+ _datasync: bool,
+ ) -> Result<u32> {
+ Err(Error::EINVAL)
+ }
+
+ /// Maps areas of the caller's virtual memory with device/file memory.
+ ///
+ /// Corresponds to the `mmap` function pointer in `struct file_operations`.
+ /// TODO: wrap `vm_area_struct` so that we don't have to expose it.
+ fn mmap(
+ _this: &<<Self::Wrapper as PointerWrapper>::Borrowed as Deref>::Target,
+ _file: &File,
+ _vma: &mut bindings::vm_area_struct,
+ ) -> Result {
+ Err(Error::EINVAL)
+ }
+
+ /// Checks the state of the file and optionally registers for notification when the state
+ /// changes.
+ ///
+ /// Corresponds to the `poll` function pointer in `struct file_operations`.
+ fn poll(
+ _this: &<<Self::Wrapper as PointerWrapper>::Borrowed as Deref>::Target,
+ _file: &File,
+ _table: &PollTable,
+ ) -> Result<u32> {
+ Ok(bindings::POLLIN | bindings::POLLOUT | bindings::POLLRDNORM | bindings::POLLWRNORM)
+ }
+}
diff --git a/rust/kernel/io_buffer.rs b/rust/kernel/io_buffer.rs
new file mode 100644
index 000000000000..ccecc4763aca
--- /dev/null
+++ b/rust/kernel/io_buffer.rs
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Buffers used in IO.
+
+use crate::Result;
+use alloc::vec::Vec;
+use core::mem::{size_of, MaybeUninit};
+
+/// Represents a buffer to be read from during IO.
+pub trait IoBufferReader {
+ /// Returns the number of bytes left to be read from the io buffer.
+ ///
+ /// Note that even reading less than this number of bytes may fail.
+ fn len(&self) -> usize;
+
+ /// Returns `true` if no data is available in the io buffer.
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Reads raw data from the io buffer into a raw kernel buffer.
+ ///
+ /// # Safety
+ ///
+ /// The output buffer must be valid.
+ unsafe fn read_raw(&mut self, out: *mut u8, len: usize) -> Result;
+
+ /// Reads all data remaining in the io buffer.
+ ///
+ /// Returns `EFAULT` if the address does not currently point to mapped, readable memory.
+ fn read_all(&mut self) -> Result<Vec<u8>> {
+ let mut data = Vec::<u8>::new();
+ data.try_resize(self.len(), 0)?;
+
+ // SAFETY: The output buffer is valid as we just allocated it.
+ unsafe { self.read_raw(data.as_mut_ptr(), data.len())? };
+ Ok(data)
+ }
+
+ /// Reads a byte slice from the io buffer.
+ ///
+ /// Returns `EFAULT` if the byte slice is bigger than the remaining size of the user slice or
+ /// if the address does not currently point to mapped, readable memory.
+ fn read_slice(&mut self, data: &mut [u8]) -> Result {
+ // SAFETY: The output buffer is valid as it's coming from a live reference.
+ unsafe { self.read_raw(data.as_mut_ptr(), data.len()) }
+ }
+
+ /// Reads the contents of a plain old data (POD) type from the io buffer.
+ fn read<T: ReadableFromBytes>(&mut self) -> Result<T> {
+ let mut out = MaybeUninit::<T>::uninit();
+ // SAFETY: The buffer is valid as it was just allocated.
+ unsafe { self.read_raw(out.as_mut_ptr() as _, size_of::<T>()) }?;
+ // SAFETY: We just initialised the data.
+ Ok(unsafe { out.assume_init() })
+ }
+}
+
+/// Represents a buffer to be written to during IO.
+pub trait IoBufferWriter {
+ /// Returns the number of bytes left to be written into the io buffer.
+ ///
+ /// Note that even writing less than this number of bytes may fail.
+ fn len(&self) -> usize;
+
+ /// Returns `true` if the io buffer cannot hold any additional data.
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Writes zeroes to the io buffer.
+ ///
+    /// Unlike the other write functions, `clear` will zero as much as it can and update the
+    /// writer's internal state to reflect this. It will, however, return an error if it cannot
+ /// clear `len` bytes.
+ ///
+ /// For example, if a caller requests that 100 bytes be cleared but a segfault happens after
+ /// 20 bytes, then EFAULT is returned and the writer is advanced by 20 bytes.
+ fn clear(&mut self, len: usize) -> Result;
+
+ /// Writes a byte slice into the io buffer.
+ ///
+ /// Returns `EFAULT` if the byte slice is bigger than the remaining size of the io buffer or if
+ /// the address does not currently point to mapped, writable memory.
+ fn write_slice(&mut self, data: &[u8]) -> Result {
+ // SAFETY: The input buffer is valid as it's coming from a live reference.
+ unsafe { self.write_raw(data.as_ptr(), data.len()) }
+ }
+
+ /// Writes raw data to the io buffer from a raw kernel buffer.
+ ///
+ /// # Safety
+ ///
+ /// The input buffer must be valid.
+ unsafe fn write_raw(&mut self, data: *const u8, len: usize) -> Result;
+
+ /// Writes the contents of the given data into the io buffer.
+ fn write<T: WritableToBytes>(&mut self, data: &T) -> Result {
+ // SAFETY: The input buffer is valid as it's coming from a live
+ // reference to a type that implements `WritableToBytes`.
+ unsafe { self.write_raw(data as *const T as _, size_of::<T>()) }
+ }
+}
+
+/// Specifies that a type is safely readable from byte slices.
+///
+/// Not all types can be safely read from byte slices; examples from
+/// <https://doc.rust-lang.org/reference/behavior-considered-undefined.html> include `bool`
+/// that must be either `0` or `1`, and `char` that cannot be a surrogate or above `char::MAX`.
+///
+/// # Safety
+///
+/// Implementers must ensure that the type is made up only of types that can be safely read from
+/// arbitrary byte sequences (e.g., `u32`, `u64`, etc.).
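+///
+/// # Examples
+///
+/// A sketch of a composite type for which the guarantee holds; the `Header`
+/// struct is hypothetical:
+///
+/// ```ignore
+/// #[repr(C)]
+/// struct Header {
+///     size: u32,
+///     flags: u32,
+/// }
+///
+/// // SAFETY: `Header` only contains `u32`s, and every bit pattern is a valid `u32`.
+/// unsafe impl ReadableFromBytes for Header {}
+/// ```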
+pub unsafe trait ReadableFromBytes {}
+
+// SAFETY: All bit patterns are acceptable values of the types below.
+unsafe impl ReadableFromBytes for u8 {}
+unsafe impl ReadableFromBytes for u16 {}
+unsafe impl ReadableFromBytes for u32 {}
+unsafe impl ReadableFromBytes for u64 {}
+unsafe impl ReadableFromBytes for usize {}
+unsafe impl ReadableFromBytes for i8 {}
+unsafe impl ReadableFromBytes for i16 {}
+unsafe impl ReadableFromBytes for i32 {}
+unsafe impl ReadableFromBytes for i64 {}
+unsafe impl ReadableFromBytes for isize {}
+
+/// Specifies that a type is safely writable to byte slices.
+///
+/// This means that we don't read undefined values (which leads to UB) in preparation for writing
+/// to the byte slice. It also ensures that no potentially sensitive information is leaked into the
+/// byte slices.
+///
+/// # Safety
+///
+/// A type must not include padding bytes and must be fully initialised to safely implement
+/// [`WritableToBytes`] (i.e., it doesn't contain [`MaybeUninit`] fields). A composition of
+/// writable types in a structure is not necessarily writable because it may result in padding
+/// bytes.
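+///
+/// # Examples
+///
+/// A sketch of a padding-free type that may be written out directly; the
+/// `Reply` struct is hypothetical:
+///
+/// ```ignore
+/// #[repr(C)]
+/// struct Reply {
+///     status: i32,
+///     len: u32,
+/// }
+///
+/// // SAFETY: `Reply` is `#[repr(C)]` with two 4-byte fields, so it contains no
+/// // padding bytes, and fully-initialised instances have no uninitialised bits.
+/// unsafe impl WritableToBytes for Reply {}
+/// ```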
+pub unsafe trait WritableToBytes {}
+
+// SAFETY: Initialised instances of the following types have no uninitialised portions.
+unsafe impl WritableToBytes for u8 {}
+unsafe impl WritableToBytes for u16 {}
+unsafe impl WritableToBytes for u32 {}
+unsafe impl WritableToBytes for u64 {}
+unsafe impl WritableToBytes for usize {}
+unsafe impl WritableToBytes for i8 {}
+unsafe impl WritableToBytes for i16 {}
+unsafe impl WritableToBytes for i32 {}
+unsafe impl WritableToBytes for i64 {}
+unsafe impl WritableToBytes for isize {}
diff --git a/rust/kernel/iov_iter.rs b/rust/kernel/iov_iter.rs
new file mode 100644
index 000000000000..d778e1ac9760
--- /dev/null
+++ b/rust/kernel/iov_iter.rs
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! IO vector iterators.
+//!
+//! C header: [`include/linux/uio.h`](../../../../include/linux/uio.h)
+
+use crate::{
+ bindings, c_types,
+ error::Error,
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ Result,
+};
+
+extern "C" {
+ fn rust_helper_copy_to_iter(
+ addr: *const c_types::c_void,
+ bytes: usize,
+ i: *mut bindings::iov_iter,
+ ) -> usize;
+
+ fn rust_helper_copy_from_iter(
+ addr: *mut c_types::c_void,
+ bytes: usize,
+ i: *mut bindings::iov_iter,
+ ) -> usize;
+}
+
+/// Wraps the kernel's `struct iov_iter`.
+///
+/// # Invariants
+///
+/// The pointer `IovIter::ptr` is non-null and valid.
+pub struct IovIter {
+ ptr: *mut bindings::iov_iter,
+}
+
+impl IovIter {
+ fn common_len(&self) -> usize {
+ // SAFETY: `IovIter::ptr` is guaranteed to be valid by the type invariants.
+ unsafe { (*self.ptr).count }
+ }
+
+ /// Constructs a new [`struct iov_iter`] wrapper.
+ ///
+ /// # Safety
+ ///
+ /// The pointer `ptr` must be non-null and valid for the lifetime of the object.
+ pub(crate) unsafe fn from_ptr(ptr: *mut bindings::iov_iter) -> Self {
+ // INVARIANTS: the safety contract ensures the type invariant will hold.
+ Self { ptr }
+ }
+}
+
+impl IoBufferWriter for IovIter {
+ fn len(&self) -> usize {
+ self.common_len()
+ }
+
+ fn clear(&mut self, mut len: usize) -> Result {
+ while len > 0 {
+ // SAFETY: `IovIter::ptr` is guaranteed to be valid by the type invariants.
+ let written = unsafe { bindings::iov_iter_zero(len, self.ptr) };
+ if written == 0 {
+ return Err(Error::EFAULT);
+ }
+
+ len -= written;
+ }
+ Ok(())
+ }
+
+ unsafe fn write_raw(&mut self, data: *const u8, len: usize) -> Result {
+ let res = unsafe { rust_helper_copy_to_iter(data as _, len, self.ptr) };
+ if res != len {
+ Err(Error::EFAULT)
+ } else {
+ Ok(())
+ }
+ }
+}
+
+impl IoBufferReader for IovIter {
+ fn len(&self) -> usize {
+ self.common_len()
+ }
+
+ unsafe fn read_raw(&mut self, out: *mut u8, len: usize) -> Result {
+ let res = unsafe { rust_helper_copy_from_iter(out as _, len, self.ptr) };
+ if res != len {
+ Err(Error::EFAULT)
+ } else {
+ Ok(())
+ }
+ }
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
new file mode 100644
index 000000000000..86c580fd7f68
--- /dev/null
+++ b/rust/kernel/lib.rs
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! The `kernel` crate.
+//!
+//! This crate contains the kernel APIs that have been ported or wrapped for
+//! usage by Rust code in the kernel and is shared by all of them.
+//!
+//! In other words, all the rest of the Rust code in the kernel (e.g. kernel
+//! modules written in Rust) depends on [`core`], [`alloc`] and this crate.
+//!
+//! If you need a kernel C API that is not ported or wrapped here yet, then
+//! port or wrap it here first instead of bypassing this crate.
+
+#![no_std]
+#![feature(
+ allocator_api,
+ alloc_error_handler,
+ associated_type_defaults,
+ const_fn_trait_bound,
+ const_mut_refs,
+ const_panic,
+ const_raw_ptr_deref,
+ const_unreachable_unchecked,
+ receiver_trait,
+ try_reserve
+)]
+
+// Ensure conditional compilation based on the kernel configuration works;
+// otherwise we may silently break things like initcall handling.
+#[cfg(not(CONFIG_RUST))]
+compile_error!("Missing kernel configuration for conditional compilation");
+
+#[cfg(not(test))]
+#[cfg(not(testlib))]
+mod allocator;
+
+#[doc(hidden)]
+pub mod bindings;
+
+pub mod buffer;
+pub mod c_types;
+pub mod chrdev;
+mod error;
+pub mod file;
+pub mod file_operations;
+pub mod miscdev;
+pub mod pages;
+pub mod security;
+pub mod str;
+pub mod task;
+pub mod traits;
+
+pub mod linked_list;
+mod raw_list;
+pub mod rbtree;
+
+#[doc(hidden)]
+pub mod module_param;
+
+mod build_assert;
+pub mod prelude;
+pub mod print;
+pub mod random;
+mod static_assert;
+pub mod sync;
+
+#[cfg(CONFIG_SYSCTL)]
+pub mod sysctl;
+
+pub mod io_buffer;
+pub mod iov_iter;
+pub mod of;
+pub mod platdev;
+mod types;
+pub mod user_ptr;
+
+#[doc(hidden)]
+pub use build_error::build_error;
+
+pub use crate::error::{Error, Result};
+pub use crate::types::{Mode, ScopeGuard};
+
+/// Page size defined in terms of the [`PAGE_SHIFT`] macro from C.
+///
+/// [`PAGE_SHIFT`]: ../../../include/asm-generic/page.h
+pub const PAGE_SIZE: usize = 1 << bindings::PAGE_SHIFT;
+
+/// Prefix to appear before log messages printed from within the kernel crate.
+const __LOG_PREFIX: &[u8] = b"rust_kernel\0";
+
+/// The top level entrypoint to implementing a kernel module.
+///
+/// For any teardown or cleanup operations, your type may implement [`Drop`].
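+///
+/// # Examples
+///
+/// A sketch of a minimal module; registration through the `module!` macro is
+/// omitted here:
+///
+/// ```ignore
+/// struct MyModule;
+///
+/// impl KernelModule for MyModule {
+///     fn init() -> Result<Self> {
+///         pr_info!("initialised\n");
+///         Ok(MyModule)
+///     }
+/// }
+/// ```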
+pub trait KernelModule: Sized + Sync {
+ /// Called at module initialization time.
+ ///
+ /// Use this method to perform whatever setup or registration your module
+ /// should do.
+ ///
+ /// Equivalent to the `module_init` macro in the C API.
+ fn init() -> Result<Self>;
+}
+
+/// Equivalent to `THIS_MODULE` in the C API.
+///
+/// C header: `include/linux/export.h`
+pub struct ThisModule(*mut bindings::module);
+
+// SAFETY: `THIS_MODULE` may be used from all threads within a module.
+unsafe impl Sync for ThisModule {}
+
+impl ThisModule {
+ /// Creates a [`ThisModule`] given the `THIS_MODULE` pointer.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be equal to the right `THIS_MODULE`.
+ pub const unsafe fn from_ptr(ptr: *mut bindings::module) -> ThisModule {
+ ThisModule(ptr)
+ }
+
+ /// Locks the module parameters to access them.
+ ///
+ /// Returns a [`KParamGuard`] that will release the lock when dropped.
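+    ///
+    /// # Examples
+    ///
+    /// A sketch, assuming a `&ThisModule` (such as the one generated by the
+    /// `module!` macro) is in scope as `module`:
+    ///
+    /// ```ignore
+    /// let guard = module.kernel_param_lock();
+    /// // Module parameters can be read consistently while `guard` is alive.
+    /// drop(guard);
+    /// ```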
+ pub fn kernel_param_lock(&self) -> KParamGuard<'_> {
+ // SAFETY: `kernel_param_lock` will check if the pointer is null and
+ // use the built-in mutex in that case.
+ #[cfg(CONFIG_SYSFS)]
+ unsafe {
+ bindings::kernel_param_lock(self.0)
+ }
+
+ KParamGuard { this_module: self }
+ }
+}
+
+/// Scoped lock on the kernel parameters of [`ThisModule`].
+///
+/// Lock will be released when this struct is dropped.
+pub struct KParamGuard<'a> {
+ this_module: &'a ThisModule,
+}
+
+#[cfg(CONFIG_SYSFS)]
+impl<'a> Drop for KParamGuard<'a> {
+ fn drop(&mut self) {
+ // SAFETY: `kernel_param_lock` will check if the pointer is null and
+        // use the built-in mutex in that case. The existence of `self`
+ // guarantees that the lock is held.
+ unsafe { bindings::kernel_param_unlock(self.this_module.0) }
+ }
+}
+
+/// Calculates the offset of a field from the beginning of the struct it belongs to.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::offset_of;
+/// struct Test {
+/// a: u64,
+/// b: u32,
+/// }
+///
+/// fn test() {
+/// // This prints `8`.
+/// pr_info!("{}\n", offset_of!(Test, b));
+/// }
+/// ```
+#[macro_export]
+macro_rules! offset_of {
+ ($type:ty, $($f:tt)*) => {{
+ let tmp = core::mem::MaybeUninit::<$type>::uninit();
+ let outer = tmp.as_ptr();
+ // To avoid warnings when nesting `unsafe` blocks.
+ #[allow(unused_unsafe)]
+ // SAFETY: The pointer is valid and aligned, just not initialised; `addr_of` ensures that
+ // we don't actually read from `outer` (which would be UB) nor create an intermediate
+ // reference.
+ let inner = unsafe { core::ptr::addr_of!((*outer).$($f)*) } as *const u8;
+ // To avoid warnings when nesting `unsafe` blocks.
+ #[allow(unused_unsafe)]
+ // SAFETY: The two pointers are within the same allocation block.
+ unsafe { inner.offset_from(outer as *const u8) }
+ }}
+}
+
+/// Produces a pointer to an object from a pointer to one of its fields.
+///
+/// # Safety
+///
+/// Callers must ensure that the pointer to the field is in fact a pointer to the specified field,
+/// as opposed to a pointer to another object of the same type.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::container_of;
+/// struct Test {
+/// a: u64,
+/// b: u32,
+/// }
+///
+/// fn test() {
+/// let test = Test { a: 10, b: 20 };
+/// let b_ptr = &test.b;
+/// let test_alias = unsafe { container_of!(b_ptr, Test, b) };
+/// // This prints `true`.
+/// pr_info!("{}\n", core::ptr::eq(&test, test_alias));
+/// }
+/// ```
+#[macro_export]
+macro_rules! container_of {
+ ($ptr:expr, $type:ty, $($f:tt)*) => {{
+ let offset = $crate::offset_of!($type, $($f)*);
+ unsafe { ($ptr as *const _ as *const u8).offset(-offset) as *const $type }
+ }}
+}
diff --git a/rust/kernel/linked_list.rs b/rust/kernel/linked_list.rs
new file mode 100644
index 000000000000..d57bf1b881a3
--- /dev/null
+++ b/rust/kernel/linked_list.rs
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Linked lists.
+//!
+//! TODO: This module is a work in progress.
+
+use alloc::{boxed::Box, sync::Arc};
+use core::ptr::NonNull;
+
+pub use crate::raw_list::{Cursor, GetLinks, Links};
+use crate::{raw_list, raw_list::RawList};
+
+// TODO: Use the one from `kernel::file_operations::PointerWrapper` instead.
+/// Wraps an object to be inserted in a linked list.
+pub trait Wrapper<T: ?Sized> {
+ /// Converts the wrapped object into a pointer that represents it.
+ fn into_pointer(self) -> NonNull<T>;
+
+ /// Converts the object back from the pointer representation.
+ ///
+ /// # Safety
+ ///
+ /// The passed pointer must come from a previous call to [`Wrapper::into_pointer()`].
+ unsafe fn from_pointer(ptr: NonNull<T>) -> Self;
+
+ /// Returns a reference to the wrapped object.
+ fn as_ref(&self) -> &T;
+}
+
+impl<T: ?Sized> Wrapper<T> for Box<T> {
+ fn into_pointer(self) -> NonNull<T> {
+ NonNull::new(Box::into_raw(self)).unwrap()
+ }
+
+ unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
+ unsafe { Box::from_raw(ptr.as_ptr()) }
+ }
+
+ fn as_ref(&self) -> &T {
+ AsRef::as_ref(self)
+ }
+}
+
+impl<T: ?Sized> Wrapper<T> for Arc<T> {
+ fn into_pointer(self) -> NonNull<T> {
+ NonNull::new(Arc::into_raw(self) as _).unwrap()
+ }
+
+ unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
+ unsafe { Arc::from_raw(ptr.as_ptr()) }
+ }
+
+ fn as_ref(&self) -> &T {
+ AsRef::as_ref(self)
+ }
+}
+
+impl<T: ?Sized> Wrapper<T> for &T {
+ fn into_pointer(self) -> NonNull<T> {
+ NonNull::from(self)
+ }
+
+ unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
+ unsafe { &*ptr.as_ptr() }
+ }
+
+ fn as_ref(&self) -> &T {
+ self
+ }
+}
+
+/// A descriptor of wrapped list elements.
+pub trait GetLinksWrapped: GetLinks {
+ /// Specifies which wrapper (e.g., `Box` and `Arc`) wraps the list entries.
+ type Wrapped: Wrapper<Self::EntryType>;
+}
+
+impl<T: ?Sized> GetLinksWrapped for Box<T>
+where
+ Box<T>: GetLinks,
+{
+ type Wrapped = Box<<Box<T> as GetLinks>::EntryType>;
+}
+
+impl<T: GetLinks + ?Sized> GetLinks for Box<T> {
+ type EntryType = T::EntryType;
+ fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
+ <T as GetLinks>::get_links(data)
+ }
+}
+
+impl<T: ?Sized> GetLinksWrapped for Arc<T>
+where
+ Arc<T>: GetLinks,
+{
+ type Wrapped = Arc<<Arc<T> as GetLinks>::EntryType>;
+}
+
+impl<T: GetLinks + ?Sized> GetLinks for Arc<T> {
+ type EntryType = T::EntryType;
+ fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
+ <T as GetLinks>::get_links(data)
+ }
+}
+
+/// A linked list.
+///
+/// Elements in the list are wrapped and ownership is transferred to the list while the element is
+/// in the list.
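+///
+/// # Examples
+///
+/// A sketch of a `Box`-backed list; the `Item` type is hypothetical, and
+/// `Links::new` is assumed to construct empty links:
+///
+/// ```ignore
+/// struct Item {
+///     value: u32,
+///     links: Links<Item>,
+/// }
+///
+/// impl GetLinks for Item {
+///     type EntryType = Item;
+///     fn get_links(data: &Item) -> &Links<Item> {
+///         &data.links
+///     }
+/// }
+///
+/// fn example() -> Result {
+///     let mut list = List::<Box<Item>>::new();
+///     // Ownership of the entry moves into the list.
+///     list.push_back(Box::try_new(Item { value: 1, links: Links::new() })?);
+///     // `pop_front` moves ownership back out.
+///     while let Some(item) = list.pop_front() {
+///         pr_info!("{}\n", item.value);
+///     }
+///     Ok(())
+/// }
+/// ```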
+pub struct List<G: GetLinksWrapped> {
+ list: RawList<G>,
+}
+
+impl<G: GetLinksWrapped> List<G> {
+ /// Constructs a new empty linked list.
+ pub fn new() -> Self {
+ Self {
+ list: RawList::new(),
+ }
+ }
+
+ /// Returns whether the list is empty.
+ pub fn is_empty(&self) -> bool {
+ self.list.is_empty()
+ }
+
+ /// Adds the given object to the end (back) of the list.
+ ///
+ /// It is dropped if it's already on this (or another) list; this can happen for
+ /// reference-counted objects, so dropping means decrementing the reference count.
+ pub fn push_back(&mut self, data: G::Wrapped) {
+ let ptr = data.into_pointer();
+
+ // SAFETY: We took ownership of the entry, so it is safe to insert it.
+ if !unsafe { self.list.push_back(ptr.as_ref()) } {
+ // If insertion failed, rebuild object so that it can be freed.
+ // SAFETY: We just called `into_pointer` above.
+ unsafe { G::Wrapped::from_pointer(ptr) };
+ }
+ }
+
+ /// Inserts the given object after `existing`.
+ ///
+ /// It is dropped if it's already on this (or another) list; this can happen for
+ /// reference-counted objects, so dropping means decrementing the reference count.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `existing` points to a valid entry that is on the list.
+ pub unsafe fn insert_after(&mut self, existing: NonNull<G::EntryType>, data: G::Wrapped) {
+ let ptr = data.into_pointer();
+        // SAFETY: The function safety contract guarantees that `existing` points to a valid
+        // entry that is on the list.
+        let entry = unsafe { &*existing.as_ptr() };
+ if unsafe { !self.list.insert_after(entry, ptr.as_ref()) } {
+ // If insertion failed, rebuild object so that it can be freed.
+ unsafe { G::Wrapped::from_pointer(ptr) };
+ }
+ }
+
+ /// Removes the given entry.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `data` is either on this list or in no list. It being on another
+ /// list leads to memory unsafety.
+ pub unsafe fn remove(&mut self, data: &G::Wrapped) -> Option<G::Wrapped> {
+ let entry_ref = Wrapper::as_ref(data);
+ if unsafe { self.list.remove(entry_ref) } {
+ Some(unsafe { G::Wrapped::from_pointer(NonNull::from(entry_ref)) })
+ } else {
+ None
+ }
+ }
+
+ /// Removes the element currently at the front of the list and returns it.
+ ///
+ /// Returns `None` if the list is empty.
+ pub fn pop_front(&mut self) -> Option<G::Wrapped> {
+ let front = self.list.pop_front()?;
+        // SAFETY: Elements on the list were inserted after a call to `into_pointer`.
+ Some(unsafe { G::Wrapped::from_pointer(front) })
+ }
+
+ /// Returns a cursor starting on the first (front) element of the list.
+ pub fn cursor_front(&self) -> Cursor<'_, G> {
+ self.list.cursor_front()
+ }
+
+ /// Returns a mutable cursor starting on the first (front) element of the list.
+ pub fn cursor_front_mut(&mut self) -> CursorMut<'_, G> {
+ CursorMut::new(self.list.cursor_front_mut())
+ }
+}
+
+impl<G: GetLinksWrapped> Default for List<G> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<G: GetLinksWrapped> Drop for List<G> {
+ fn drop(&mut self) {
+ while self.pop_front().is_some() {}
+ }
+}
+
+/// A list cursor that allows traversing a linked list and inspecting and mutating elements.
+pub struct CursorMut<'a, G: GetLinksWrapped> {
+ cursor: raw_list::CursorMut<'a, G>,
+}
+
+impl<'a, G: GetLinksWrapped> CursorMut<'a, G> {
+ fn new(cursor: raw_list::CursorMut<'a, G>) -> Self {
+ Self { cursor }
+ }
+
+ /// Returns the element the cursor is currently positioned on.
+ pub fn current(&mut self) -> Option<&mut G::EntryType> {
+ self.cursor.current()
+ }
+
+ /// Removes the element the cursor is currently positioned on.
+ ///
+ /// After removal, it advances the cursor to the next element.
+ pub fn remove_current(&mut self) -> Option<G::Wrapped> {
+ let ptr = self.cursor.remove_current()?;
+
+        // SAFETY: Elements on the list were inserted after a call to `into_pointer`.
+ Some(unsafe { G::Wrapped::from_pointer(ptr) })
+ }
+
+ /// Returns the element immediately after the one the cursor is positioned on.
+ pub fn peek_next(&mut self) -> Option<&mut G::EntryType> {
+ self.cursor.peek_next()
+ }
+
+ /// Returns the element immediately before the one the cursor is positioned on.
+ pub fn peek_prev(&mut self) -> Option<&mut G::EntryType> {
+ self.cursor.peek_prev()
+ }
+
+ /// Moves the cursor to the next element.
+ pub fn move_next(&mut self) {
+ self.cursor.move_next();
+ }
+}
diff --git a/rust/kernel/miscdev.rs b/rust/kernel/miscdev.rs
new file mode 100644
index 000000000000..e4d94d7416ef
--- /dev/null
+++ b/rust/kernel/miscdev.rs
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Miscellaneous devices.
+//!
+//! C header: [`include/linux/miscdevice.h`](../../../../include/linux/miscdevice.h)
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/driver-api/misc_devices.html>
+
+use crate::bindings;
+use crate::error::{Error, Result};
+use crate::file_operations::{FileOpenAdapter, FileOpener, FileOperationsVtable};
+use crate::str::CStr;
+use alloc::boxed::Box;
+use core::marker::PhantomPinned;
+use core::pin::Pin;
+
+/// A registration of a miscellaneous device.
+pub struct Registration<T: Sync = ()> {
+ registered: bool,
+ mdev: bindings::miscdevice,
+ _pin: PhantomPinned,
+
+ /// Context initialised on construction and made available to all file instances on
+ /// [`FileOpener::open`].
+ pub context: T,
+}
+
+impl<T: Sync> Registration<T> {
+ /// Creates a new [`Registration`] but does not register it yet.
+ ///
+ /// It is allowed to move.
+ pub fn new(context: T) -> Self {
+ Self {
+ registered: false,
+ mdev: bindings::miscdevice::default(),
+ _pin: PhantomPinned,
+ context,
+ }
+ }
+
+ /// Registers a miscellaneous device.
+ ///
+ /// Returns a pinned heap-allocated representation of the registration.
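+    ///
+    /// # Examples
+    ///
+    /// A sketch, with a hypothetical `MyFile: FileOpener<()>` implementation
+    /// and a hypothetical `c_str!` helper producing a `&'static CStr`:
+    ///
+    /// ```ignore
+    /// let _reg = Registration::new_pinned::<MyFile>(c_str!("my_device"), None, ())?;
+    /// ```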
+ pub fn new_pinned<F: FileOpener<T>>(
+ name: &'static CStr,
+ minor: Option<i32>,
+ context: T,
+ ) -> Result<Pin<Box<Self>>> {
+ let mut r = Pin::from(Box::try_new(Self::new(context))?);
+ r.as_mut().register::<F>(name, minor)?;
+ Ok(r)
+ }
+
+ /// Registers a miscellaneous device with the rest of the kernel.
+ ///
+ /// It must be pinned because the memory block that represents the registration is
+ /// self-referential. If a minor is not given, the kernel allocates a new one if possible.
+ pub fn register<F: FileOpener<T>>(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ minor: Option<i32>,
+ ) -> Result {
+ // SAFETY: We must ensure that we never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ if this.registered {
+ // Already registered.
+ return Err(Error::EINVAL);
+ }
+
+ // SAFETY: The adapter is compatible with `misc_register`.
+ this.mdev.fops = unsafe { FileOperationsVtable::<Self, F>::build() };
+ this.mdev.name = name.as_char_ptr();
+ this.mdev.minor = minor.unwrap_or(bindings::MISC_DYNAMIC_MINOR as i32);
+
+        // SAFETY: `this.mdev` was initialised above and, being pinned, lives at least as long
+        // as the registration.
+        let ret = unsafe { bindings::misc_register(&mut this.mdev) };
+ if ret < 0 {
+ return Err(Error::from_kernel_errno(ret));
+ }
+ this.registered = true;
+ Ok(())
+ }
+}
+
+impl<T: Sync> FileOpenAdapter for Registration<T> {
+ type Arg = T;
+
+ unsafe fn convert(_inode: *mut bindings::inode, file: *mut bindings::file) -> *const Self::Arg {
+ // TODO: `SAFETY` comment required here even if `unsafe` is not present,
+ // because `container_of!` hides it. Ideally we would not allow
+ // `unsafe` code as parameters to macros.
+ let reg = crate::container_of!((*file).private_data, Self, mdev);
+ unsafe { &(*reg).context }
+ }
+}
+
+// SAFETY: The only method is `register()`, which requires a (pinned) mutable `Registration`, so it
+// is safe to pass `&Registration` to multiple threads because it offers no interior mutability,
+// except maybe through `Registration::context`, but it is itself `Sync`.
+unsafe impl<T: Sync> Sync for Registration<T> {}
+
+// SAFETY: All functions work from any thread. So as long as the `Registration::context` is
+// `Send`, so is `Registration<T>`. `T` needs to be `Sync` because it's a requirement of
+// `Registration<T>`.
+unsafe impl<T: Send + Sync> Send for Registration<T> {}
+
+impl<T: Sync> Drop for Registration<T> {
+ /// Removes the registration from the kernel if it has completed successfully before.
+ fn drop(&mut self) {
+ if self.registered {
+            // SAFETY: `self.registered` implies `self.mdev` was successfully registered via
+            // `misc_register`, so it is valid to deregister it here.
+            unsafe { bindings::misc_deregister(&mut self.mdev) }
+ }
+ }
+}
diff --git a/rust/kernel/module_param.rs b/rust/kernel/module_param.rs
new file mode 100644
index 000000000000..a588449c41fa
--- /dev/null
+++ b/rust/kernel/module_param.rs
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Types for module parameters.
+//!
+//! C header: [`include/linux/moduleparam.h`](../../../include/linux/moduleparam.h)
+
+use crate::str::CStr;
+use core::fmt::Write;
+
+/// Types that can be used for module parameters.
+///
+/// Note that displaying the type in `sysfs` will fail if
+/// [`alloc::string::ToString::to_string`] (as implemented through the
+/// [`core::fmt::Display`] trait) writes more than [`PAGE_SIZE`]
+/// bytes (including an additional null terminator).
+///
+/// [`PAGE_SIZE`]: `crate::PAGE_SIZE`
+pub trait ModuleParam: core::fmt::Display + core::marker::Sized {
+ /// The `ModuleParam` will be used by the kernel module through this type.
+ ///
+ /// This may differ from `Self` if, for example, `Self` needs to track
+ /// ownership without exposing it or allocate extra space for other possible
+ /// parameter values. See [`StringParam`] or [`ArrayParam`] for examples.
+ type Value: ?Sized;
+
+ /// Whether the parameter is allowed to be set without an argument.
+ ///
+ /// Setting this to `true` allows the parameter to be passed without an
+ /// argument (e.g. just `module.param` instead of `module.param=foo`).
+ const NOARG_ALLOWED: bool;
+
+ /// Convert a parameter argument into the parameter value.
+ ///
+ /// `None` should be returned when parsing of the argument fails.
+ /// `arg == None` indicates that the parameter was passed without an
+ /// argument. If `NOARG_ALLOWED` is set to `false` then `arg` is guaranteed
+ /// to always be `Some(_)`.
+ ///
+ /// Parameters passed at boot time will be set before [`kmalloc`] is
+ /// available (even if the module is loaded at a later time). However, in
+ /// this case, the argument buffer will be valid for the entire lifetime of
+ /// the kernel. So implementations of this method which need to allocate
+ /// should first check that the allocator is available (with
+ /// [`crate::bindings::slab_is_available`]) and when it is not available
+ /// provide an alternative implementation which doesn't allocate. In cases
+ /// where the allocator is not available it is safe to save references to
+ /// `arg` in `Self`, but in other cases a copy should be made.
+ ///
+ /// [`kmalloc`]: ../../../include/linux/slab.h
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self>;
+
+ /// Get the current value of the parameter for use in the kernel module.
+ ///
+ /// This function should not be used directly. Instead use the wrapper
+ /// `read` which will be generated by [`macros::module`].
+ fn value(&self) -> &Self::Value;
+
+ /// Set the module parameter from a string.
+ ///
+ /// Used to set the parameter value when loading the module or when set
+ /// through `sysfs`.
+ ///
+ /// # Safety
+ ///
+ /// If `val` is non-null then it must point to a valid null-terminated
+ /// string. The `arg` field of `param` must be an instance of `Self`.
+ unsafe extern "C" fn set_param(
+ val: *const crate::c_types::c_char,
+ param: *const crate::bindings::kernel_param,
+ ) -> crate::c_types::c_int {
+ let arg = if val.is_null() {
+ None
+ } else {
+ Some(unsafe { CStr::from_char_ptr(val).as_bytes() })
+ };
+ match Self::try_from_param_arg(arg) {
+ Some(new_value) => {
+ let old_value = unsafe { (*param).__bindgen_anon_1.arg as *mut Self };
+ let _ = unsafe { core::ptr::replace(old_value, new_value) };
+ 0
+ }
+ None => crate::error::Error::EINVAL.to_kernel_errno(),
+ }
+ }
+
+ /// Write a string representation of the current parameter value to `buf`.
+ ///
+ /// Used for displaying the current parameter value in `sysfs`.
+ ///
+ /// # Safety
+ ///
+ /// `buf` must be a buffer of length at least `kernel::PAGE_SIZE` that is
+ /// writeable. The `arg` field of `param` must be an instance of `Self`.
+ unsafe extern "C" fn get_param(
+ buf: *mut crate::c_types::c_char,
+ param: *const crate::bindings::kernel_param,
+ ) -> crate::c_types::c_int {
+ let slice = unsafe { core::slice::from_raw_parts_mut(buf as *mut u8, crate::PAGE_SIZE) };
+ let mut buf = crate::buffer::Buffer::new(slice);
+ match unsafe { write!(buf, "{}\0", *((*param).__bindgen_anon_1.arg as *mut Self)) } {
+ Err(_) => crate::error::Error::EINVAL.to_kernel_errno(),
+ Ok(()) => buf.bytes_written() as crate::c_types::c_int,
+ }
+ }
+
+ /// Drop the parameter.
+ ///
+ /// Called when unloading a module.
+ ///
+ /// # Safety
+ ///
+ /// The `arg` field of `param` must be an instance of `Self`.
+ unsafe extern "C" fn free(arg: *mut crate::c_types::c_void) {
+ unsafe { core::ptr::drop_in_place(arg as *mut Self) };
+ }
+}
+
+/// Trait for parsing integers.
+///
+/// Strings beginning with `0x`, `0o`, or `0b` (case-insensitive) are parsed as
+/// hex, octal, or binary respectively. Other strings beginning with `0` are
+/// parsed as octal. Anything else is parsed as decimal. A leading `+` or `-` is
+/// permitted. Any string parsed by [`kstrtol()`] or [`kstrtoul()`] will be
+/// successfully parsed.
+///
+/// [`kstrtol()`]: https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#c.kstrtol
+/// [`kstrtoul()`]: https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#c.kstrtoul
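+///
+/// # Examples
+///
+/// A sketch of the accepted syntax (illustrative only; the trait is private,
+/// so this is not a doctest):
+///
+/// ```ignore
+/// assert_eq!(<i32 as ParseInt>::from_str("0x1f"), Some(31)); // hexadecimal
+/// assert_eq!(<i32 as ParseInt>::from_str("017"), Some(15)); // leading `0`: octal
+/// assert_eq!(<i32 as ParseInt>::from_str("0b101"), Some(5)); // binary
+/// assert_eq!(<i32 as ParseInt>::from_str("-42"), Some(-42)); // signed decimal
+/// ```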
+trait ParseInt: Sized {
+ fn from_str_radix(src: &str, radix: u32) -> Result<Self, core::num::ParseIntError>;
+ fn checked_neg(self) -> Option<Self>;
+
+ fn from_str_unsigned(src: &str) -> Result<Self, core::num::ParseIntError> {
+ let (radix, digits) = if let Some(n) = src.strip_prefix("0x") {
+ (16, n)
+ } else if let Some(n) = src.strip_prefix("0X") {
+ (16, n)
+ } else if let Some(n) = src.strip_prefix("0o") {
+ (8, n)
+ } else if let Some(n) = src.strip_prefix("0O") {
+ (8, n)
+ } else if let Some(n) = src.strip_prefix("0b") {
+ (2, n)
+ } else if let Some(n) = src.strip_prefix("0B") {
+ (2, n)
+ } else if src.starts_with('0') {
+ (8, src)
+ } else {
+ (10, src)
+ };
+ Self::from_str_radix(digits, radix)
+ }
+
+ fn from_str(src: &str) -> Option<Self> {
+ match src.bytes().next() {
+ None => None,
+ Some(b'-') => Self::from_str_unsigned(&src[1..]).ok()?.checked_neg(),
+ Some(b'+') => Some(Self::from_str_unsigned(&src[1..]).ok()?),
+ Some(_) => Some(Self::from_str_unsigned(src).ok()?),
+ }
+ }
+}
+
+macro_rules! impl_parse_int {
+ ($ty:ident) => {
+ impl ParseInt for $ty {
+ fn from_str_radix(src: &str, radix: u32) -> Result<Self, core::num::ParseIntError> {
+ $ty::from_str_radix(src, radix)
+ }
+
+ fn checked_neg(self) -> Option<Self> {
+ self.checked_neg()
+ }
+ }
+ };
+}
+
+impl_parse_int!(i8);
+impl_parse_int!(u8);
+impl_parse_int!(i16);
+impl_parse_int!(u16);
+impl_parse_int!(i32);
+impl_parse_int!(u32);
+impl_parse_int!(i64);
+impl_parse_int!(u64);
+impl_parse_int!(isize);
+impl_parse_int!(usize);
+
+macro_rules! impl_module_param {
+ ($ty:ident) => {
+ impl ModuleParam for $ty {
+ type Value = $ty;
+
+ const NOARG_ALLOWED: bool = false;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ let bytes = arg?;
+ let utf8 = core::str::from_utf8(bytes).ok()?;
+ <$ty as crate::module_param::ParseInt>::from_str(utf8)
+ }
+
+ fn value(&self) -> &Self::Value {
+ self
+ }
+ }
+ };
+}
+
+#[doc(hidden)]
+#[macro_export]
+/// Generate a static [`kernel_param_ops`](../../../include/linux/moduleparam.h) struct.
+///
+/// # Example
+/// ```ignore
+/// make_param_ops!(
+/// /// Documentation for new param ops.
+/// PARAM_OPS_MYTYPE, // Name for the static.
+/// MyType // A type which implements [`ModuleParam`].
+/// );
+/// ```
+macro_rules! make_param_ops {
+ ($ops:ident, $ty:ty) => {
+ $crate::make_param_ops!(
+ #[doc=""]
+ $ops,
+ $ty
+ );
+ };
+ ($(#[$meta:meta])* $ops:ident, $ty:ty) => {
+ $(#[$meta])*
+ ///
+ /// Static [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// struct generated by [`make_param_ops`].
+ pub static $ops: $crate::bindings::kernel_param_ops = $crate::bindings::kernel_param_ops {
+ flags: if <$ty as $crate::module_param::ModuleParam>::NOARG_ALLOWED {
+ $crate::bindings::KERNEL_PARAM_OPS_FL_NOARG
+ } else {
+ 0
+ },
+ set: Some(<$ty as $crate::module_param::ModuleParam>::set_param),
+ get: Some(<$ty as $crate::module_param::ModuleParam>::get_param),
+ free: Some(<$ty as $crate::module_param::ModuleParam>::free),
+ };
+ };
+}
+
+impl_module_param!(i8);
+impl_module_param!(u8);
+impl_module_param!(i16);
+impl_module_param!(u16);
+impl_module_param!(i32);
+impl_module_param!(u32);
+impl_module_param!(i64);
+impl_module_param!(u64);
+impl_module_param!(isize);
+impl_module_param!(usize);
+
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i8`].
+ PARAM_OPS_I8,
+ i8
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u8`].
+ PARAM_OPS_U8,
+ u8
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i16`].
+ PARAM_OPS_I16,
+ i16
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u16`].
+ PARAM_OPS_U16,
+ u16
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i32`].
+ PARAM_OPS_I32,
+ i32
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u32`].
+ PARAM_OPS_U32,
+ u32
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i64`].
+ PARAM_OPS_I64,
+ i64
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u64`].
+ PARAM_OPS_U64,
+ u64
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`isize`].
+ PARAM_OPS_ISIZE,
+ isize
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`usize`].
+ PARAM_OPS_USIZE,
+ usize
+);
+
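+// `bool` parameters accept `y`/`Y`/`1`/`true` and `n`/`N`/`0`/`false`. Since
+// `NOARG_ALLOWED` is `true`, the parameter may also be passed without an
+// argument (e.g. just `module.param`), which sets it to `true`.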
+impl ModuleParam for bool {
+ type Value = bool;
+
+ const NOARG_ALLOWED: bool = true;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ match arg {
+ None => Some(true),
+ Some(b"y") | Some(b"Y") | Some(b"1") | Some(b"true") => Some(true),
+ Some(b"n") | Some(b"N") | Some(b"0") | Some(b"false") => Some(false),
+ _ => None,
+ }
+ }
+
+ fn value(&self) -> &Self::Value {
+ self
+ }
+}
+
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`bool`].
+ PARAM_OPS_BOOL,
+ bool
+);
+
+/// An array of at __most__ `N` values.
+///
+/// # Invariant
+///
+/// The first `self.used` elements of `self.values` are initialized.
+pub struct ArrayParam<T, const N: usize> {
+ values: [core::mem::MaybeUninit<T>; N],
+ used: usize,
+}
+
+impl<T, const N: usize> ArrayParam<T, { N }> {
+ fn values(&self) -> &[T] {
+ // SAFETY: The invariant maintained by `ArrayParam` allows us to cast
+ // the first `self.used` elements to `T`.
+ unsafe {
+ &*(&self.values[0..self.used] as *const [core::mem::MaybeUninit<T>] as *const [T])
+ }
+ }
+}
+
+impl<T: Copy, const N: usize> ArrayParam<T, { N }> {
+ const fn new() -> Self {
+ // INVARIANT: The first `self.used` elements of `self.values` are
+ // initialized.
+ ArrayParam {
+ values: [core::mem::MaybeUninit::uninit(); N],
+ used: 0,
+ }
+ }
+
+ const fn push(&mut self, val: T) {
+ if self.used < N {
+ // INVARIANT: The first `self.used` elements of `self.values` are
+ // initialized.
+ self.values[self.used] = core::mem::MaybeUninit::new(val);
+ self.used += 1;
+ }
+ }
+
+ /// Create an instance of `ArrayParam` initialized with `vals`.
+ ///
+ /// This function is only meant to be used by the [`macros::module`] macro.
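+ ///
+ /// # Examples
+ ///
+ /// A sketch of the call the macro emits (hypothetical values); unused
+ /// capacity simply stays uninitialized:
+ ///
+ /// ```ignore
+ /// const VALS: ArrayParam<i32, 4> = ArrayParam::create(&[1, 2, 3]);
+ /// ```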
+ pub const fn create(vals: &[T]) -> Self {
+ let mut result = ArrayParam::new();
+ let mut i = 0;
+ while i < vals.len() {
+ result.push(vals[i]);
+ i += 1;
+ }
+ result
+ }
+}
+
+impl<T: core::fmt::Display, const N: usize> core::fmt::Display for ArrayParam<T, { N }> {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ for val in self.values() {
+ write!(f, "{},", val)?;
+ }
+ Ok(())
+ }
+}
+
+impl<T: Copy + core::fmt::Display + ModuleParam, const N: usize> ModuleParam
+ for ArrayParam<T, { N }>
+{
+ type Value = [T];
+
+ const NOARG_ALLOWED: bool = false;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ arg.and_then(|args| {
+ let mut result = Self::new();
+ for arg in args.split(|b| *b == b',') {
+ result.push(T::try_from_param_arg(Some(arg))?);
+ }
+ Some(result)
+ })
+ }
+
+ fn value(&self) -> &Self::Value {
+ self.values()
+ }
+}
+
+/// A C-style string parameter.
+///
+/// The Rust version of the [`charp`] parameter. This type is meant to be
+/// used by the [`macros::module`] macro, not handled directly. Instead use the
+/// `read` method generated by that macro.
+///
+/// [`charp`]: ../../../include/linux/moduleparam.h
+pub enum StringParam {
+ /// A borrowed parameter value.
+ ///
+ /// Either the default value (which is static in the module) or borrowed
+ /// from the original argument buffer used to set the value.
+ Ref(&'static [u8]),
+
+ /// A value that was allocated when the parameter was set.
+ ///
+ /// The value needs to be freed when the parameter is reset or the module is
+ /// unloaded.
+ Owned(alloc::vec::Vec<u8>),
+}
+
+impl StringParam {
+ fn bytes(&self) -> &[u8] {
+ match self {
+ StringParam::Ref(bytes) => *bytes,
+ StringParam::Owned(vec) => &vec[..],
+ }
+ }
+}
+
+impl core::fmt::Display for StringParam {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ let bytes = self.bytes();
+ match core::str::from_utf8(bytes) {
+ Ok(utf8) => write!(f, "{}", utf8),
+ Err(_) => write!(f, "{:?}", bytes),
+ }
+ }
+}
+
+impl ModuleParam for StringParam {
+ type Value = [u8];
+
+ const NOARG_ALLOWED: bool = false;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ // SAFETY: It is always safe to call [`slab_is_available`](../../../include/linux/slab.h).
+ let slab_available = unsafe { crate::bindings::slab_is_available() };
+ arg.and_then(|arg| {
+ if slab_available {
+ let mut vec = alloc::vec::Vec::new();
+ vec.try_extend_from_slice(arg).ok()?;
+ Some(StringParam::Owned(vec))
+ } else {
+ Some(StringParam::Ref(arg))
+ }
+ })
+ }
+
+ fn value(&self) -> &Self::Value {
+ self.bytes()
+ }
+}
+
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`StringParam`].
+ PARAM_OPS_STR,
+ StringParam
+);
diff --git a/rust/kernel/of.rs b/rust/kernel/of.rs
new file mode 100644
index 000000000000..78aa5956f03f
--- /dev/null
+++ b/rust/kernel/of.rs
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Devicetree and Open Firmware abstractions.
+//!
+//! C header: [`include/linux/of_*.h`](../../../../include/linux/of_*.h)
+
+use crate::{bindings, c_types, str::CStr};
+
+use core::ops::Deref;
+use core::ptr;
+
+/// A kernel Open Firmware / devicetree match table.
+///
+/// Can only exist as an `&OfMatchTable` reference (akin to `&str` or
+/// `&Path` in Rust std).
+///
+/// # Invariants
+///
+/// The inner reference points to a sentinel-terminated C array.
+#[repr(transparent)]
+pub struct OfMatchTable(bindings::of_device_id);
+
+impl OfMatchTable {
+ /// Returns the table as a reference, with `'static` lifetime, to a sentinel-terminated C array.
+ ///
+ /// This is suitable to be coerced into the kernel's `of_match_table` field.
+ pub fn as_ptr(&'static self) -> &'static bindings::of_device_id {
+ // The inner reference points to a sentinel-terminated C array, as per
+ // the type invariant.
+ &self.0
+ }
+}
+
+/// An Open Firmware Match Table that can be constructed at build time.
+///
+/// # Invariants
+///
+/// `sentinel` always contains zeroes.
+#[repr(C)]
+pub struct ConstOfMatchTable<const N: usize> {
+ table: [bindings::of_device_id; N],
+ sentinel: bindings::of_device_id,
+}
+
+impl<const N: usize> ConstOfMatchTable<N> {
+ /// Creates a new Open Firmware Match Table from a list of compatible strings.
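+ ///
+ /// # Examples
+ ///
+ /// A sketch, assuming `C1` and `C2` are `&'static CStr` compatible strings
+ /// defined elsewhere:
+ ///
+ /// ```ignore
+ /// const TABLE: ConstOfMatchTable<2> = ConstOfMatchTable::new_const([C1, C2]);
+ /// let of_table: &'static OfMatchTable = &TABLE;
+ /// ```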
+ pub const fn new_const(compatibles: [&'static CStr; N]) -> Self {
+ let mut table = [Self::zeroed_of_device_id(); N];
+ let mut i = 0;
+ while i < N {
+ table[i] = Self::new_of_device_id(compatibles[i]);
+ i += 1;
+ }
+ Self {
+ table,
+ // INVARIANTS: we zero the sentinel here, and never change it
+ // anywhere. Therefore it always contains zeroes.
+ sentinel: Self::zeroed_of_device_id(),
+ }
+ }
+
+ const fn zeroed_of_device_id() -> bindings::of_device_id {
+ bindings::of_device_id {
+ name: [0; 32],
+ type_: [0; 32],
+ compatible: [0; 128],
+ data: ptr::null(),
+ }
+ }
+
+ const fn new_of_device_id(compatible: &'static CStr) -> bindings::of_device_id {
+ let mut id = Self::zeroed_of_device_id();
+ let compatible = compatible.as_bytes_with_nul();
+ let mut i = 0;
+ while i < compatible.len() {
+ // If `compatible` does not fit in `id.compatible`, an
+ // "index out of bounds" build time error will be triggered.
+ id.compatible[i] = compatible[i] as c_types::c_char;
+ i += 1;
+ }
+ id
+ }
+}
+
+impl<const N: usize> Deref for ConstOfMatchTable<N> {
+ type Target = OfMatchTable;
+
+ fn deref(&self) -> &OfMatchTable {
+ // INVARIANTS: `head` points to a sentinel-terminated C array,
+ // as per the `ConstOfMatchTable` type invariant, therefore
+ // `&OfMatchTable`'s inner reference will point to a sentinel-terminated C array.
+ let head = &self.table[0] as *const bindings::of_device_id as *const OfMatchTable;
+
+ // SAFETY: The returned reference must remain valid for the lifetime of `self`.
+ // The raw pointer `head` points to memory inside `self`. So the reference created
+ // from this raw pointer has the same lifetime as `self`.
+ // Therefore this reference remains valid for the lifetime of `self`, and
+ // is safe to return.
+ unsafe { &*head }
+ }
+}
diff --git a/rust/kernel/pages.rs b/rust/kernel/pages.rs
new file mode 100644
index 000000000000..4f45bef09bca
--- /dev/null
+++ b/rust/kernel/pages.rs
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel page allocation and management.
+//!
+//! TODO: This module is a work in progress.
+
+use crate::{
+ bindings, c_types, io_buffer::IoBufferReader, user_ptr::UserSlicePtrReader, Error, Result,
+ PAGE_SIZE,
+};
+use core::{marker::PhantomData, ptr};
+
+extern "C" {
+ #[allow(improper_ctypes)]
+ fn rust_helper_alloc_pages(
+ gfp_mask: bindings::gfp_t,
+ order: c_types::c_uint,
+ ) -> *mut bindings::page;
+
+ #[allow(improper_ctypes)]
+ fn rust_helper_kmap(page: *mut bindings::page) -> *mut c_types::c_void;
+
+ #[allow(improper_ctypes)]
+ fn rust_helper_kunmap(page: *mut bindings::page);
+}
+
+/// A set of physical pages.
+///
+/// `Pages` holds a reference to a set of pages of order `ORDER`. Having the order as a generic
+/// const allows the struct to have the same size as a pointer.
+///
+/// # Invariants
+///
+/// The pointer `Pages::pages` is valid and points to 2^ORDER pages.
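+///
+/// # Examples
+///
+/// A sketch: allocate an order-0 (single page) set and copy bytes out of it.
+///
+/// ```ignore
+/// let pages = Pages::<0>::new()?;
+/// let mut buf = [0u8; 8];
+/// // SAFETY: `buf` is a valid destination buffer of length 8.
+/// unsafe { pages.read(buf.as_mut_ptr(), 0, buf.len())? };
+/// ```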
+pub struct Pages<const ORDER: u32> {
+ pages: *mut bindings::page,
+}
+
+impl<const ORDER: u32> Pages<ORDER> {
+ /// Allocates a new set of contiguous pages.
+ pub fn new() -> Result<Self> {
+ // TODO: Consider whether we want to allow callers to specify flags.
+ // SAFETY: This only allocates pages. We check that it succeeds in the next statement.
+ let pages = unsafe {
+ rust_helper_alloc_pages(
+ bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::__GFP_HIGHMEM,
+ ORDER,
+ )
+ };
+ if pages.is_null() {
+ return Err(Error::ENOMEM);
+ }
+ // INVARIANTS: We checked above that the allocation succeeded.
+ Ok(Self { pages })
+ }
+
+ /// Maps a single page at the given address in the given VM area.
+ ///
+ /// This is only meant to be used by pages of order 0.
+ pub fn insert_page(&self, vma: &mut bindings::vm_area_struct, address: usize) -> Result {
+ if ORDER != 0 {
+ return Err(Error::EINVAL);
+ }
+
+ // SAFETY: We check above that the allocation is of order 0. The range of `address` is
+ // already checked by `vm_insert_page`.
+ let ret = unsafe { bindings::vm_insert_page(vma, address as _, self.pages) };
+ if ret != 0 {
+ Err(Error::from_kernel_errno(ret))
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Copies data from the given [`UserSlicePtrReader`] into the pages.
+ pub fn copy_into_page(
+ &self,
+ reader: &mut UserSlicePtrReader,
+ offset: usize,
+ len: usize,
+ ) -> Result {
+ // TODO: For now this only works on the first page.
+ let end = offset.checked_add(len).ok_or(Error::EINVAL)?;
+ if end > PAGE_SIZE {
+ return Err(Error::EINVAL);
+ }
+
+ let mapping = self.kmap(0).ok_or(Error::EINVAL)?;
+
+ // SAFETY: We ensured that the buffer was valid with the check above.
+ unsafe { reader.read_raw((mapping.ptr as usize + offset) as _, len) }?;
+ Ok(())
+ }
+
+ /// Maps the pages and reads from them into the given buffer.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the destination buffer is valid for the given length.
+ /// Additionally, if the raw buffer is intended to be recast, they must ensure that the data
+ /// can be safely cast; [`crate::io_buffer::ReadableFromBytes`] has more details about it.
+ pub unsafe fn read(&self, dest: *mut u8, offset: usize, len: usize) -> Result {
+ // TODO: For now this only works on the first page.
+ let end = offset.checked_add(len).ok_or(Error::EINVAL)?;
+ if end > PAGE_SIZE {
+ return Err(Error::EINVAL);
+ }
+
+ let mapping = self.kmap(0).ok_or(Error::EINVAL)?;
+ unsafe { ptr::copy((mapping.ptr as *mut u8).add(offset), dest, len) };
+ Ok(())
+ }
+
+ /// Maps the pages and writes into them from the given buffer.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the buffer is valid for the given length. Additionally, if the
+ /// page is (or will be) mapped by userspace, they must ensure that no kernel data is leaked
+ /// through padding if it was cast from another type; [`crate::io_buffer::WritableToBytes`] has
+ /// more details about it.
+ pub unsafe fn write(&self, src: *const u8, offset: usize, len: usize) -> Result {
+ // TODO: For now this only works on the first page.
+ let end = offset.checked_add(len).ok_or(Error::EINVAL)?;
+ if end > PAGE_SIZE {
+ return Err(Error::EINVAL);
+ }
+
+ let mapping = self.kmap(0).ok_or(Error::EINVAL)?;
+ unsafe { ptr::copy(src, (mapping.ptr as *mut u8).add(offset), len) };
+ Ok(())
+ }
+
+ /// Maps the page at index `index`.
+ fn kmap(&self, index: usize) -> Option<PageMapping<'_>> {
+ if index >= 1usize << ORDER {
+ return None;
+ }
+
+ // SAFETY: We checked above that `index` is within range.
+ let page = unsafe { self.pages.add(index) };
+
+ // SAFETY: `page` is valid based on the checks above.
+ let ptr = unsafe { rust_helper_kmap(page) };
+ if ptr.is_null() {
+ return None;
+ }
+
+ Some(PageMapping {
+ page,
+ ptr,
+ _phantom: PhantomData,
+ })
+ }
+}
+
+impl<const ORDER: u32> Drop for Pages<ORDER> {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we know the pages are allocated with the given order.
+ unsafe { bindings::__free_pages(self.pages, ORDER) };
+ }
+}
+
+struct PageMapping<'a> {
+ page: *mut bindings::page,
+ ptr: *mut c_types::c_void,
+ _phantom: PhantomData<&'a i32>,
+}
+
+impl Drop for PageMapping<'_> {
+ fn drop(&mut self) {
+ // SAFETY: An instance of `PageMapping` is created only when `kmap` succeeded for the given
+ // page, so it is safe to unmap it here.
+ unsafe { rust_helper_kunmap(self.page) };
+ }
+}
diff --git a/rust/kernel/platdev.rs b/rust/kernel/platdev.rs
new file mode 100644
index 000000000000..5f306b61321e
--- /dev/null
+++ b/rust/kernel/platdev.rs
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Platform devices.
+//!
+//! Also called `platdev`, `pdev`.
+//!
+//! C header: [`include/linux/platform_device.h`](../../../../include/linux/platform_device.h)
+
+use crate::{
+ bindings, c_types,
+ error::{Error, Result},
+ from_kernel_result,
+ of::OfMatchTable,
+ str::CStr,
+ types::PointerWrapper,
+};
+use alloc::boxed::Box;
+use core::{marker::PhantomPinned, pin::Pin};
+
+/// A registration of a platform device.
+#[derive(Default)]
+pub struct Registration {
+ registered: bool,
+ pdrv: bindings::platform_driver,
+ _pin: PhantomPinned,
+}
+
+// SAFETY: `Registration` does not expose any of its state across threads
+// (it is fine for multiple threads to have a shared reference to it).
+unsafe impl Sync for Registration {}
+
+extern "C" {
+ #[allow(improper_ctypes)]
+ fn rust_helper_platform_get_drvdata(
+ pdev: *const bindings::platform_device,
+ ) -> *mut c_types::c_void;
+
+ #[allow(improper_ctypes)]
+ fn rust_helper_platform_set_drvdata(
+ pdev: *mut bindings::platform_device,
+ data: *mut c_types::c_void,
+ );
+}
+
+extern "C" fn probe_callback<P: PlatformDriver>(
+ pdev: *mut bindings::platform_device,
+) -> c_types::c_int {
+ from_kernel_result! {
+ // SAFETY: `pdev` is guaranteed to be a valid, non-null pointer.
+ let device_id = unsafe { (*pdev).id };
+ let drv_data = P::probe(device_id)?;
+ let drv_data = drv_data.into_pointer() as *mut c_types::c_void;
+ // SAFETY: `pdev` is guaranteed to be a valid, non-null pointer.
+ unsafe {
+ rust_helper_platform_set_drvdata(pdev, drv_data);
+ }
+ Ok(0)
+ }
+}
+
+extern "C" fn remove_callback<P: PlatformDriver>(
+ pdev: *mut bindings::platform_device,
+) -> c_types::c_int {
+ from_kernel_result! {
+ // SAFETY: `pdev` is guaranteed to be a valid, non-null pointer.
+ let device_id = unsafe { (*pdev).id };
+ // SAFETY: `pdev` is guaranteed to be a valid, non-null pointer.
+ let ptr = unsafe { rust_helper_platform_get_drvdata(pdev) };
+ // SAFETY:
+ // - we allocated this pointer using `P::DrvData::into_pointer`,
+ // so it is safe to turn back into a `P::DrvData`.
+ // - the allocation happened in `probe` and no-one freed the memory;
+ // `remove` is the canonical kernel location to free driver data, so it
+ // is OK to convert the pointer back to a Rust structure here.
+ let drv_data = unsafe { P::DrvData::from_pointer(ptr) };
+ P::remove(device_id, drv_data)?;
+ Ok(0)
+ }
+}
+
+impl Registration {
+ fn register<P: PlatformDriver>(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ of_match_table: Option<&'static OfMatchTable>,
+ module: &'static crate::ThisModule,
+ ) -> Result {
+ // SAFETY: We must ensure that we never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ if this.registered {
+ // Already registered.
+ return Err(Error::EINVAL);
+ }
+ this.pdrv.driver.name = name.as_char_ptr();
+ if let Some(tbl) = of_match_table {
+ this.pdrv.driver.of_match_table = tbl.as_ptr();
+ }
+ this.pdrv.probe = Some(probe_callback::<P>);
+ this.pdrv.remove = Some(remove_callback::<P>);
+ // SAFETY:
+ // - `this.pdrv` lives at least until the call to `platform_driver_unregister()` returns.
+ // - `name` pointer has static lifetime.
+ // - `module.0` lives at least as long as the module.
+ // - `probe()` and `remove()` are static functions.
+ // - `of_match_table` is either a raw pointer with static lifetime,
+ // as guaranteed by the [`of::OfMatchTable::as_ptr()`] return type,
+ // or null.
+ let ret = unsafe { bindings::__platform_driver_register(&mut this.pdrv, module.0) };
+ if ret < 0 {
+ return Err(Error::from_kernel_errno(ret));
+ }
+ this.registered = true;
+ Ok(())
+ }
+
+ /// Registers a platform device.
+ ///
+ /// Returns a pinned heap-allocated representation of the registration.
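+ ///
+ /// # Examples
+ ///
+ /// A sketch, typically called from a module's `init` (assumes `MyDriver`
+ /// implements [`PlatformDriver`] and `NAME` is a `&'static CStr`):
+ ///
+ /// ```ignore
+ /// let _registration = Registration::new_pinned::<MyDriver>(NAME, None, module)?;
+ /// ```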
+ pub fn new_pinned<P: PlatformDriver>(
+ name: &'static CStr,
+ of_match_tbl: Option<&'static OfMatchTable>,
+ module: &'static crate::ThisModule,
+ ) -> Result<Pin<Box<Self>>> {
+ let mut r = Pin::from(Box::try_new(Self::default())?);
+ r.as_mut().register::<P>(name, of_match_tbl, module)?;
+ Ok(r)
+ }
+}
+
+impl Drop for Registration {
+ fn drop(&mut self) {
+ if self.registered {
+ // SAFETY: if `registered` is true, then `self.pdev` was registered
+ // previously, which means `platform_driver_unregister` is always
+ // safe to call.
+ unsafe { bindings::platform_driver_unregister(&mut self.pdrv) }
+ }
+ }
+}
+
+/// Trait for implementers of platform drivers.
+///
+/// Implement this trait whenever you create a platform driver.
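+///
+/// # Examples
+///
+/// A minimal sketch (hypothetical driver; assumes `Box<u32>` implements
+/// [`PointerWrapper`]):
+///
+/// ```ignore
+/// struct MyDriver;
+///
+/// impl PlatformDriver for MyDriver {
+///     type DrvData = Box<u32>;
+///
+///     fn probe(device_id: i32) -> Result<Self::DrvData> {
+///         Ok(Box::try_new(device_id as u32)?)
+///     }
+///
+///     fn remove(_device_id: i32, _drv_data: Self::DrvData) -> Result {
+///         Ok(())
+///     }
+/// }
+/// ```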
+pub trait PlatformDriver {
+ /// Device driver data.
+ ///
+ /// Corresponds to the data set or retrieved via the kernel's
+ /// `platform_{set,get}_drvdata()` functions.
+ ///
+ /// `DrvData` is required to implement `PointerWrapper`. We guarantee never
+ /// to move the underlying wrapped data structure. This allows driver
+ /// writers to use pinned or self-referential data structures.
+ type DrvData: PointerWrapper;
+
+ /// Platform driver probe.
+ ///
+ /// Called when a new platform device is added or discovered.
+ /// Implementers should attempt to initialize the device here.
+ fn probe(device_id: i32) -> Result<Self::DrvData>;
+
+ /// Platform driver remove.
+ ///
+ /// Called when a platform device is removed.
+ /// Implementers should prepare the device for complete removal here.
+ fn remove(device_id: i32, drv_data: Self::DrvData) -> Result;
+}
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
new file mode 100644
index 000000000000..c0bf618099ad
--- /dev/null
+++ b/rust/kernel/prelude.rs
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! The `kernel` prelude.
+//!
+//! These are the most common items used by Rust code in the kernel,
+//! intended to be imported by all Rust code, for convenience.
+//!
+//! # Examples
+//!
+//! ```
+//! use kernel::prelude::*;
+//! ```
+
+pub use core::pin::Pin;
+
+pub use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec};
+
+pub use macros::{module, module_misc_device};
+
+pub use super::build_assert;
+
+pub use super::{pr_alert, pr_crit, pr_emerg, pr_err, pr_info, pr_notice, pr_warn};
+
+pub use super::static_assert;
+
+pub use super::{Error, KernelModule, Result};
+
+pub use crate::traits::TryPin;
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
new file mode 100644
index 000000000000..b7384f0bacc3
--- /dev/null
+++ b/rust/kernel/print.rs
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Printing facilities.
+//!
+//! C header: [`include/linux/printk.h`](../../../../include/linux/printk.h)
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/core-api/printk-basics.html>
+
+use core::cmp;
+use core::fmt;
+
+use crate::bindings;
+use crate::c_types::{c_char, c_void};
+
+// Called from `vsprintf` with format specifier `%pA`.
+#[no_mangle]
+unsafe fn rust_fmt_argument(buf: *mut c_char, end: *mut c_char, ptr: *const c_void) -> *mut c_char {
+ use fmt::Write;
+
+ // Use `usize` to use `saturating_*` functions.
+ struct Writer {
+ buf: usize,
+ end: usize,
+ }
+
+ impl Write for Writer {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ // `buf` value after writing `len` bytes. This does not have to be bounded
+ // by `end`, but we don't want it to wrap around to 0.
+ let buf_new = self.buf.saturating_add(s.len());
+
+ // Amount that we can copy. `saturating_sub` ensures we get 0 if
+ // `buf` goes past `end`.
+ let len_to_copy = cmp::min(buf_new, self.end).saturating_sub(self.buf);
+
+ // SAFETY: In any case, `buf` is non-null and properly aligned.
+ // If `len_to_copy` is non-zero, then we know `buf` has not gone
+ // past `end` yet and so is valid.
+ unsafe {
+ core::ptr::copy_nonoverlapping(
+ s.as_bytes().as_ptr(),
+ self.buf as *mut u8,
+ len_to_copy,
+ )
+ };
+
+ self.buf = buf_new;
+ Ok(())
+ }
+ }
+
+ let mut w = Writer {
+ buf: buf as _,
+ end: end as _,
+ };
+ let _ = w.write_fmt(unsafe { *(ptr as *const fmt::Arguments<'_>) });
+ w.buf as _
+}
+
+/// Format strings.
+///
+/// Public but hidden since it should only be used from public macros.
+#[doc(hidden)]
+pub mod format_strings {
+ use crate::bindings;
+
+ /// The length we copy from the `KERN_*` kernel prefixes.
+ const LENGTH_PREFIX: usize = 2;
+
+ /// The length of the fixed format strings.
+ pub const LENGTH: usize = 10;
+
+ /// Generates a fixed format string for the kernel's [`printk`].
+ ///
+ /// The format string is always the same for a given level, i.e. for a
+ /// given `prefix`, which are the kernel's `KERN_*` constants.
+ ///
+ /// [`printk`]: ../../../../include/linux/printk.h
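+ ///
+ /// For instance, for `KERN_INFO` the generated array contains the bytes of
+ /// `b"\x016%s: %pA\0"`: the two-byte level prefix, `%s` for the module
+ /// name, and `%pA` for the formatted Rust arguments.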
+ const fn generate(is_cont: bool, prefix: &[u8; 3]) -> [u8; LENGTH] {
+ // Ensure the `KERN_*` macros are what we expect.
+ assert!(prefix[0] == b'\x01');
+ if is_cont {
+ assert!(prefix[1] == b'c');
+ } else {
+ assert!(prefix[1] >= b'0' && prefix[1] <= b'7');
+ }
+ assert!(prefix[2] == b'\x00');
+
+ let suffix: &[u8; LENGTH - LENGTH_PREFIX] = if is_cont {
+ b"%pA\0\0\0\0\0"
+ } else {
+ b"%s: %pA\0"
+ };
+
+ [
+ prefix[0], prefix[1], suffix[0], suffix[1], suffix[2], suffix[3], suffix[4], suffix[5],
+ suffix[6], suffix[7],
+ ]
+ }
+
+ // Generate the format strings at compile-time.
+ //
+ // This avoids the compiler generating the contents on the fly in the stack.
+ //
+ // Furthermore, `static` instead of `const` is used to share the strings
+ // for all the kernel.
+ pub static EMERG: [u8; LENGTH] = generate(false, bindings::KERN_EMERG);
+ pub static ALERT: [u8; LENGTH] = generate(false, bindings::KERN_ALERT);
+ pub static CRIT: [u8; LENGTH] = generate(false, bindings::KERN_CRIT);
+ pub static ERR: [u8; LENGTH] = generate(false, bindings::KERN_ERR);
+ pub static WARNING: [u8; LENGTH] = generate(false, bindings::KERN_WARNING);
+ pub static NOTICE: [u8; LENGTH] = generate(false, bindings::KERN_NOTICE);
+ pub static INFO: [u8; LENGTH] = generate(false, bindings::KERN_INFO);
+ pub static DEBUG: [u8; LENGTH] = generate(false, bindings::KERN_DEBUG);
+ pub static CONT: [u8; LENGTH] = generate(true, bindings::KERN_CONT);
+}
+
+/// Prints a message via the kernel's [`printk`].
+///
+/// Public but hidden since it should only be used from public macros.
+///
+/// # Safety
+///
+/// The format string must be one of the ones in [`format_strings`], and
+/// the module name must be null-terminated.
+///
+/// [`printk`]: ../../../../include/linux/printk.h
+#[doc(hidden)]
+pub unsafe fn call_printk(
+ format_string: &[u8; format_strings::LENGTH],
+ module_name: &[u8],
+ args: fmt::Arguments<'_>,
+) {
+ // `printk` does not seem to fail in any path.
+ unsafe {
+ bindings::printk(
+ format_string.as_ptr() as _,
+ module_name.as_ptr(),
+ &args as *const _ as *const c_void,
+ );
+ }
+}
+
+/// Prints a message via the kernel's [`printk`] for the `CONT` level.
+///
+/// Public but hidden since it should only be used from public macros.
+///
+/// [`printk`]: ../../../../include/linux/printk.h
+#[doc(hidden)]
+pub fn call_printk_cont(args: fmt::Arguments<'_>) {
+ // `printk` does not seem to fail in any path.
+ //
+ // SAFETY: The format string is fixed.
+ unsafe {
+ bindings::printk(
+ format_strings::CONT.as_ptr() as _,
+ &args as *const _ as *const c_void,
+ );
+ }
+}
+
+/// Performs formatting and forwards the string to [`call_printk`].
+///
+/// Public but hidden since it should only be used from public macros.
+#[doc(hidden)]
+#[cfg(not(testlib))]
+#[macro_export]
+macro_rules! print_macro (
+ // The non-continuation cases (most of them, e.g. `INFO`).
+ ($format_string:path, false, $($arg:tt)+) => (
+ // SAFETY: This hidden macro should only be called by the documented
+ // printing macros which ensure the format string is one of the fixed
+ // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
+ // by the `module!` proc macro or fixed values defined in a kernel
+ // crate.
+ unsafe {
+ $crate::print::call_printk(
+ &$format_string,
+ crate::__LOG_PREFIX,
+ format_args!($($arg)+),
+ );
+ }
+ );
+
+ // The `CONT` case.
+ ($format_string:path, true, $($arg:tt)+) => (
+ $crate::print::call_printk_cont(
+ format_args!($($arg)+),
+ );
+ );
+);
+
+/// Stub for doctests
+#[cfg(testlib)]
+#[macro_export]
+macro_rules! print_macro (
+ ($format_string:path, $e:expr, $($arg:tt)+) => (
+ ()
+ );
+);
+
+// We could use a macro to generate these macros. However, doing so ends
+// up being a bit ugly: it requires the dollar token trick to escape `$` as
+// well as playing with the `doc` attribute. Furthermore, they cannot be easily
+// imported in the prelude due to [1]. So, for the moment, we just write them
+// manually, like in the C side; while keeping most of the logic in another
+// macro, i.e. [`print_macro`].
+//
+// [1]: https://github.com/rust-lang/rust/issues/52234
+
+/// Prints an emergency-level message (level 0).
+///
+/// Use this level if the system is unusable.
+///
+/// Equivalent to the kernel's [`pr_emerg`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_emerg`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_emerg
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// pr_emerg!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_emerg (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::EMERG, false, $($arg)*)
+ )
+);
+
+/// Prints an alert-level message (level 1).
+///
+/// Use this level if action must be taken immediately.
+///
+/// Equivalent to the kernel's [`pr_alert`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_alert`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_alert
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// pr_alert!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_alert (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::ALERT, false, $($arg)*)
+ )
+);
+
+/// Prints a critical-level message (level 2).
+///
+/// Use this level for critical conditions.
+///
+/// Equivalent to the kernel's [`pr_crit`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_crit`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_crit
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// pr_crit!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_crit (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::CRIT, false, $($arg)*)
+ )
+);
+
+/// Prints an error-level message (level 3).
+///
+/// Use this level for error conditions.
+///
+/// Equivalent to the kernel's [`pr_err`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_err`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_err
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// pr_err!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_err (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::ERR, false, $($arg)*)
+ )
+);
+
+/// Prints a warning-level message (level 4).
+///
+/// Use this level for warning conditions.
+///
+/// Equivalent to the kernel's [`pr_warn`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_warn`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_warn
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// pr_warn!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_warn (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::WARNING, false, $($arg)*)
+ )
+);
+
+/// Prints a notice-level message (level 5).
+///
+/// Use this level for normal but significant conditions.
+///
+/// Equivalent to the kernel's [`pr_notice`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_notice`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_notice
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// pr_notice!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_notice (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::NOTICE, false, $($arg)*)
+ )
+);
+
+/// Prints an info-level message (level 6).
+///
+/// Use this level for informational messages.
+///
+/// Equivalent to the kernel's [`pr_info`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_info`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_info
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// pr_info!("hello {}\n", "there");
+/// ```
+#[macro_export]
+#[doc(alias = "print")]
+macro_rules! pr_info (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::INFO, false, $($arg)*)
+ )
+);
+
+/// Continues a previous log message in the same line.
+///
+/// Use only when continuing a previous `pr_*!` macro (e.g. [`pr_info!`]).
+///
+/// Equivalent to the kernel's [`pr_cont`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_cont`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_cont
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::pr_cont;
+/// pr_info!("hello");
+/// pr_cont!(" {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_cont (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::CONT, true, $($arg)*)
+ )
+);
diff --git a/rust/kernel/random.rs b/rust/kernel/random.rs
new file mode 100644
index 000000000000..723a89829f66
--- /dev/null
+++ b/rust/kernel/random.rs
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Random numbers.
+//!
+//! C header: [`include/linux/random.h`](../../../../include/linux/random.h)
+
+use core::convert::TryInto;
+
+use crate::{bindings, c_types, error};
+
+/// Fills a byte slice with random bytes generated from the kernel's CSPRNG.
+///
+/// Ensures that the CSPRNG has been seeded before generating any random bytes,
+/// and will block until it is ready.
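+///
+/// # Examples
+///
+/// A sketch of filling a stack buffer:
+///
+/// ```ignore
+/// let mut bytes = [0u8; 16];
+/// kernel::random::getrandom(&mut bytes)?;
+/// ```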
+pub fn getrandom(dest: &mut [u8]) -> error::Result {
+ let res = unsafe { bindings::wait_for_random_bytes() };
+ if res != 0 {
+ return Err(error::Error::from_kernel_errno(res));
+ }
+
+ unsafe {
+ bindings::get_random_bytes(
+ dest.as_mut_ptr() as *mut c_types::c_void,
+ dest.len().try_into()?,
+ );
+ }
+ Ok(())
+}
+
+/// Fills a byte slice with random bytes generated from the kernel's CSPRNG.
+///
+/// If the CSPRNG is not yet seeded, returns an `Err(EAGAIN)` immediately.
+pub fn getrandom_nonblock(dest: &mut [u8]) -> error::Result {
+ if !unsafe { bindings::rng_is_initialized() } {
+ return Err(error::Error::EAGAIN);
+ }
+ getrandom(dest)
+}
+
+/// Contributes the contents of a byte slice to the kernel's entropy pool.
+///
+/// Does *not* credit the kernel entropy counter though.
+pub fn add_randomness(data: &[u8]) {
+ unsafe {
+ bindings::add_device_randomness(
+ data.as_ptr() as *const c_types::c_void,
+ data.len().try_into().unwrap(),
+ );
+ }
+}
diff --git a/rust/kernel/raw_list.rs b/rust/kernel/raw_list.rs
new file mode 100644
index 000000000000..4bc4f4a24ad5
--- /dev/null
+++ b/rust/kernel/raw_list.rs
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Raw lists.
+//!
+//! TODO: This module is a work in progress.
+
+use core::{
+ cell::UnsafeCell,
+ ptr,
+ ptr::NonNull,
+ sync::atomic::{AtomicBool, Ordering},
+};
+
+/// A descriptor of list elements.
+///
+/// It describes the type of list elements and provides a function to determine how to get the
+/// links to be used on a list.
+///
+/// A type that may be in multiple lists simultaneously needs to implement one of these for each
+/// simultaneous list.
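+///
+/// # Examples
+///
+/// A sketch of an entry type that lives on a single list:
+///
+/// ```ignore
+/// struct Item {
+///     value: u32,
+///     links: Links<Item>,
+/// }
+///
+/// struct ItemList;
+///
+/// impl GetLinks for ItemList {
+///     type EntryType = Item;
+///
+///     fn get_links(data: &Item) -> &Links<Item> {
+///         &data.links
+///     }
+/// }
+/// ```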
+pub trait GetLinks {
+ /// The type of the entries in the list.
+ type EntryType: ?Sized;
+
+ /// Returns the links to be used when linking an entry within a list.
+ fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType>;
+}
+
+/// The links used to link an object on a linked list.
+///
+/// Instances of this type are usually embedded in structures and returned in calls to
+/// [`GetLinks::get_links`].
+pub struct Links<T: ?Sized> {
+ inserted: AtomicBool,
+ entry: UnsafeCell<ListEntry<T>>,
+}
+
+impl<T: ?Sized> Links<T> {
+ /// Constructs a new [`Links`] instance that isn't inserted on any lists yet.
+ pub fn new() -> Self {
+ Self {
+ inserted: AtomicBool::new(false),
+ entry: UnsafeCell::new(ListEntry::new()),
+ }
+ }
+
+ fn acquire_for_insertion(&self) -> bool {
+ self.inserted
+ .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+ .is_ok()
+ }
+
+ fn release_after_removal(&self) {
+ self.inserted.store(false, Ordering::Release);
+ }
+}
+
+impl<T: ?Sized> Default for Links<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+struct ListEntry<T: ?Sized> {
+ next: Option<NonNull<T>>,
+ prev: Option<NonNull<T>>,
+}
+
+impl<T: ?Sized> ListEntry<T> {
+ fn new() -> Self {
+ Self {
+ next: None,
+ prev: None,
+ }
+ }
+}
+
+/// A linked list.
+///
+/// # Invariants
+///
+/// The links of objects added to a list are owned by the list.
+pub(crate) struct RawList<G: GetLinks> {
+ head: Option<NonNull<G::EntryType>>,
+}
+
+impl<G: GetLinks> RawList<G> {
+ pub(crate) fn new() -> Self {
+ Self { head: None }
+ }
+
+ pub(crate) fn is_empty(&self) -> bool {
+ self.head.is_none()
+ }
+
+ fn insert_after_priv(
+ &mut self,
+ existing: &G::EntryType,
+ new_entry: &mut ListEntry<G::EntryType>,
+ new_ptr: Option<NonNull<G::EntryType>>,
+ ) {
+ {
+ // SAFETY: It's safe to access the links of `existing` because the list cannot
+ // change.
+ let existing_links = unsafe { &mut *G::get_links(existing).entry.get() };
+ new_entry.next = existing_links.next;
+ existing_links.next = new_ptr;
+ }
+
+ new_entry.prev = Some(NonNull::from(existing));
+
+ // SAFETY: It's safe to get the next entry of `existing` because the list cannot change.
+ let next_links =
+ unsafe { &mut *G::get_links(new_entry.next.unwrap().as_ref()).entry.get() };
+ next_links.prev = new_ptr;
+ }
+
+ /// Inserts the given object after `existing`.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `existing` points to a valid entry that is on the list.
+ pub(crate) unsafe fn insert_after(
+ &mut self,
+ existing: &G::EntryType,
+ new: &G::EntryType,
+ ) -> bool {
+ let links = G::get_links(new);
+ if !links.acquire_for_insertion() {
+ // Nothing to do if already inserted.
+ return false;
+ }
+
+ // SAFETY: The links are now owned by the list, so it is safe to get a mutable reference.
+ let new_entry = unsafe { &mut *links.entry.get() };
+ self.insert_after_priv(existing, new_entry, Some(NonNull::from(new)));
+ true
+ }
+
+ fn push_back_internal(&mut self, new: &G::EntryType) -> bool {
+ let links = G::get_links(new);
+ if !links.acquire_for_insertion() {
+ // Nothing to do if already inserted.
+ return false;
+ }
+
+ // SAFETY: The links are now owned by the list, so it is safe to get a mutable reference.
+ let new_entry = unsafe { &mut *links.entry.get() };
+ let new_ptr = Some(NonNull::from(new));
+ match self.back() {
+ // SAFETY: `back` is valid as the list cannot change.
+ Some(back) => self.insert_after_priv(unsafe { back.as_ref() }, new_entry, new_ptr),
+ None => {
+ self.head = new_ptr;
+ new_entry.next = new_ptr;
+ new_entry.prev = new_ptr;
+ }
+ }
+ true
+ }
+
+ pub(crate) unsafe fn push_back(&mut self, new: &G::EntryType) -> bool {
+ self.push_back_internal(new)
+ }
+
+ fn remove_internal(&mut self, data: &G::EntryType) -> bool {
+ let links = G::get_links(data);
+
+ // SAFETY: The links are now owned by the list, so it is safe to get a mutable reference.
+ let entry = unsafe { &mut *links.entry.get() };
+ let next = if let Some(next) = entry.next {
+ next
+ } else {
+ // Nothing to do if the entry is not on the list.
+ return false;
+ };
+
+ if ptr::eq(data, next.as_ptr()) {
+ // We're removing the only element.
+ self.head = None
+ } else {
+ // Update the head if we're removing it.
+ if let Some(raw_head) = self.head {
+ if ptr::eq(data, raw_head.as_ptr()) {
+ self.head = Some(next);
+ }
+ }
+
+ // SAFETY: It's safe to get the previous entry because the list cannot change.
+ unsafe { &mut *G::get_links(entry.prev.unwrap().as_ref()).entry.get() }.next =
+ entry.next;
+
+ // SAFETY: It's safe to get the next entry because the list cannot change.
+ unsafe { &mut *G::get_links(next.as_ref()).entry.get() }.prev = entry.prev;
+ }
+
+ // Reset the links of the element we're removing so that we know it's not on any list.
+ entry.next = None;
+ entry.prev = None;
+ links.release_after_removal();
+ true
+ }
+
+ /// Removes the given entry.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `data` is either on this list or in no list. It being on another
+ /// list leads to memory unsafety.
+ pub(crate) unsafe fn remove(&mut self, data: &G::EntryType) -> bool {
+ self.remove_internal(data)
+ }
+
+ fn pop_front_internal(&mut self) -> Option<NonNull<G::EntryType>> {
+ let head = self.head?;
+ // SAFETY: The head is on the list as we just got it from there and it cannot change.
+ unsafe { self.remove(head.as_ref()) };
+ Some(head)
+ }
+
+ pub(crate) fn pop_front(&mut self) -> Option<NonNull<G::EntryType>> {
+ self.pop_front_internal()
+ }
+
+ pub(crate) fn front(&self) -> Option<NonNull<G::EntryType>> {
+ self.head
+ }
+
+ pub(crate) fn back(&self) -> Option<NonNull<G::EntryType>> {
+ // SAFETY: The links of head are owned by the list, so it is safe to get a reference.
+ unsafe { &*G::get_links(self.head?.as_ref()).entry.get() }.prev
+ }
+
+ pub(crate) fn cursor_front(&self) -> Cursor<'_, G> {
+ Cursor::new(self, self.front())
+ }
+
+ pub(crate) fn cursor_front_mut(&mut self) -> CursorMut<'_, G> {
+ CursorMut::new(self, self.front())
+ }
+}
+
+struct CommonCursor<G: GetLinks> {
+ cur: Option<NonNull<G::EntryType>>,
+}
+
+impl<G: GetLinks> CommonCursor<G> {
+ fn new(cur: Option<NonNull<G::EntryType>>) -> Self {
+ Self { cur }
+ }
+
+ fn move_next(&mut self, list: &RawList<G>) {
+ match self.cur.take() {
+ None => self.cur = list.head,
+ Some(cur) => {
+ if let Some(head) = list.head {
+ // SAFETY: We have a shared ref to the linked list, so the links can't change.
+ let links = unsafe { &*G::get_links(cur.as_ref()).entry.get() };
+ if links.next.unwrap() != head {
+ self.cur = links.next;
+ }
+ }
+ }
+ }
+ }
+
+ fn move_prev(&mut self, list: &RawList<G>) {
+ match list.head {
+ None => self.cur = None,
+ Some(head) => {
+ let next = match self.cur.take() {
+ None => head,
+ Some(cur) => {
+ if cur == head {
+ return;
+ }
+ cur
+ }
+ };
+ // SAFETY: There's a shared ref to the list, so the links can't change.
+ let links = unsafe { &*G::get_links(next.as_ref()).entry.get() };
+ self.cur = links.prev;
+ }
+ }
+ }
+}
+
+/// A list cursor that allows traversing a linked list and inspecting elements.
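+///
+/// # Examples
+///
+/// A sketch of walking a list (assumes a `RawList<ItemList>` as in the
+/// [`GetLinks`] example; note these APIs are crate-private):
+///
+/// ```ignore
+/// let mut cursor = list.cursor_front();
+/// while let Some(item) = cursor.current() {
+///     // Inspect `item` here.
+///     cursor.move_next();
+/// }
+/// ```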
+pub struct Cursor<'a, G: GetLinks> {
+ cursor: CommonCursor<G>,
+ list: &'a RawList<G>,
+}
+
+impl<'a, G: GetLinks> Cursor<'a, G> {
+ fn new(list: &'a RawList<G>, cur: Option<NonNull<G::EntryType>>) -> Self {
+ Self {
+ list,
+ cursor: CommonCursor::new(cur),
+ }
+ }
+
+ /// Returns the element the cursor is currently positioned on.
+ pub fn current(&self) -> Option<&'a G::EntryType> {
+ let cur = self.cursor.cur?;
+ // SAFETY: Objects must be kept alive while on the list.
+ Some(unsafe { &*cur.as_ptr() })
+ }
+
+ /// Moves the cursor to the next element.
+ pub fn move_next(&mut self) {
+ self.cursor.move_next(self.list);
+ }
+}
+
+pub(crate) struct CursorMut<'a, G: GetLinks> {
+ cursor: CommonCursor<G>,
+ list: &'a mut RawList<G>,
+}
+
+impl<'a, G: GetLinks> CursorMut<'a, G> {
+ fn new(list: &'a mut RawList<G>, cur: Option<NonNull<G::EntryType>>) -> Self {
+ Self {
+ list,
+ cursor: CommonCursor::new(cur),
+ }
+ }
+
+ pub(crate) fn current(&mut self) -> Option<&mut G::EntryType> {
+ let cur = self.cursor.cur?;
+ // SAFETY: Objects must be kept alive while on the list.
+ Some(unsafe { &mut *cur.as_ptr() })
+ }
+
+ /// Removes the entry the cursor is pointing to and advances the cursor to the next entry. It
+ /// returns a raw pointer to the removed element (if one is removed).
+ pub(crate) fn remove_current(&mut self) -> Option<NonNull<G::EntryType>> {
+ let entry = self.cursor.cur?;
+ self.cursor.move_next(self.list);
+ // SAFETY: The entry is on the list as we just got it from there and it cannot change.
+ unsafe { self.list.remove(entry.as_ref()) };
+ Some(entry)
+ }
+
+ pub(crate) fn peek_next(&mut self) -> Option<&mut G::EntryType> {
+ let mut new = CommonCursor::new(self.cursor.cur);
+ new.move_next(self.list);
+ // SAFETY: Objects must be kept alive while on the list.
+ Some(unsafe { &mut *new.cur?.as_ptr() })
+ }
+
+ pub(crate) fn peek_prev(&mut self) -> Option<&mut G::EntryType> {
+ let mut new = CommonCursor::new(self.cursor.cur);
+ new.move_prev(self.list);
+ // SAFETY: Objects must be kept alive while on the list.
+ Some(unsafe { &mut *new.cur?.as_ptr() })
+ }
+
+ pub(crate) fn move_next(&mut self) {
+ self.cursor.move_next(self.list);
+ }
+}
diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
new file mode 100644
index 000000000000..630865369508
--- /dev/null
+++ b/rust/kernel/rbtree.rs
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Red-black trees.
+//!
+//! C header: [`include/linux/rbtree.h`](../../../../include/linux/rbtree.h)
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/core-api/rbtree.html>
+
+use crate::{bindings, Result};
+use alloc::boxed::Box;
+use core::{
+ cmp::{Ord, Ordering},
+ iter::{IntoIterator, Iterator},
+ marker::PhantomData,
+ mem::MaybeUninit,
+ ptr::{addr_of_mut, NonNull},
+};
+
+extern "C" {
+ fn rust_helper_rb_link_node(
+ node: *mut bindings::rb_node,
+ parent: *const bindings::rb_node,
+ rb_link: *mut *mut bindings::rb_node,
+ );
+}
+
+struct Node<K, V> {
+ links: bindings::rb_node,
+ key: K,
+ value: V,
+}
+
+/// A red-black tree with owned nodes.
+///
+/// It is backed by the kernel C red-black trees.
+///
+/// # Invariants
+///
+/// Non-null parent/children pointers stored in instances of the `rb_node` C struct are always
+/// valid, and pointing to a field of our internal representation of a node.
+///
+/// # Examples
+///
+/// In the example below we do several operations on a tree. We note that insertions may fail if
+/// the system is out of memory.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::rbtree::RBTree;
+///
+/// fn rbtest() -> Result {
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_insert(20, 200)?;
+/// tree.try_insert(10, 100)?;
+/// tree.try_insert(30, 300)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Print all elements.
+/// for (key, value) in &tree {
+/// pr_info!("{} = {}\n", key, value);
+/// }
+///
+/// // Replace one of the elements.
+/// tree.try_insert(10, 1000)?;
+///
+/// // Check that the tree reflects the replacement.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &1000));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Change the value of one of the elements.
+/// *tree.get_mut(&30).unwrap() = 3000;
+///
+/// // Check that the tree reflects the update.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &1000));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &3000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove an element.
+/// tree.remove(&10);
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &3000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Update all values.
+/// for value in tree.values_mut() {
+/// *value *= 10;
+/// }
+///
+/// // Check that the tree reflects the changes to values.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&20, &2000));
+/// assert_eq!(iter.next().unwrap(), (&30, &30000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// Ok(())
+/// }
+/// ```
+///
+/// In the example below, we first allocate a node, acquire a spinlock, then insert the node into
+/// the tree. This is useful when the insertion context does not allow sleeping, for example, when
+/// holding a spinlock.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::{rbtree::RBTree, sync::SpinLock};
+///
+/// fn insert_test(tree: &SpinLock<RBTree<u32, u32>>) -> Result {
+/// // Pre-allocate node. This may fail (as it allocates memory).
+/// let node = RBTree::try_allocate_node(10, 100)?;
+///
+/// // Insert node while holding the lock. It is guaranteed to succeed with no allocation
+/// // attempts.
+/// let mut guard = tree.lock();
+/// guard.insert(node);
+/// Ok(())
+/// }
+/// ```
+///
+/// In the example below, we reuse an existing node allocation from an element we removed.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::rbtree::RBTree;
+///
+/// fn reuse_test() -> Result {
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_insert(20, 200)?;
+/// tree.try_insert(10, 100)?;
+/// tree.try_insert(30, 300)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove a node, getting back ownership of it.
+/// let existing = tree.remove_node(&30).unwrap();
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Turn the node into a reservation so that we can reuse it with a different key/value.
+/// let reservation = existing.into_reservation();
+///
+/// // Insert a new node into the tree, reusing the previous allocation. This is guaranteed to
+/// // succeed (no memory allocations).
+/// tree.insert(reservation.into_node(15, 150));
+///
+/// // Check that the tree reflects the new insertion.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&15, &150));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// Ok(())
+/// }
+/// ```
+pub struct RBTree<K, V> {
+ root: bindings::rb_root,
+ _p: PhantomData<Node<K, V>>,
+}
+
+impl<K, V> RBTree<K, V> {
+ /// Creates a new and empty tree.
+ pub fn new() -> Self {
+ Self {
+ // INVARIANT: There are no nodes in the tree, so the invariant holds vacuously.
+ root: bindings::rb_root::default(),
+ _p: PhantomData,
+ }
+ }
+
+ /// Tries to insert a new value into the tree.
+ ///
+ /// It overwrites a node if one already exists with the same key and returns it (containing the
+ /// key/value pair). Returns [`None`] if a node with the same key didn't already exist.
+ ///
+ /// Returns an error if it cannot allocate memory for the new node.
+ pub fn try_insert(&mut self, key: K, value: V) -> Result<Option<RBTreeNode<K, V>>>
+ where
+ K: Ord,
+ {
+ Ok(self.insert(Self::try_allocate_node(key, value)?))
+ }
+
+ /// Allocates memory for a node to be eventually initialised and inserted into the tree via a
+ /// call to [`RBTree::insert`].
+ pub fn try_reserve_node() -> Result<RBTreeNodeReservation<K, V>> {
+ Ok(RBTreeNodeReservation {
+ node: Box::try_new(MaybeUninit::uninit())?,
+ })
+ }
+
+ /// Allocates and initialises a node that can be inserted into the tree via
+ /// [`RBTree::insert`].
+ pub fn try_allocate_node(key: K, value: V) -> Result<RBTreeNode<K, V>> {
+ Ok(Self::try_reserve_node()?.into_node(key, value))
+ }
+
+ /// Inserts a new node into the tree.
+ ///
+ /// It overwrites a node if one already exists with the same key and returns it (containing the
+ /// key/value pair). Returns [`None`] if a node with the same key didn't already exist.
+ ///
+ /// This function always succeeds.
+ pub fn insert(&mut self, node: RBTreeNode<K, V>) -> Option<RBTreeNode<K, V>>
+ where
+ K: Ord,
+ {
+ let RBTreeNode { node } = node;
+ let node = Box::into_raw(node);
+ // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when
+ // the node is removed or replaced.
+ let node_links = unsafe { addr_of_mut!((*node).links) };
+ let mut new_link: &mut *mut bindings::rb_node = &mut self.root.rb_node;
+ let mut parent = core::ptr::null_mut();
+ while !new_link.is_null() {
+ let this = crate::container_of!(*new_link, Node<K, V>, links);
+
+ parent = *new_link;
+
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants. `node` is
+ // valid until the node is removed.
+ match unsafe { (*node).key.cmp(&(*this).key) } {
+ // SAFETY: `parent` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => new_link = unsafe { &mut (*parent).rb_left },
+ // SAFETY: `parent` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => new_link = unsafe { &mut (*parent).rb_right },
+ Ordering::Equal => {
+ // INVARIANT: We are replacing an existing node with a new one, which is valid.
+ // It remains valid because we "forgot" it with `Box::into_raw`.
+ // SAFETY: All pointers are non-null and valid (parent, despite the name, really
+ // is the node we're replacing).
+ unsafe { bindings::rb_replace_node(parent, node_links, &mut self.root) };
+
+ // INVARIANT: The node is being returned and the caller may free it, however,
+ // it was removed from the tree. So the invariants still hold.
+ return Some(RBTreeNode {
+ // SAFETY: `this` was a node in the tree, so it is valid.
+ node: unsafe { Box::from_raw(this as _) },
+ });
+ }
+ }
+ }
+
+ // INVARIANT: We are linking in a new node, which is valid. It remains valid because we
+ // "forgot" it with `Box::into_raw`.
+ // SAFETY: All pointers are non-null and valid (`*new_link` is null, but `new_link` is a
+ // mutable reference).
+ unsafe { rust_helper_rb_link_node(node_links, parent, new_link) };
+
+ // SAFETY: All pointers are valid. `node` has just been inserted into the tree.
+ unsafe { bindings::rb_insert_color(node_links, &mut self.root) };
+ None
+ }
+
+ /// Returns a node with the given key, if one exists.
+ fn find(&self, key: &K) -> Option<NonNull<Node<K, V>>>
+ where
+ K: Ord,
+ {
+ let mut node = self.root.rb_node;
+ while !node.is_null() {
+ let this = crate::container_of!(node, Node<K, V>, links);
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants.
+ node = match key.cmp(unsafe { &(*this).key }) {
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => unsafe { (*node).rb_left },
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => unsafe { (*node).rb_right },
+ Ordering::Equal => return NonNull::new(this as _),
+ }
+ }
+ None
+ }
+
+ /// Returns a reference to the value corresponding to the key.
+ pub fn get(&self, key: &K) -> Option<&V>
+ where
+ K: Ord,
+ {
+ // SAFETY: The `find` return value is a node in the tree, so it is valid.
+ self.find(key).map(|node| unsafe { &node.as_ref().value })
+ }
+
+ /// Returns a mutable reference to the value corresponding to the key.
+ pub fn get_mut(&mut self, key: &K) -> Option<&mut V>
+ where
+ K: Ord,
+ {
+ // SAFETY: the `find` return value is a node in the tree, so it is valid.
+ self.find(key)
+ .map(|mut node| unsafe { &mut node.as_mut().value })
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the node that was removed if one exists, or [`None`] otherwise.
+ pub fn remove_node(&mut self, key: &K) -> Option<RBTreeNode<K, V>>
+ where
+ K: Ord,
+ {
+ let mut node = self.find(key)?;
+
+ // SAFETY: the `find` return value is a node in the tree, so it is valid.
+ unsafe { bindings::rb_erase(&mut node.as_mut().links, &mut self.root) };
+
+ // INVARIANT: The node is being returned and the caller may free it, however, it was
+ // removed from the tree. So the invariants still hold.
+ Some(RBTreeNode {
+ // SAFETY: the `find` return value was a node in the tree, so it is valid.
+ node: unsafe { Box::from_raw(node.as_ptr()) },
+ })
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the value that was removed if one exists, or [`None`] otherwise.
+ pub fn remove(&mut self, key: &K) -> Option<V>
+ where
+ K: Ord,
+ {
+ let node = self.remove_node(key)?;
+ let RBTreeNode { node } = node;
+ let Node {
+ links: _,
+ key: _,
+ value,
+ } = *node;
+ Some(value)
+ }
+
+ /// Returns an iterator over the tree nodes, sorted by key.
+ pub fn iter(&self) -> RBTreeIterator<'_, K, V> {
+ RBTreeIterator {
+ _tree: PhantomData,
+ // SAFETY: `root` is valid as it's embedded in `self` and we have a valid `self`.
+ next: unsafe { bindings::rb_first(&self.root) },
+ }
+ }
+
+ /// Returns a mutable iterator over the tree nodes, sorted by key.
+ pub fn iter_mut(&mut self) -> RBTreeIteratorMut<'_, K, V> {
+ RBTreeIteratorMut {
+ _tree: PhantomData,
+ // SAFETY: `root` is valid as it's embedded in `self` and we have a valid `self`.
+ next: unsafe { bindings::rb_first(&self.root) },
+ }
+ }
+
+ /// Returns an iterator over the keys of the nodes in the tree, in sorted order.
+ pub fn keys(&self) -> impl Iterator<Item = &'_ K> {
+ self.iter().map(|(k, _)| k)
+ }
+
+ /// Returns an iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values(&self) -> impl Iterator<Item = &'_ V> {
+ self.iter().map(|(_, v)| v)
+ }
+
+ /// Returns a mutable iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values_mut(&mut self) -> impl Iterator<Item = &'_ mut V> {
+ self.iter_mut().map(|(_, v)| v)
+ }
+}
+
+impl<K, V> Default for RBTree<K, V> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<K, V> Drop for RBTree<K, V> {
+ fn drop(&mut self) {
+ // SAFETY: `root` is valid as it's embedded in `self` and we have a valid `self`.
+ let mut next = unsafe { bindings::rb_first_postorder(&self.root) };
+
+ // INVARIANT: The loop invariant is that all tree nodes from `next` in postorder are valid.
+ while !next.is_null() {
+ let this = crate::container_of!(next, Node<K, V>, links);
+
+ // Find out what the next node is before disposing of the current one.
+ // SAFETY: `next` and all nodes in postorder are still valid.
+ next = unsafe { bindings::rb_next_postorder(next) };
+
+ // INVARIANT: This is the destructor, so we break the type invariant during clean-up,
+ // but it is not observable. The loop invariant is still maintained.
+ // SAFETY: `this` is valid per the loop invariant.
+ unsafe { Box::from_raw(this as *mut Node<K, V>) };
+ }
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a RBTree<K, V> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = RBTreeIterator<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+/// An iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter`].
+pub struct RBTreeIterator<'a, K, V> {
+ _tree: PhantomData<&'a RBTree<K, V>>,
+ next: *mut bindings::rb_node,
+}
+
+impl<'a, K, V> Iterator for RBTreeIterator<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.next.is_null() {
+ return None;
+ }
+
+ let cur = crate::container_of!(self.next, Node<K, V>, links);
+
+ // SAFETY: The reference to the tree used to create the iterator outlives the iterator, so
+ // the tree cannot change. By the tree invariant, all nodes are valid.
+ self.next = unsafe { bindings::rb_next(self.next) };
+
+ // SAFETY: By the same reasoning above, it is safe to dereference the node. Additionally,
+ // it is ok to return references to the key and value because they are tied to the tree
+ // borrow of lifetime `'a`, during which the tree cannot be mutated.
+ Some(unsafe { (&(*cur).key, &(*cur).value) })
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a mut RBTree<K, V> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = RBTreeIteratorMut<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+/// A mutable iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter_mut`].
+pub struct RBTreeIteratorMut<'a, K, V> {
+ _tree: PhantomData<&'a RBTree<K, V>>,
+ next: *mut bindings::rb_node,
+}
+
+impl<'a, K, V> Iterator for RBTreeIteratorMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.next.is_null() {
+ return None;
+ }
+
+ let cur = crate::container_of!(self.next, Node<K, V>, links) as *mut Node<K, V>;
+
+ // SAFETY: The reference to the tree used to create the iterator outlives the iterator, so
+ // the tree cannot change (except for the value of previous nodes, but those don't affect
+ // the iteration process). By the tree invariant, all nodes are valid.
+ self.next = unsafe { bindings::rb_next(self.next) };
+
+ // SAFETY: By the same reasoning above, it is safe to dereference the node. Additionally,
+ // it is ok to return references to the key and value because they are tied to the exclusive
+ // tree borrow of lifetime `'a`, so no aliasing references can be created.
+ Some(unsafe { (&(*cur).key, &mut (*cur).value) })
+ }
+}
+
+/// A memory reservation for a red-black tree node.
+///
+/// It contains the memory needed to hold a node that can be inserted into a red-black tree. One
+/// can be obtained by directly allocating it ([`RBTree::try_reserve_node`]) or by "uninitialising"
+/// ([`RBTreeNode::into_reservation`]) an actual node (usually returned by some operation like
+/// removal from a tree).
+pub struct RBTreeNodeReservation<K, V> {
+ node: Box<MaybeUninit<Node<K, V>>>,
+}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+ /// Initialises a node reservation.
+ ///
+ /// It then becomes an [`RBTreeNode`] that can be inserted into a tree.
+ pub fn into_node(mut self, key: K, value: V) -> RBTreeNode<K, V> {
+ let node_ptr = self.node.as_mut_ptr();
+ // SAFETY: `node_ptr` is valid, and so are its fields.
+ unsafe { addr_of_mut!((*node_ptr).links).write(bindings::rb_node::default()) };
+ // SAFETY: `node_ptr` is valid, and so are its fields.
+ unsafe { addr_of_mut!((*node_ptr).key).write(key) };
+ // SAFETY: `node_ptr` is valid, and so are its fields.
+ unsafe { addr_of_mut!((*node_ptr).value).write(value) };
+ let raw = Box::into_raw(self.node);
+ RBTreeNode {
+ // SAFETY: The pointer came from a `MaybeUninit<Node>` whose fields have all been
+ // initialised. Additionally, it has the same layout as `Node`.
+ node: unsafe { Box::from_raw(raw as _) },
+ }
+ }
+}
+
+/// A red-black tree node.
+///
+/// The node is fully initialised (with key and value) and can be inserted into a tree without any
+/// extra allocations or failure paths.
+pub struct RBTreeNode<K, V> {
+ node: Box<Node<K, V>>,
+}
+
+impl<K, V> RBTreeNode<K, V> {
+ /// "Uninitialises" a node.
+ ///
+ /// It then becomes a reservation that can be re-initialised into a different node (i.e., with
+ /// a different key and/or value).
+ ///
+ /// The existing key and value are dropped in-place as part of this operation; that is, memory
+ /// may be freed (but only for the key/value; memory for the node itself is kept for reuse).
+ pub fn into_reservation(self) -> RBTreeNodeReservation<K, V> {
+ let raw = Box::into_raw(self.node);
+ let mut ret = RBTreeNodeReservation {
+ // SAFETY: The pointer came from a valid `Node`, which has the same layout as
+ // `MaybeUninit<Node>`.
+ node: unsafe { Box::from_raw(raw as _) },
+ };
+ // SAFETY: Although the type is `MaybeUninit<Node>`, we know it has been initialised
+ // because it came from a `Node`. So it is safe to drop it.
+ unsafe { core::ptr::drop_in_place(ret.node.as_mut_ptr()) };
+ ret
+ }
+}
diff --git a/rust/kernel/security.rs b/rust/kernel/security.rs
new file mode 100644
index 000000000000..c38b0dceb345
--- /dev/null
+++ b/rust/kernel/security.rs
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Linux Security Modules (LSM).
+//!
+//! C header: [`include/linux/security.h`](../../../../include/linux/security.h).
+
+use crate::{bindings, c_types, error::Error, file::File, task::Task, Result};
+
+extern "C" {
+ #[allow(improper_ctypes)]
+ fn rust_helper_security_binder_set_context_mgr(
+ mgr: *mut bindings::task_struct,
+ ) -> c_types::c_int;
+ #[allow(improper_ctypes)]
+ fn rust_helper_security_binder_transaction(
+ from: *mut bindings::task_struct,
+ to: *mut bindings::task_struct,
+ ) -> c_types::c_int;
+ #[allow(improper_ctypes)]
+ fn rust_helper_security_binder_transfer_binder(
+ from: *mut bindings::task_struct,
+ to: *mut bindings::task_struct,
+ ) -> c_types::c_int;
+ #[allow(improper_ctypes)]
+ fn rust_helper_security_binder_transfer_file(
+ from: *mut bindings::task_struct,
+ to: *mut bindings::task_struct,
+ file: *mut bindings::file,
+ ) -> c_types::c_int;
+}
+
+/// Calls the security modules to determine if the given task can become the manager of a binder
+/// context.
+pub fn binder_set_context_mgr(mgr: &Task) -> Result {
+ // SAFETY: By the `Task` invariants, `mgr.ptr` is valid.
+ let ret = unsafe { rust_helper_security_binder_set_context_mgr(mgr.ptr) };
+ if ret != 0 {
+ Err(Error::from_kernel_errno(ret))
+ } else {
+ Ok(())
+ }
+}
+
+/// Calls the security modules to determine if binder transactions are allowed from task `from` to
+/// task `to`.
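+///
+/// # Examples
+///
+/// A minimal sketch of gating work on this check; the `submit` wrapper is hypothetical:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::{security, task::Task};
+/// fn submit(from: &Task, to: &Task) -> Result {
+/// // Ask the security modules whether the transaction is allowed; propagate a denial.
+/// security::binder_transaction(from, to)?;
+/// // ... queue the transaction ...
+/// Ok(())
+/// }
+/// ```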
+pub fn binder_transaction(from: &Task, to: &Task) -> Result {
+ // SAFETY: By the `Task` invariants, `from.ptr` and `to.ptr` are valid.
+ let ret = unsafe { rust_helper_security_binder_transaction(from.ptr, to.ptr) };
+ if ret != 0 {
+ Err(Error::from_kernel_errno(ret))
+ } else {
+ Ok(())
+ }
+}
+
+/// Calls the security modules to determine if task `from` is allowed to send binder objects
+/// (owned by itself or other processes) to task `to` through a binder transaction.
+pub fn binder_transfer_binder(from: &Task, to: &Task) -> Result {
+ // SAFETY: By the `Task` invariants, `from.ptr` and `to.ptr` are valid.
+ let ret = unsafe { rust_helper_security_binder_transfer_binder(from.ptr, to.ptr) };
+ if ret != 0 {
+ Err(Error::from_kernel_errno(ret))
+ } else {
+ Ok(())
+ }
+}
+
+/// Calls the security modules to determine if task `from` is allowed to send the given file to
+/// task `to` (which would get its own file descriptor) through a binder transaction.
+pub fn binder_transfer_file(from: &Task, to: &Task, file: &File) -> Result {
+ // SAFETY: By the `Task` invariants, `from.ptr` and `to.ptr` are valid. Similarly, by the
+ // `File` invariants, `file.ptr` is also valid.
+ let ret = unsafe { rust_helper_security_binder_transfer_file(from.ptr, to.ptr, file.ptr) };
+ if ret != 0 {
+ Err(Error::from_kernel_errno(ret))
+ } else {
+ Ok(())
+ }
+}
diff --git a/rust/kernel/static_assert.rs b/rust/kernel/static_assert.rs
new file mode 100644
index 000000000000..a80d8ab57564
--- /dev/null
+++ b/rust/kernel/static_assert.rs
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Static assert.
+
+/// Static assert (i.e. compile-time assert).
+///
+/// Similar to C11 [`_Static_assert`] and C++11 [`static_assert`].
+///
+/// The feature may be added to Rust in the future: see [RFC 2790].
+///
+/// [`_Static_assert`]: https://en.cppreference.com/w/c/language/_Static_assert
+/// [`static_assert`]: https://en.cppreference.com/w/cpp/language/static_assert
+/// [RFC 2790]: https://github.com/rust-lang/rfcs/issues/2790
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// static_assert!(42 > 24);
+/// static_assert!(core::mem::size_of::<u8>() == 1);
+///
+/// const X: &[u8] = b"bar";
+/// static_assert!(X[1] == 'a' as u8);
+///
+/// const fn f(x: i32) -> i32 {
+/// x + 2
+/// }
+/// static_assert!(f(40) == 42);
+/// ```
+#[macro_export]
+macro_rules! static_assert {
+ ($condition:expr) => {
+ // Based on the latest version in `rustc` before it was [removed].
+ //
+ // [removed]: https://github.com/rust-lang/rust/commit/c2dad1c6b9f9636198d7c561b47a2974f5103f6d
+ #[allow(dead_code)]
+ const _: () = [()][!($condition) as usize];
+ };
+}
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
new file mode 100644
index 000000000000..5620080a8e81
--- /dev/null
+++ b/rust/kernel/str.rs
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! String representations.
+
+use core::ops::{self, Deref, Index};
+
+use crate::bindings;
+use crate::c_types;
+
+/// Byte string without UTF-8 validity guarantee.
+///
+/// `BStr` is simply an alias for `[u8]`, but it has a more evident semantic meaning.
+pub type BStr = [u8];
+
+/// Creates a new [`BStr`] from a string literal.
+///
+/// `b_str!` converts the supplied string literal to a byte string, so non-ASCII
+/// characters can be included.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::b_str;
+/// # use kernel::str::BStr;
+/// const MY_BSTR: &'static BStr = b_str!("My awesome BStr!");
+/// ```
+#[macro_export]
+macro_rules! b_str {
+ ($str:literal) => {{
+ const S: &'static str = $str;
+ const C: &'static $crate::str::BStr = S.as_bytes();
+ C
+ }};
+}
+
+/// Possible errors when using conversion functions in [`CStr`].
+#[derive(Debug, Clone, Copy)]
+pub enum CStrConvertError {
+ /// Supplied bytes contain an interior `NUL`.
+ InteriorNul,
+
+ /// Supplied bytes are not terminated by `NUL`.
+ NotNulTerminated,
+}
+
+impl From<CStrConvertError> for crate::Error {
+ #[inline]
+ fn from(_: CStrConvertError) -> crate::Error {
+ crate::Error::EINVAL
+ }
+}
+
+/// A string that is guaranteed to have exactly one `NUL` byte, which is at the
+/// end.
+///
+/// Used for interoperability with kernel APIs that take C strings.
+#[repr(transparent)]
+pub struct CStr([u8]);
+
+impl CStr {
+ /// Returns the length of this string excluding `NUL`.
+ #[inline]
+ pub const fn len(&self) -> usize {
+ self.len_with_nul() - 1
+ }
+
+ /// Returns the length of this string with `NUL`.
+ #[inline]
+ pub const fn len_with_nul(&self) -> usize {
+ // SAFETY: This is one of the invariants of `CStr`.
+ // We add an `unreachable_unchecked` here to hint to the optimizer that
+ // the value returned from this function is non-zero.
+ if self.0.is_empty() {
+ unsafe { core::hint::unreachable_unchecked() };
+ }
+ self.0.len()
+ }
+
+ /// Returns `true` if the string only includes `NUL`.
+ #[inline]
+ pub const fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Wraps a raw C string pointer.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be a valid pointer to a `NUL`-terminated C string, and it must
+ /// remain valid for at least `'a`. While the returned `CStr` is alive, the
+ /// memory pointed to by `ptr` must not be mutated.
+ #[inline]
+ pub unsafe fn from_char_ptr<'a>(ptr: *const c_types::c_char) -> &'a Self {
+ // SAFETY: The safety precondition guarantees `ptr` is a valid pointer
+ // to a `NUL`-terminated C string.
+ let len = unsafe { bindings::strlen(ptr) } + 1;
+ // SAFETY: Lifetime guaranteed by the safety precondition.
+ let bytes = unsafe { core::slice::from_raw_parts(ptr as _, len as _) };
+ // SAFETY: As `len` is returned by `strlen`, `bytes` does not contain interior `NUL`.
+ // As we have added 1 to `len`, the last byte is known to be `NUL`.
+ unsafe { Self::from_bytes_with_nul_unchecked(bytes) }
+ }
+
+ /// Creates a [`CStr`] from a `[u8]`.
+ ///
+ /// The provided slice must be `NUL`-terminated and must not contain any
+ /// interior `NUL` bytes.
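+ ///
+ /// # Examples
+ ///
+ /// A short sketch of the accepted and rejected inputs:
+ ///
+ /// ```
+ /// # use kernel::str::CStr;
+ /// assert!(CStr::from_bytes_with_nul(b"foo\0").is_ok());
+ /// // Rejected: not `NUL`-terminated.
+ /// assert!(CStr::from_bytes_with_nul(b"foo").is_err());
+ /// // Rejected: interior `NUL` byte.
+ /// assert!(CStr::from_bytes_with_nul(b"fo\0o\0").is_err());
+ /// ```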
+ pub const fn from_bytes_with_nul(bytes: &[u8]) -> Result<&Self, CStrConvertError> {
+ if bytes.is_empty() {
+ return Err(CStrConvertError::NotNulTerminated);
+ }
+ if bytes[bytes.len() - 1] != 0 {
+ return Err(CStrConvertError::NotNulTerminated);
+ }
+ let mut i = 0;
+ // `i + 1 < bytes.len()` allows LLVM to optimize away bounds checking,
+ // while it couldn't optimize away bounds checks for `i < bytes.len() - 1`.
+ while i + 1 < bytes.len() {
+ if bytes[i] == 0 {
+ return Err(CStrConvertError::InteriorNul);
+ }
+ i += 1;
+ }
+ // SAFETY: We just checked that all properties hold.
+ Ok(unsafe { Self::from_bytes_with_nul_unchecked(bytes) })
+ }
+
+ /// Creates a [`CStr`] from a `[u8]`, panicking if the input is not valid.
+ ///
+ /// This function is only meant to be used by the `c_str!` macro, so that crates
+ /// using the `c_str!` macro don't have to enable the `const_panic` feature.
+ #[doc(hidden)]
+ pub const fn from_bytes_with_nul_unwrap(bytes: &[u8]) -> &Self {
+ match Self::from_bytes_with_nul(bytes) {
+ Ok(v) => v,
+ Err(_) => panic!("string contains interior NUL"),
+ }
+ }
+
+ /// Creates a [`CStr`] from a `[u8]` without performing any additional
+ /// checks.
+ ///
+ /// # Safety
+ ///
+ /// `bytes` *must* end with a `NUL` byte, and should only have a single
+ /// `NUL` byte (or the string will be truncated).
+ #[inline]
+ pub const unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &CStr {
+ // Note: This can be done using pointer deref (which requires
+ // `const_raw_ptr_deref` to be const) or `transmute` (which requires
+ // `const_transmute` to be const) or `ptr::from_raw_parts` (which
+ // requires `ptr_metadata`).
+ // While none of them are currently stable, it is very likely that one of
+ // them will eventually be.
+ // SAFETY: Properties of `bytes` guaranteed by the safety precondition.
+ unsafe { &*(bytes as *const [u8] as *const Self) }
+ }
+
+ /// Returns a C pointer to the string.
+ #[inline]
+ pub const fn as_char_ptr(&self) -> *const c_types::c_char {
+ self.0.as_ptr() as _
+ }
+
+ /// Convert the string to a byte slice without the trailing 0 byte.
+ #[inline]
+ pub fn as_bytes(&self) -> &[u8] {
+ &self.0[..self.len()]
+ }
+
+ /// Convert the string to a byte slice containing the trailing 0 byte.
+ #[inline]
+ pub const fn as_bytes_with_nul(&self) -> &[u8] {
+ &self.0
+ }
+}
+
+impl AsRef<BStr> for CStr {
+ #[inline]
+ fn as_ref(&self) -> &BStr {
+ self.as_bytes()
+ }
+}
+
+impl Deref for CStr {
+ type Target = BStr;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.as_bytes()
+ }
+}
+
+impl Index<ops::RangeFrom<usize>> for CStr {
+ type Output = CStr;
+
+ #[inline]
+ fn index(&self, index: ops::RangeFrom<usize>) -> &Self::Output {
+ // Delegate bounds checking to slice.
+ // Assign to _ to mute clippy's unnecessary operation warning.
+ let _ = &self.as_bytes()[index.start..];
+ // SAFETY: We just checked the bounds.
+ unsafe { Self::from_bytes_with_nul_unchecked(&self.0[index.start..]) }
+ }
+}
+
+impl Index<ops::RangeFull> for CStr {
+ type Output = CStr;
+
+ #[inline]
+ fn index(&self, _index: ops::RangeFull) -> &Self::Output {
+ self
+ }
+}
+
+mod private {
+ use core::ops;
+
+ // Marker trait for index types that can be forwarded to `BStr`.
+ pub trait CStrIndex {}
+
+ impl CStrIndex for usize {}
+ impl CStrIndex for ops::Range<usize> {}
+ impl CStrIndex for ops::RangeInclusive<usize> {}
+ impl CStrIndex for ops::RangeToInclusive<usize> {}
+}
+
+impl<Idx> Index<Idx> for CStr
+where
+ Idx: private::CStrIndex,
+ BStr: Index<Idx>,
+{
+ type Output = <BStr as Index<Idx>>::Output;
+
+ #[inline]
+ fn index(&self, index: Idx) -> &Self::Output {
+ &self.as_bytes()[index]
+ }
+}
+
+/// Creates a new [`CStr`] from a string literal.
+///
+/// The string literal should not contain any `NUL` bytes.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::c_str;
+/// # use kernel::str::CStr;
+/// const MY_CSTR: &'static CStr = c_str!("My awesome CStr!");
+/// ```
+#[macro_export]
+macro_rules! c_str {
+ ($str:literal) => {{
+ const S: &str = concat!($str, "\0");
+ const C: &$crate::str::CStr = $crate::str::CStr::from_bytes_with_nul_unwrap(S.as_bytes());
+ C
+ }};
+}
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
new file mode 100644
index 000000000000..ddecb9e371a4
--- /dev/null
+++ b/rust/kernel/sync/arc.rs
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A reference-counted pointer.
+//!
+//! This module implements a way for users to create reference-counted objects and pointers to
+//! them. Such a pointer automatically increments and decrements the count, and drops the
+//! underlying object when it reaches zero. It is also safe to use concurrently from multiple
+//! threads.
+//!
+//! It is different from the standard library's [`Arc`] in a few ways:
+//! 1. It is backed by the kernel's `refcount_t` type.
+//! 2. It does not support weak references, which allows it to be half the size.
+//! 3. It saturates the reference count instead of aborting when it goes over a threshold.
+//! 4. It does not provide a `get_mut` method, so the reference-counted object is pinned.
+//!
+//! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
+
+use crate::{bindings, Result};
+use alloc::boxed::Box;
+use core::{
+ cell::UnsafeCell, convert::AsRef, marker::PhantomData, mem::ManuallyDrop, ops::Deref, pin::Pin,
+ ptr::NonNull,
+};
+
+extern "C" {
+ fn rust_helper_refcount_new() -> bindings::refcount_t;
+ fn rust_helper_refcount_inc(r: *mut bindings::refcount_t);
+ fn rust_helper_refcount_dec_and_test(r: *mut bindings::refcount_t) -> bool;
+}
+
+/// A reference-counted pointer to an instance of `T`.
+///
+/// The reference count is incremented when new instances of [`Ref`] are created, and decremented
+/// when they are dropped. When the count reaches zero, the underlying `T` is also dropped.
+///
+/// # Invariants
+///
+/// The reference count on an instance of [`Ref`] is always non-zero.
+/// The object pointed to by [`Ref`] is always pinned.
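+///
+/// # Examples
+///
+/// A minimal sketch of creating and cloning a reference-counted object:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::sync::Ref;
+///
+/// fn example() -> Result {
+/// // Allocate a new reference-counted `u64`. This may fail, as it allocates memory.
+/// let a = Ref::try_new(10u64)?;
+///
+/// // Cloning only increments the refcount; both pointers refer to the same object.
+/// let b = a.clone();
+/// assert!(Ref::ptr_eq(&a, &b));
+/// assert_eq!(*b, 10);
+/// Ok(())
+/// }
+/// ```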
+pub struct Ref<T: ?Sized> {
+ ptr: NonNull<RefInner<T>>,
+ _p: PhantomData<RefInner<T>>,
+}
+
+struct RefInner<T: ?Sized> {
+ refcount: UnsafeCell<bindings::refcount_t>,
+ data: T,
+}
+
+// This is to allow [`Ref`] (and variants) to be used as the type of `self`.
+impl<T: ?Sized> core::ops::Receiver for Ref<T> {}
+
+// SAFETY: It is safe to send `Ref<T>` to another thread when the underlying `T` is `Sync` because
+// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
+// `T` to be `Send` because any thread that has a `Ref<T>` may ultimately access `T` directly, for
+// example, when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: ?Sized + Sync + Send> Send for Ref<T> {}
+
+// SAFETY: It is safe to send `&Ref<T>` to another thread when the underlying `T` is `Sync` for
+// the same reason as above. `T` needs to be `Send` as well because a thread can clone a `&Ref<T>`
+// into a `Ref<T>`, which may lead to `T` being accessed by the same reasoning as above.
+unsafe impl<T: ?Sized + Sync + Send> Sync for Ref<T> {}
+
+impl<T> Ref<T> {
+ /// Constructs a new reference counted instance of `T`.
+ pub fn try_new(contents: T) -> Result<Self> {
+ Self::try_new_and_init(contents, |_| {})
+ }
+
+ /// Constructs a new reference counted instance of `T` and calls the initialisation function.
+ ///
+ /// This is useful because it provides a mutable reference to `T` at its final location.
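+ ///
+ /// # Examples
+ ///
+ /// A sketch of initialising a pinned type, here a [`Mutex`](crate::sync::Mutex), at its final
+ /// location; the `new_counter` wrapper is hypothetical:
+ ///
+ /// ```no_run
+ /// # use kernel::prelude::*;
+ /// # use kernel::mutex_init;
+ /// # use kernel::sync::{Mutex, Ref};
+ /// fn new_counter() -> Result<Ref<Mutex<u32>>> {
+ /// // SAFETY: `mutex_init!` is called below, before the mutex is used.
+ /// Ref::try_new_and_init(unsafe { Mutex::new(0) }, |mut m| {
+ /// mutex_init!(m.as_mut(), "new_counter::m");
+ /// })
+ /// }
+ /// ```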
+ pub fn try_new_and_init<U: FnOnce(Pin<&mut T>)>(contents: T, init: U) -> Result<Self> {
+ // INVARIANT: The refcount is initialised to a non-zero value.
+ let mut inner = Box::try_new(RefInner {
+ // SAFETY: Just an FFI call that returns a `refcount_t` initialised to 1.
+ refcount: UnsafeCell::new(unsafe { rust_helper_refcount_new() }),
+ data: contents,
+ })?;
+
+ // SAFETY: By the invariant, `RefInner` is pinned and `T` is also pinned.
+ let pinned = unsafe { Pin::new_unchecked(&mut inner.data) };
+
+ // INVARIANT: The only places where `&mut T` is available are here, which is explicitly
+ // pinned, and in `drop`. Both are compatible with the pin requirements.
+ init(pinned);
+
+ Ok(Ref {
+ ptr: NonNull::from(Box::leak(inner)),
+ _p: PhantomData,
+ })
+ }
+
+ /// Deconstructs a [`Ref`] object into a `usize`.
+ ///
+ /// It can be reconstructed once via [`Ref::from_usize`].
+ pub fn into_usize(obj: Self) -> usize {
+ ManuallyDrop::new(obj).ptr.as_ptr() as _
+ }
+
+ /// Borrows a [`Ref`] instance previously deconstructed via [`Ref::into_usize`].
+ ///
+ /// # Safety
+ ///
+ /// `encoded` must have been returned by a previous call to [`Ref::into_usize`]. Additionally,
+ /// [`Ref::from_usize`] can only be called after *all* instances of [`RefBorrow`] have been
+ /// dropped.
+ pub unsafe fn borrow_usize(encoded: usize) -> RefBorrow<T> {
+ // SAFETY: By the safety requirement of this function, we know that `encoded` came from
+ // a previous call to `Ref::into_usize`.
+ let obj = ManuallyDrop::new(unsafe { Ref::from_usize(encoded) });
+
+ // SAFETY: The safety requirements ensure that the object remains alive for the lifetime of
+ // the returned value. There is no way to create mutable references to the object.
+ unsafe { RefBorrow::new(obj) }
+ }
+
+ /// Recreates a [`Ref`] instance previously deconstructed via [`Ref::into_usize`].
+ ///
+ /// # Safety
+ ///
+ /// `encoded` must have been returned by a previous call to [`Ref::into_usize`]. Additionally,
+ /// it can only be called once for each previous call to [`Ref::into_usize`].
+ pub unsafe fn from_usize(encoded: usize) -> Self {
+ Ref {
+ ptr: NonNull::new(encoded as _).unwrap(),
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized> Ref<T> {
+ /// Determines if two reference-counted pointers point to the same underlying instance of `T`.
+ pub fn ptr_eq(a: &Self, b: &Self) -> bool {
+ core::ptr::eq(a.ptr.as_ptr(), b.ptr.as_ptr())
+ }
+
+ /// Returns a pinned version of a given `Ref` instance.
+ pub fn pinned(obj: Self) -> Pin<Self> {
+ // SAFETY: The type invariants guarantee that the value is pinned.
+ unsafe { Pin::new_unchecked(obj) }
+ }
+}
+
+impl<T: ?Sized> Deref for Ref<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
+ // safe to dereference it.
+ unsafe { &self.ptr.as_ref().data }
+ }
+}
+
+impl<T: ?Sized> Clone for Ref<T> {
+ fn clone(&self) -> Self {
+ // INVARIANT: C `refcount_inc` saturates the refcount, so it cannot overflow to zero.
+ // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
+ // safe to increment the refcount.
+ unsafe { rust_helper_refcount_inc(self.ptr.as_ref().refcount.get()) };
+ Self {
+ ptr: self.ptr,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized> AsRef<T> for Ref<T> {
+ fn as_ref(&self) -> &T {
+ // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
+ // safe to dereference it.
+ unsafe { &self.ptr.as_ref().data }
+ }
+}
+
+impl<T: ?Sized> Drop for Ref<T> {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariant, there is necessarily a reference to the object. We cannot
+ // touch `refcount` after it's decremented to a non-zero value because another thread/CPU
+ // may concurrently decrement it to zero and free it. It is ok to have a raw pointer to
+ // freed/invalid memory as long as it is never dereferenced.
+ let refcount = unsafe { self.ptr.as_ref() }.refcount.get();
+
+ // INVARIANT: If the refcount reaches zero, there are no other instances of `Ref`, and
+ // this instance is being dropped, so the broken invariant is not observable.
+ // SAFETY: Also by the type invariant, we are allowed to decrement the refcount.
+ let is_zero = unsafe { rust_helper_refcount_dec_and_test(refcount) };
+ if is_zero {
+ // The count reached zero, we must free the memory.
+ //
+ // SAFETY: The pointer was initialised from the result of `Box::leak`.
+ unsafe { Box::from_raw(self.ptr.as_ptr()) };
+ }
+ }
+}
+
+/// A borrowed [`Ref`] with manually-managed lifetime.
+///
+/// # Invariants
+///
+/// There are no mutable references to the underlying [`Ref`], and it remains valid for the lifetime
+/// of the [`RefBorrow`] instance.
+pub struct RefBorrow<T: ?Sized> {
+ inner_ref: ManuallyDrop<Ref<T>>,
+}
+
+impl<T: ?Sized> RefBorrow<T> {
+ /// Creates a new [`RefBorrow`] instance.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure the following for the lifetime of the returned [`RefBorrow`] instance:
+ /// 1. That `obj` remains valid;
+ /// 2. That no mutable references to `obj` are created.
+ unsafe fn new(obj: ManuallyDrop<Ref<T>>) -> Self {
+ // INVARIANT: The safety requirements guarantee the invariants.
+ Self { inner_ref: obj }
+ }
+}
+
+impl<T: ?Sized> Deref for RefBorrow<T> {
+ type Target = Ref<T>;
+
+ fn deref(&self) -> &Self::Target {
+ self.inner_ref.deref()
+ }
+}
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
new file mode 100644
index 000000000000..993087e6c233
--- /dev/null
+++ b/rust/kernel/sync/condvar.rs
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A condition variable.
+//!
+//! This module allows Rust code to use the kernel's [`struct wait_queue_head`] as a condition
+//! variable.
+
+use super::{Guard, Lock, NeedsLockClass};
+use crate::{bindings, str::CStr, task::Task};
+use core::{cell::UnsafeCell, marker::PhantomPinned, mem::MaybeUninit, pin::Pin};
+
+extern "C" {
+ fn rust_helper_init_wait(wq: *mut bindings::wait_queue_entry);
+}
+
+/// Safely initialises a [`CondVar`] with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! condvar_init {
+ ($condvar:expr, $name:literal) => {
+ $crate::init_with_lockdep!($condvar, $name)
+ };
+}
+
+// TODO: `bindgen` is not generating this constant. Figure out why.
+const POLLFREE: u32 = 0x4000;
+
+/// Exposes the kernel's [`struct wait_queue_head`] as a condition variable. It allows the caller
+/// to atomically release the given lock and go to sleep; the lock is reacquired when the thread
+/// wakes up. A thread wakes up when notified by another thread (via [`CondVar::notify_one`] or
+/// [`CondVar::notify_all`]) or when it receives a signal.
+///
+/// [`struct wait_queue_head`]: ../../../include/linux/wait.h
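+///
+/// # Examples
+///
+/// A minimal sketch of waiting for a flag, assuming the mutex and condvar have already been
+/// initialised with `mutex_init!` and `condvar_init!`:
+///
+/// ```no_run
+/// # use kernel::prelude::*;
+/// use kernel::sync::{CondVar, Mutex};
+///
+/// fn wait_for_flag(flag: &Mutex<bool>, cv: &CondVar) {
+/// let mut guard = flag.lock();
+/// while !*guard {
+/// // Atomically releases the lock, sleeps, and reacquires the lock on wake-up.
+/// if cv.wait(&mut guard) {
+/// // A signal is pending; stop waiting.
+/// return;
+/// }
+/// }
+/// }
+///
+/// fn set_flag(flag: &Mutex<bool>, cv: &CondVar) {
+/// *flag.lock() = true;
+/// cv.notify_one();
+/// }
+/// ```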
+pub struct CondVar {
+ pub(crate) wait_list: UnsafeCell<bindings::wait_queue_head>,
+
+ /// A condvar needs to be pinned because it contains a [`struct list_head`] that is
+ /// self-referential, so it cannot be safely moved once it is initialised.
+ _pin: PhantomPinned,
+}
+
+// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread.
+unsafe impl Send for CondVar {}
+
+// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads
+// concurrently.
+unsafe impl Sync for CondVar {}
+
+impl CondVar {
+ /// Constructs a new condition variable.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call `CondVar::init` before using the condition variable.
+ pub unsafe fn new() -> Self {
+ Self {
+ wait_list: UnsafeCell::new(bindings::wait_queue_head::default()),
+ _pin: PhantomPinned,
+ }
+ }
+
+ /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
+ /// thread to sleep. It wakes up when notified by [`CondVar::notify_one`] or
+ /// [`CondVar::notify_all`], or when the thread receives a signal.
+ ///
+ /// Returns whether there is a signal pending.
+ #[must_use = "wait returns if a signal is pending, so the caller must check the return value"]
+ pub fn wait<L: Lock>(&self, guard: &mut Guard<'_, L>) -> bool {
+ let lock = guard.lock;
+ let mut wait = MaybeUninit::<bindings::wait_queue_entry>::uninit();
+
+ // SAFETY: `wait` points to valid memory.
+ unsafe { rust_helper_init_wait(wait.as_mut_ptr()) };
+
+ // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ unsafe {
+ bindings::prepare_to_wait_exclusive(
+ self.wait_list.get(),
+ wait.as_mut_ptr(),
+ bindings::TASK_INTERRUPTIBLE as _,
+ );
+ }
+
+ // SAFETY: The guard is evidence that the caller owns the lock.
+ unsafe { lock.unlock() };
+
+ // SAFETY: No arguments, switches to another thread.
+ unsafe { bindings::schedule() };
+
+ lock.lock_noguard();
+
+ // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ unsafe { bindings::finish_wait(self.wait_list.get(), wait.as_mut_ptr()) };
+
+ Task::current().signal_pending()
+ }
+
+ /// Calls the kernel function to notify the appropriate number of threads with the given flags.
+ fn notify(&self, count: i32, flags: u32) {
+ // SAFETY: `wait_list` points to valid memory.
+ unsafe {
+ bindings::__wake_up(
+ self.wait_list.get(),
+ bindings::TASK_NORMAL,
+ count,
+ flags as _,
+ )
+ };
+ }
+
+ /// Wakes a single waiter up, if any. This is not 'sticky' in the sense that if no thread is
+ /// waiting, the notification is lost completely (as opposed to automatically waking up the
+ /// next waiter).
+ pub fn notify_one(&self) {
+ self.notify(1, 0);
+ }
+
+ /// Wakes all waiters up, if any. This is not 'sticky' in the sense that if no thread is
+ /// waiting, the notification is lost completely (as opposed to automatically waking up the
+ /// next waiter).
+ pub fn notify_all(&self) {
+ self.notify(0, 0);
+ }
+
+ /// Wakes all waiters up. If they were added by `epoll`, they are also removed from the list of
+ /// waiters. This is useful when cleaning up a condition variable that may be waited on by
+ /// threads that use `epoll`.
+ pub fn free_waiters(&self) {
+ self.notify(1, bindings::POLLHUP | POLLFREE);
+ }
+}
+
+impl NeedsLockClass for CondVar {
+ unsafe fn init(self: Pin<&mut Self>, name: &'static CStr, key: *mut bindings::lock_class_key) {
+ unsafe { bindings::__init_waitqueue_head(self.wait_list.get(), name.as_char_ptr(), key) };
+ }
+}
diff --git a/rust/kernel/sync/guard.rs b/rust/kernel/sync/guard.rs
new file mode 100644
index 000000000000..84e5d319a5fd
--- /dev/null
+++ b/rust/kernel/sync/guard.rs
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A generic lock guard and trait.
+//!
+//! This module contains a lock guard that can be used with any locking primitive that implements
+//! the ([`Lock`]) trait. It also contains the definition of the trait, which can be leveraged by
+//! other constructs to work on generic locking primitives.
+
+/// Allows mutual exclusion primitives that implement the [`Lock`] trait to automatically unlock
+/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
+/// protected by the lock.
+#[must_use = "the lock unlocks immediately when the guard is unused"]
+pub struct Guard<'a, L: Lock + ?Sized> {
+ pub(crate) lock: &'a L,
+}
+
+// SAFETY: `Guard` is `Sync` when the data protected by the lock is also `Sync`. This is more
+// conservative than the default compiler implementation; more details can be found at
+// https://github.com/rust-lang/rust/issues/41622 -- it refers to `MutexGuard` from the standard
+// library.
+unsafe impl<L> Sync for Guard<'_, L>
+where
+ L: Lock + ?Sized,
+ L::Inner: Sync,
+{
+}
+
+impl<L: Lock + ?Sized> core::ops::Deref for Guard<'_, L> {
+ type Target = L::Inner;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
+ unsafe { &*self.lock.locked_data().get() }
+ }
+}
+
+impl<L: Lock + ?Sized> core::ops::DerefMut for Guard<'_, L> {
+ fn deref_mut(&mut self) -> &mut L::Inner {
+ // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
+ unsafe { &mut *self.lock.locked_data().get() }
+ }
+}
+
+impl<L: Lock + ?Sized> Drop for Guard<'_, L> {
+ fn drop(&mut self) {
+ // SAFETY: The caller owns the lock, so it is safe to unlock it.
+ unsafe { self.lock.unlock() };
+ }
+}
+
+impl<'a, L: Lock + ?Sized> Guard<'a, L> {
+ /// Constructs a new lock guard.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that it owns the lock.
+ pub(crate) unsafe fn new(lock: &'a L) -> Self {
+ Self { lock }
+ }
+}
+
+/// A generic mutual exclusion primitive.
+///
+/// [`Guard`] is written such that any mutual exclusion primitive that can implement this trait can
+/// also benefit from having an automatic way to unlock itself.
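+///
+/// # Examples
+///
+/// A sketch of implementing [`Lock`] for a toy busy-wait primitive, shown for illustration only;
+/// real code should use kernel-backed primitives such as [`super::Mutex`] and
+/// [`super::SpinLock`]:
+///
+/// ```
+/// use core::cell::UnsafeCell;
+/// use core::sync::atomic::{AtomicBool, Ordering};
+/// use kernel::sync::Lock;
+///
+/// struct ToyLock<T> {
+/// locked: AtomicBool,
+/// data: UnsafeCell<T>,
+/// }
+///
+/// impl<T> Lock for ToyLock<T> {
+/// type Inner = T;
+///
+/// fn lock_noguard(&self) {
+/// // Spin until `locked` flips from `false` to `true`.
+/// while self.locked.swap(true, Ordering::Acquire) {}
+/// }
+///
+/// unsafe fn unlock(&self) {
+/// self.locked.store(false, Ordering::Release);
+/// }
+///
+/// fn locked_data(&self) -> &UnsafeCell<T> {
+/// &self.data
+/// }
+/// }
+/// ```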
+pub trait Lock {
+ /// The type of the data protected by the lock.
+ type Inner: ?Sized;
+
+ /// Acquires the lock, making the caller its owner.
+ fn lock_noguard(&self);
+
+ /// Releases the lock, giving up ownership of the lock.
+ ///
+ /// # Safety
+ ///
+ /// It must only be called by the current owner of the lock.
+ unsafe fn unlock(&self);
+
+ /// Returns the data protected by the lock.
+ fn locked_data(&self) -> &core::cell::UnsafeCell<Self::Inner>;
+}
diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
new file mode 100644
index 000000000000..d3e0b0d5e9b4
--- /dev/null
+++ b/rust/kernel/sync/locked_by.rs
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A wrapper for data protected by a lock that does not wrap it.
+
+use super::{Guard, Lock};
+use core::{cell::UnsafeCell, ops::Deref, ptr};
+
+/// Allows access to some data to be serialised by a lock that does not wrap it.
+///
+/// In most cases, data protected by a lock is wrapped by the appropriate lock type, e.g.,
+/// [`super::Mutex`] or [`super::SpinLock`]. [`LockedBy`] is meant for cases when this is not
+/// possible, for example, when a container has a lock and some data in the contained elements
+/// needs to be protected by the same lock.
+///
+/// [`LockedBy`] wraps the data in lieu of another locking primitive, and only allows access to it
+/// when the caller shows evidence that the 'external' lock is locked.
+///
+/// # Example
+///
+/// The following is an example for illustrative purposes: `InnerDirectory::bytes_used` is an
+/// aggregate of all `InnerFile::bytes_used` and must be kept consistent; so we wrap `InnerFile` in
+/// a `LockedBy` so that it shares a lock with `InnerDirectory`. This allows us to enforce at
+/// compile-time that access to `InnerFile` is only granted when an `InnerDirectory` is also
+/// locked; we enforce at run time that the right `InnerDirectory` is locked.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::sync::{LockedBy, Mutex};
+///
+/// struct InnerFile {
+/// bytes_used: u64,
+/// }
+///
+/// struct File {
+/// name: String,
+/// inner: LockedBy<InnerFile, Mutex<InnerDirectory>>,
+/// }
+///
+/// struct InnerDirectory {
+/// /// The sum of the bytes used by all files.
+/// bytes_used: u64,
+/// files: Vec<File>,
+/// }
+///
+/// struct Directory {
+/// name: String,
+/// inner: Mutex<InnerDirectory>,
+/// }
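+///
+/// // A sketch of accessing the protected data: locking `Directory::inner` yields the guard that
+/// // `access` requires as evidence.
+/// fn print_bytes_used(dir: &Directory, file: &File) {
+/// let guard = dir.inner.lock();
+/// let inner_file = file.inner.access(&guard);
+/// pr_info!("{} uses {} bytes\n", file.name, inner_file.bytes_used);
+/// }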
+/// ```
+pub struct LockedBy<T: ?Sized, L: Lock + ?Sized> {
+ owner: *const L::Inner,
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `LockedBy` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send, L: Lock + ?Sized> Send for LockedBy<T, L> {}
+
+// SAFETY: `LockedBy` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send, L: Lock + ?Sized> Sync for LockedBy<T, L> {}
+
+impl<T, L: Lock + ?Sized> LockedBy<T, L> {
+ /// Constructs a new instance of [`LockedBy`].
+ ///
+ /// It stores a raw pointer to the owner that is never dereferenced. It is only used to ensure
+ /// that the right owner is being used to access the protected data. If the owner is freed, the
+ /// data becomes inaccessible; if another instance of the owner is allocated *at the same
+ /// memory location*, the data becomes accessible again. None of this affects memory safety
+ /// because in any case at most one thread (or CPU) can access the protected data at a time.
+ pub fn new(owner: &L, data: T) -> Self {
+ Self {
+ owner: owner.locked_data().get(),
+ data: UnsafeCell::new(data),
+ }
+ }
+}
+
+impl<T: ?Sized, L: Lock + ?Sized> LockedBy<T, L> {
+ /// Returns a reference to the protected data when the caller provides evidence (via a
+ /// [`Guard`]) that the owner is locked.
+ pub fn access<'a>(&'a self, guard: &'a Guard<'_, L>) -> &'a T {
+ if !ptr::eq(guard.deref(), self.owner) {
+ panic!("guard does not match owner");
+ }
+
+ // SAFETY: `guard` is evidence that the owner is locked.
+ unsafe { &*self.data.get() }
+ }
+
+ /// Returns a mutable reference to the protected data when the caller provides evidence (via a
+ /// mutable [`Guard`]) that the owner is locked mutably.
+ pub fn access_mut<'a>(&'a self, guard: &'a mut Guard<'_, L>) -> &'a mut T {
+ if !ptr::eq(guard.deref().deref(), self.owner) {
+ panic!("guard does not match owner");
+ }
+
+ // SAFETY: `guard` is evidence that the owner is locked.
+ unsafe { &mut *self.data.get() }
+ }
+
+ /// Returns a mutable reference to the protected data when the caller provides evidence (via a
+ /// mutable owner) that the owner is locked mutably. Showing a mutable reference to the owner
+ /// is sufficient because we know no other references can exist to it.
+ pub fn access_from_mut<'a>(&'a self, owner: &'a mut L::Inner) -> &'a mut T {
+ if !ptr::eq(owner, self.owner) {
+ panic!("mismatched owners");
+ }
+
+ // SAFETY: `owner` is evidence that there is only one reference to the owner.
+ unsafe { &mut *self.data.get() }
+ }
+}
diff --git a/rust/kernel/sync/mod.rs b/rust/kernel/sync/mod.rs
new file mode 100644
index 000000000000..ce863109c06e
--- /dev/null
+++ b/rust/kernel/sync/mod.rs
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Synchronisation primitives.
+//!
+//! This module contains the kernel APIs related to synchronisation that have been ported or
+//! wrapped for use by Rust code in the kernel.
+//!
+//! # Example
+//!
+//! ```no_run
+//! # use kernel::prelude::*;
+//! # use kernel::mutex_init;
+//! # use kernel::sync::Mutex;
+//! # use alloc::boxed::Box;
+//! # use core::pin::Pin;
+//! // SAFETY: `init` is called below.
+//! let mut data = Pin::from(Box::new(unsafe { Mutex::new(0) }));
+//! mutex_init!(data.as_mut(), "test::data");
+//! *data.lock() = 10;
+//! pr_info!("{}\n", *data.lock());
+//! ```
+
+use crate::str::CStr;
+use crate::{bindings, c_types};
+use core::pin::Pin;
+
+mod arc;
+mod condvar;
+mod guard;
+mod locked_by;
+mod mutex;
+mod spinlock;
+
+pub use arc::{Ref, RefBorrow};
+pub use condvar::CondVar;
+pub use guard::{Guard, Lock};
+pub use locked_by::LockedBy;
+pub use mutex::Mutex;
+pub use spinlock::SpinLock;
+
+extern "C" {
+ fn rust_helper_cond_resched() -> c_types::c_int;
+}
+
+/// Safely initialises an object that has an `init` function that takes a name and a lock class as
+/// arguments; examples of these are [`Mutex`] and [`SpinLock`]. Each of them also provides a more
+/// specialised macro that uses this one.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! init_with_lockdep {
+ ($obj:expr, $name:literal) => {{
+ static mut CLASS: core::mem::MaybeUninit<$crate::bindings::lock_class_key> =
+ core::mem::MaybeUninit::uninit();
+ let obj = $obj;
+ let name = $crate::c_str!($name);
+ // SAFETY: `CLASS` is never used by Rust code directly; the kernel may change it though.
+ #[allow(unused_unsafe)]
+ unsafe {
+ $crate::sync::NeedsLockClass::init(obj, name, CLASS.as_mut_ptr())
+ };
+ }};
+}
+
+/// A trait for types that need a lock class during initialisation.
+///
+/// Implementers of this trait benefit from the [`init_with_lockdep`] macro that generates a new
+/// class for each initialisation call site.
+pub trait NeedsLockClass {
+ /// Initialises the type instance so that it can be safely used.
+ ///
+ /// Callers are encouraged to use the [`init_with_lockdep`] macro as it automatically creates a
+ /// new lock class on each usage.
+ ///
+ /// # Safety
+ ///
+ /// `key` must point to a valid memory location as it will be used by the kernel.
+ unsafe fn init(self: Pin<&mut Self>, name: &'static CStr, key: *mut bindings::lock_class_key);
+}
+
+/// Reschedules the caller's task if needed.
+pub fn cond_resched() -> bool {
+ // SAFETY: No arguments, reschedules `current` if needed.
+ unsafe { rust_helper_cond_resched() != 0 }
+}
diff --git a/rust/kernel/sync/mutex.rs b/rust/kernel/sync/mutex.rs
new file mode 100644
index 000000000000..36605e8cdd62
--- /dev/null
+++ b/rust/kernel/sync/mutex.rs
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel mutex.
+//!
+//! This module allows Rust code to use the kernel's [`struct mutex`].
+
+use super::{Guard, Lock, NeedsLockClass};
+use crate::bindings;
+use crate::str::CStr;
+use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
+
+/// Safely initialises a [`Mutex`] with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! mutex_init {
+ ($mutex:expr, $name:literal) => {
+ $crate::init_with_lockdep!($mutex, $name)
+ };
+}
+
+/// Exposes the kernel's [`struct mutex`]. When multiple threads attempt to lock the same mutex,
+/// only one at a time is allowed to progress; the others will block (sleep) until the mutex is
+/// unlocked, at which point another thread will be allowed to wake up and make progress.
+///
+/// A [`Mutex`] must first be initialised with a call to [`Mutex::init`] before it can be used. The
+/// [`mutex_init`] macro is provided to automatically assign a new lock class to a mutex instance.
+///
+/// Since it may block, [`Mutex`] needs to be used with care in atomic contexts.
+///
+/// [`struct mutex`]: ../../../include/linux/mutex.h
+pub struct Mutex<T: ?Sized> {
+ /// The kernel `struct mutex` object.
+ mutex: UnsafeCell<bindings::mutex>,
+
+ /// A mutex needs to be pinned because it contains a [`struct list_head`] that is
+ /// self-referential, so it cannot be safely moved once it is initialised.
+ _pin: PhantomPinned,
+
+ /// The data protected by the mutex.
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `Mutex` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+
+// SAFETY: `Mutex` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+
+impl<T> Mutex<T> {
+ /// Constructs a new mutex.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`Mutex::init`] before using the mutex.
+ pub unsafe fn new(t: T) -> Self {
+ Self {
+ mutex: UnsafeCell::new(bindings::mutex::default()),
+ data: UnsafeCell::new(t),
+ _pin: PhantomPinned,
+ }
+ }
+}
+
+impl<T: ?Sized> Mutex<T> {
+ /// Locks the mutex and gives the caller access to the data protected by it. Only one thread at
+ /// a time is allowed to access the protected data.
+ pub fn lock(&self) -> Guard<'_, Self> {
+ self.lock_noguard();
+ // SAFETY: The mutex was just acquired.
+ unsafe { Guard::new(self) }
+ }
+}
+
+impl<T: ?Sized> NeedsLockClass for Mutex<T> {
+ unsafe fn init(self: Pin<&mut Self>, name: &'static CStr, key: *mut bindings::lock_class_key) {
+ unsafe { bindings::__mutex_init(self.mutex.get(), name.as_char_ptr(), key) };
+ }
+}
+
+extern "C" {
+ fn rust_helper_mutex_lock(mutex: *mut bindings::mutex);
+}
+
+impl<T: ?Sized> Lock for Mutex<T> {
+ type Inner = T;
+
+ fn lock_noguard(&self) {
+ // SAFETY: `mutex` points to valid memory.
+ unsafe {
+ rust_helper_mutex_lock(self.mutex.get());
+ }
+ }
+
+ unsafe fn unlock(&self) {
+ unsafe { bindings::mutex_unlock(self.mutex.get()) };
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
diff --git a/rust/kernel/sync/spinlock.rs b/rust/kernel/sync/spinlock.rs
new file mode 100644
index 000000000000..c6e38ef85b00
--- /dev/null
+++ b/rust/kernel/sync/spinlock.rs
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel spinlock.
+//!
+//! This module allows Rust code to use the kernel's [`struct spinlock`].
+//!
+//! See <https://www.kernel.org/doc/Documentation/locking/spinlocks.txt>.
+
+use super::{Guard, Lock, NeedsLockClass};
+use crate::str::CStr;
+use crate::{bindings, c_types};
+use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
+
+extern "C" {
+ #[allow(improper_ctypes)]
+ fn rust_helper_spin_lock_init(
+ lock: *mut bindings::spinlock_t,
+ name: *const c_types::c_char,
+ key: *mut bindings::lock_class_key,
+ );
+ fn rust_helper_spin_lock(lock: *mut bindings::spinlock);
+ fn rust_helper_spin_unlock(lock: *mut bindings::spinlock);
+}
+
+/// Safely initialises a [`SpinLock`] with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! spinlock_init {
+ ($spinlock:expr, $name:literal) => {
+ $crate::init_with_lockdep!($spinlock, $name)
+ };
+}
+
+/// Exposes the kernel's [`spinlock_t`]. When multiple CPUs attempt to lock the same spinlock, only
+/// one at a time is allowed to progress; the others will block (spinning) until the spinlock is
+/// unlocked, at which point another CPU will be allowed to make progress.
+///
+/// A [`SpinLock`] must first be initialised with a call to [`SpinLock::init`] before it can be
+/// used. The [`spinlock_init`] macro is provided to automatically assign a new lock class to a
+/// spinlock instance.
+///
+/// [`SpinLock`] does not manage the interrupt state, so it can be used in only two cases: (a) when
+/// the caller knows that interrupts are disabled, or (b) when callers never use it in interrupt
+/// handlers (in which case it is ok for interrupts to be enabled).
+///
+/// [`spinlock_t`]: ../../../include/linux/spinlock.h
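+///
+/// # Examples
+///
+/// A minimal sketch, mirroring the `Mutex` example from the module documentation:
+///
+/// ```no_run
+/// # use kernel::prelude::*;
+/// # use kernel::spinlock_init;
+/// # use kernel::sync::SpinLock;
+/// # use alloc::boxed::Box;
+/// # use core::pin::Pin;
+/// // SAFETY: `init` is called below.
+/// let mut data = Pin::from(Box::new(unsafe { SpinLock::new(0) }));
+/// spinlock_init!(data.as_mut(), "example::data");
+/// *data.lock() = 10;
+/// pr_info!("{}\n", *data.lock());
+/// ```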
+pub struct SpinLock<T: ?Sized> {
+ spin_lock: UnsafeCell<bindings::spinlock>,
+
+ /// Spinlocks are architecture-defined, so we conservatively require them to be pinned in case
+ /// some architecture uses self-references now or in the future.
+ _pin: PhantomPinned,
+
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `SpinLock` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send> Send for SpinLock<T> {}
+
+// SAFETY: `SpinLock` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send> Sync for SpinLock<T> {}
+
+impl<T> SpinLock<T> {
+ /// Constructs a new spinlock.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`SpinLock::init`] before using the spinlock.
+ pub unsafe fn new(t: T) -> Self {
+ Self {
+ spin_lock: UnsafeCell::new(bindings::spinlock::default()),
+ data: UnsafeCell::new(t),
+ _pin: PhantomPinned,
+ }
+ }
+}
+
+impl<T: ?Sized> SpinLock<T> {
+ /// Locks the spinlock and gives the caller access to the data protected by it. Only one thread
+ /// at a time is allowed to access the protected data.
+ pub fn lock(&self) -> Guard<'_, Self> {
+ self.lock_noguard();
+ // SAFETY: The spinlock was just acquired.
+ unsafe { Guard::new(self) }
+ }
+}
+
+impl<T: ?Sized> NeedsLockClass for SpinLock<T> {
+ unsafe fn init(self: Pin<&mut Self>, name: &'static CStr, key: *mut bindings::lock_class_key) {
+ unsafe { rust_helper_spin_lock_init(self.spin_lock.get(), name.as_char_ptr(), key) };
+ }
+}
+
+impl<T: ?Sized> Lock for SpinLock<T> {
+ type Inner = T;
+
+ fn lock_noguard(&self) {
+ // SAFETY: `spin_lock` points to valid memory.
+ unsafe { rust_helper_spin_lock(self.spin_lock.get()) };
+ }
+
+ unsafe fn unlock(&self) {
+ unsafe { rust_helper_spin_unlock(self.spin_lock.get()) };
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
diff --git a/rust/kernel/sysctl.rs b/rust/kernel/sysctl.rs
new file mode 100644
index 000000000000..42a2c7c81824
--- /dev/null
+++ b/rust/kernel/sysctl.rs
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! System control.
+//!
+//! C header: [`include/linux/sysctl.h`](../../../../include/linux/sysctl.h)
+//!
+//! Reference: <https://www.kernel.org/doc/Documentation/sysctl/README>
+
+use alloc::boxed::Box;
+use alloc::vec;
+use core::mem;
+use core::ptr;
+use core::sync::atomic;
+
+use crate::{
+ bindings, c_types, error,
+ io_buffer::IoBufferWriter,
+ str::CStr,
+ types,
+ user_ptr::{UserSlicePtr, UserSlicePtrWriter},
+};
+
+/// Sysctl storage.
+pub trait SysctlStorage: Sync {
+ /// Stores a value parsed from the given byte slice.
+ fn store_value(&self, data: &[u8]) -> (usize, error::Result);
+
+ /// Reads the stored value into the given [`UserSlicePtrWriter`].
+ fn read_value(&self, data: &mut UserSlicePtrWriter) -> (usize, error::Result);
+}
+
+fn trim_whitespace(mut data: &[u8]) -> &[u8] {
+ while !data.is_empty() && (data[0] == b' ' || data[0] == b'\t' || data[0] == b'\n') {
+ data = &data[1..];
+ }
+ while !data.is_empty()
+ && (data[data.len() - 1] == b' '
+ || data[data.len() - 1] == b'\t'
+ || data[data.len() - 1] == b'\n')
+ {
+ data = &data[..data.len() - 1];
+ }
+ data
+}
+
+impl<T> SysctlStorage for &T
+where
+ T: SysctlStorage,
+{
+ fn store_value(&self, data: &[u8]) -> (usize, error::Result) {
+ (*self).store_value(data)
+ }
+
+ fn read_value(&self, data: &mut UserSlicePtrWriter) -> (usize, error::Result) {
+ (*self).read_value(data)
+ }
+}
+
+impl SysctlStorage for atomic::AtomicBool {
+ fn store_value(&self, data: &[u8]) -> (usize, error::Result) {
+ let result = match trim_whitespace(data) {
+ b"0" => {
+ self.store(false, atomic::Ordering::Relaxed);
+ Ok(())
+ }
+ b"1" => {
+ self.store(true, atomic::Ordering::Relaxed);
+ Ok(())
+ }
+ _ => Err(error::Error::EINVAL),
+ };
+ (data.len(), result)
+ }
+
+ fn read_value(&self, data: &mut UserSlicePtrWriter) -> (usize, error::Result) {
+ let value = if self.load(atomic::Ordering::Relaxed) {
+ b"1\n"
+ } else {
+ b"0\n"
+ };
+ (value.len(), data.write_slice(value))
+ }
+}
+
+/// Holds a single `sysctl` entry (and its table).
+pub struct Sysctl<T: SysctlStorage> {
+ inner: Box<T>,
+ // Responsible for keeping the `ctl_table` alive.
+ _table: Box<[bindings::ctl_table]>,
+ header: *mut bindings::ctl_table_header,
+}
+
+// SAFETY: The only public method we have is `get()`, which returns `&T`, and
+// `T: Sync`. Any new methods must adhere to this requirement.
+unsafe impl<T: SysctlStorage> Sync for Sysctl<T> {}
+
+unsafe extern "C" fn proc_handler<T: SysctlStorage>(
+ ctl: *mut bindings::ctl_table,
+ write: c_types::c_int,
+ buffer: *mut c_types::c_void,
+ len: *mut usize,
+ ppos: *mut bindings::loff_t,
+) -> c_types::c_int {
+ // If we are reading from some offset other than the beginning of the file,
+ // return an empty read to signal EOF.
+ if unsafe { *ppos } != 0 && write == 0 {
+ unsafe { *len = 0 };
+ return 0;
+ }
+
+ let data = unsafe { UserSlicePtr::new(buffer, *len) };
+ let storage = unsafe { &*((*ctl).data as *const T) };
+ let (bytes_processed, result) = if write != 0 {
+ let data = match data.read_all() {
+ Ok(r) => r,
+ Err(e) => return e.to_kernel_errno(),
+ };
+ storage.store_value(&data)
+ } else {
+ let mut writer = data.writer();
+ storage.read_value(&mut writer)
+ };
+ unsafe { *len = bytes_processed };
+ unsafe { *ppos += *len as bindings::loff_t };
+ match result {
+ Ok(()) => 0,
+ Err(e) => e.to_kernel_errno(),
+ }
+}
+
+impl<T: SysctlStorage> Sysctl<T> {
+ /// Registers a single entry in `sysctl`.
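+ ///
+ /// A sketch of registering a boolean flag (the `rust/example` path and `my_flag` name below are
+ /// illustrative only):
+ ///
+ /// ```ignore
+ /// use core::sync::atomic::AtomicBool;
+ /// use kernel::{c_str, sysctl::Sysctl, types::Mode};
+ ///
+ /// fn register_flag() -> kernel::Result<Sysctl<AtomicBool>> {
+ ///     Sysctl::register(
+ ///         c_str!("rust/example"),
+ ///         c_str!("my_flag"),
+ ///         AtomicBool::new(false),
+ ///         Mode::from_int(0o644),
+ ///     )
+ /// }
+ /// ```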
+ pub fn register(
+ path: &'static CStr,
+ name: &'static CStr,
+ storage: T,
+ mode: types::Mode,
+ ) -> error::Result<Sysctl<T>> {
+ if name.contains(&b'/') {
+ return Err(error::Error::EINVAL);
+ }
+
+ let storage = Box::try_new(storage)?;
+ let mut table = vec![
+ bindings::ctl_table {
+ procname: name.as_char_ptr(),
+ mode: mode.as_int(),
+ data: &*storage as *const T as *mut c_types::c_void,
+ proc_handler: Some(proc_handler::<T>),
+
+ maxlen: 0,
+ child: ptr::null_mut(),
+ poll: ptr::null_mut(),
+ extra1: ptr::null_mut(),
+ extra2: ptr::null_mut(),
+ },
+ unsafe { mem::zeroed() },
+ ]
+ .try_into_boxed_slice()?;
+
+ let result = unsafe { bindings::register_sysctl(path.as_char_ptr(), table.as_mut_ptr()) };
+ if result.is_null() {
+ return Err(error::Error::ENOMEM);
+ }
+
+ Ok(Sysctl {
+ inner: storage,
+ _table: table,
+ header: result,
+ })
+ }
+
+ /// Gets the storage.
+ pub fn get(&self) -> &T {
+ &self.inner
+ }
+}
+
+impl<T: SysctlStorage> Drop for Sysctl<T> {
+ fn drop(&mut self) {
+ unsafe {
+ bindings::unregister_sysctl_table(self.header);
+ }
+ self.header = ptr::null_mut();
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_trim_whitespace() {
+ assert_eq!(trim_whitespace(b"foo "), b"foo");
+ assert_eq!(trim_whitespace(b" foo"), b"foo");
+ assert_eq!(trim_whitespace(b" foo "), b"foo");
+ }
+}
diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
new file mode 100644
index 000000000000..dcf376b992ec
--- /dev/null
+++ b/rust/kernel/task.rs
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Tasks (threads and processes).
+//!
+//! C header: [`include/linux/sched.h`](../../../../include/linux/sched.h).
+
+use crate::{bindings, c_types};
+use core::{marker::PhantomData, mem::ManuallyDrop, ops::Deref};
+
+extern "C" {
+ #[allow(improper_ctypes)]
+ fn rust_helper_signal_pending(t: *const bindings::task_struct) -> c_types::c_int;
+ #[allow(improper_ctypes)]
+ fn rust_helper_get_current() -> *mut bindings::task_struct;
+ #[allow(improper_ctypes)]
+ fn rust_helper_get_task_struct(t: *mut bindings::task_struct);
+ #[allow(improper_ctypes)]
+ fn rust_helper_put_task_struct(t: *mut bindings::task_struct);
+}
+
+/// Wraps the kernel's `struct task_struct`.
+///
+/// # Invariants
+///
+/// The pointer `Task::ptr` is non-null and valid. Its reference count is also non-zero.
+///
+/// # Examples
+///
+/// The following is an example of getting the PID of the current thread with zero additional cost
+/// when compared to the C version:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::task::Task;
+///
+/// # fn test() {
+/// Task::current().pid();
+/// # }
+/// ```
+///
+/// Getting the PID of the current process, also at zero additional cost:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::task::Task;
+///
+/// # fn test() {
+/// Task::current().group_leader().pid();
+/// # }
+/// ```
+///
+/// Getting the current task and storing it in some struct. The reference count is automatically
+/// incremented when creating `State` and decremented when it is dropped:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::task::Task;
+///
+/// struct State {
+/// creator: Task,
+/// index: u32,
+/// }
+///
+/// impl State {
+/// fn new() -> Self {
+/// Self {
+/// creator: Task::current().clone(),
+/// index: 0,
+/// }
+/// }
+/// }
+/// ```
+pub struct Task {
+ pub(crate) ptr: *mut bindings::task_struct,
+}
+
+// SAFETY: Given that the task is referenced, it is OK to send it to another thread.
+unsafe impl Send for Task {}
+
+// SAFETY: It's OK to access `Task` through references from other threads because we're either
+// accessing properties that don't change (e.g., `pid`, `group_leader`) or that are properly
+// synchronised by C code (e.g., `signal_pending`).
+unsafe impl Sync for Task {}
+
+/// The type of process identifiers (PIDs).
+type Pid = bindings::pid_t;
+
+impl Task {
+ /// Returns a task reference for the currently executing task/thread.
+ pub fn current<'a>() -> TaskRef<'a> {
+ // SAFETY: Just an FFI call.
+ let ptr = unsafe { rust_helper_get_current() };
+
+ // SAFETY: If the current thread is still running, the current task is valid. Given
+ // that `TaskRef` is not `Send`, we know it cannot be transferred to another thread (where
+ // it could potentially outlive the caller).
+ unsafe { TaskRef::from_ptr(ptr) }
+ }
+
+ /// Returns the group leader of the given task.
+ pub fn group_leader(&self) -> TaskRef<'_> {
+ // SAFETY: By the type invariant, we know that `self.ptr` is non-null and valid.
+ let ptr = unsafe { (*self.ptr).group_leader };
+
+ // SAFETY: The lifetime of the returned task reference is tied to the lifetime of `self`,
+ // and given that a task has a reference to its group leader, we know it must be valid for
+ // the lifetime of the returned task reference.
+ unsafe { TaskRef::from_ptr(ptr) }
+ }
+
+ /// Returns the PID of the given task.
+ pub fn pid(&self) -> Pid {
+ // SAFETY: By the type invariant, we know that `self.ptr` is non-null and valid.
+ unsafe { (*self.ptr).pid }
+ }
+
+ /// Determines whether the given task has pending signals.
+ pub fn signal_pending(&self) -> bool {
+ // SAFETY: By the type invariant, we know that `self.ptr` is non-null and valid.
+ unsafe { rust_helper_signal_pending(self.ptr) != 0 }
+ }
+}
+
+impl PartialEq for Task {
+ fn eq(&self, other: &Self) -> bool {
+ self.ptr == other.ptr
+ }
+}
+
+impl Eq for Task {}
+
+impl Clone for Task {
+ fn clone(&self) -> Self {
+ // SAFETY: The type invariants guarantee that `self.ptr` has a non-zero reference count.
+ unsafe { rust_helper_get_task_struct(self.ptr) };
+
+ // INVARIANT: We incremented the reference count to account for the new `Task` being
+ // created.
+ Self { ptr: self.ptr }
+ }
+}
+
+impl Drop for Task {
+ fn drop(&mut self) {
+ // INVARIANT: We may decrement the refcount to zero, but the `Task` is being dropped, so
+ // this is not observable.
+ // SAFETY: The type invariants guarantee that `Task::ptr` has a non-zero reference count.
+ unsafe { rust_helper_put_task_struct(self.ptr) };
+ }
+}
+
+/// A wrapper for [`Task`] that doesn't automatically decrement the refcount when dropped.
+///
+/// We need the wrapper because [`ManuallyDrop`] alone would allow callers to call
+/// [`ManuallyDrop::into_inner`]. This would make it possible to trigger an unbalanced call to
+/// `put_task_struct` without any `unsafe` blocks.
+///
+/// We make this explicitly not [`Send`] so that we can use it to represent the current thread
+/// without having to increment/decrement its reference count.
+///
+/// # Invariants
+///
+/// The wrapped [`Task`] remains valid for the lifetime of the object.
+pub struct TaskRef<'a> {
+ task: ManuallyDrop<Task>,
+ _not_send: PhantomData<(&'a (), *mut ())>,
+}
+
+impl TaskRef<'_> {
+ /// Constructs a new `struct task_struct` wrapper that doesn't change its reference count.
+ ///
+ /// # Safety
+ ///
+ /// The pointer `ptr` must be non-null and valid for the lifetime of the object.
+ pub(crate) unsafe fn from_ptr(ptr: *mut bindings::task_struct) -> Self {
+ Self {
+ task: ManuallyDrop::new(Task { ptr }),
+ _not_send: PhantomData,
+ }
+ }
+}
+
+// SAFETY: It is OK to share a reference to the current thread with another thread because we know
+// the owner cannot go away while the shared reference exists (and `Task` itself is `Sync`).
+unsafe impl Sync for TaskRef<'_> {}
+
+impl Deref for TaskRef<'_> {
+ type Target = Task;
+
+ fn deref(&self) -> &Self::Target {
+ self.task.deref()
+ }
+}
diff --git a/rust/kernel/traits.rs b/rust/kernel/traits.rs
new file mode 100644
index 000000000000..39a43169bf70
--- /dev/null
+++ b/rust/kernel/traits.rs
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Traits useful to drivers, and their implementations for common types.
+
+use core::{ops::Deref, pin::Pin};
+
+use alloc::{alloc::AllocError, sync::Arc};
+
+/// Trait which provides a fallible version of `pin()` for pointer types.
+///
+/// Common pointer types which implement a `pin()` method include [`Box`](alloc::boxed::Box) and [`Arc`].
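+///
+/// # Examples
+///
+/// A small sketch of fallibly pinning a value into an [`Arc`]:
+///
+/// ```
+/// # use alloc::sync::Arc;
+/// # use kernel::traits::TryPin;
+/// # fn test() -> core::result::Result<(), alloc::alloc::AllocError> {
+/// let value = Arc::<u32>::try_pin(42)?;
+/// assert_eq!(*value, 42);
+/// # Ok(())
+/// # }
+/// ```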
+pub trait TryPin<P: Deref> {
+ /// Constructs a new `Pin<pointer<T>>`. If `T` does not implement [`Unpin`], then data
+ /// will be pinned in memory and unable to be moved. An error will be returned
+ /// if allocation fails.
+ fn try_pin(data: P::Target) -> core::result::Result<Pin<P>, AllocError>;
+}
+
+impl<T> TryPin<Arc<T>> for Arc<T> {
+ fn try_pin(data: T) -> core::result::Result<Pin<Arc<T>>, AllocError> {
+ // SAFETY: the data `T` is exposed only through a `Pin<Arc<T>>`, which
+ // does not allow data to move out of the `Arc`. Therefore it can
+ // never be moved.
+ Ok(unsafe { Pin::new_unchecked(Arc::try_new(data)?) })
+ }
+}
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
new file mode 100644
index 000000000000..2146ee819cc9
--- /dev/null
+++ b/rust/kernel/types.rs
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel types.
+//!
+//! C header: [`include/linux/types.h`](../../../../include/linux/types.h)
+
+use crate::{
+ bindings, c_types,
+ sync::{Ref, RefBorrow},
+};
+use alloc::{boxed::Box, sync::Arc};
+use core::{ops::Deref, pin::Pin, ptr::NonNull};
+
+/// Permissions.
+///
+/// C header: [`include/uapi/linux/stat.h`](../../../../include/uapi/linux/stat.h)
+///
+/// C header: [`include/linux/stat.h`](../../../../include/linux/stat.h)
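+///
+/// # Examples
+///
+/// ```
+/// # use kernel::types::Mode;
+/// // 0o644: readable by everyone, writable only by the owner.
+/// let mode = Mode::from_int(0o644);
+/// assert_eq!(mode.as_int(), 0o644);
+/// ```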
+pub struct Mode(bindings::umode_t);
+
+impl Mode {
+ /// Creates a [`Mode`] from an integer.
+ pub fn from_int(m: u16) -> Mode {
+ Mode(m)
+ }
+
+ /// Returns the mode as an integer.
+ pub fn as_int(&self) -> u16 {
+ self.0
+ }
+}
+
+/// Used to convert an object into a raw pointer that represents it.
+///
+/// It can eventually be converted back into the object. This is used to store objects as pointers
+/// in kernel data structures, for example, to store an implementation of [`FileOperations`] in
+/// `struct file`'s `private_data` field.
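+///
+/// # Examples
+///
+/// A round-trip sketch using the [`Box`] implementation below; the pointer must be passed back
+/// to [`PointerWrapper::from_pointer`] exactly once:
+///
+/// ```
+/// # use alloc::boxed::Box;
+/// # use kernel::types::PointerWrapper;
+/// # fn test() -> kernel::Result {
+/// let ptr = Box::try_new(42u32)?.into_pointer();
+/// // ... hand `ptr` to C code, then get it back ...
+/// // SAFETY: `ptr` came from `into_pointer` above and is not used again afterwards.
+/// let b = unsafe { Box::<u32>::from_pointer(ptr) };
+/// assert_eq!(*b, 42);
+/// # Ok(())
+/// # }
+/// ```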
+pub trait PointerWrapper {
+ /// Type of values borrowed between calls to [`PointerWrapper::into_pointer`] and
+ /// [`PointerWrapper::from_pointer`].
+ type Borrowed: Deref;
+
+ /// Returns the raw pointer.
+ fn into_pointer(self) -> *const c_types::c_void;
+
+ /// Returns a borrowed value.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must have been returned by a previous call to [`PointerWrapper::into_pointer`].
+ /// Additionally, [`PointerWrapper::from_pointer`] can only be called after *all* values
+ /// returned by [`PointerWrapper::borrow`] have been dropped.
+ unsafe fn borrow(ptr: *const c_types::c_void) -> Self::Borrowed;
+
+ /// Returns the instance back from the raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// The passed pointer must come from a previous call to [`PointerWrapper::into_pointer()`].
+ unsafe fn from_pointer(ptr: *const c_types::c_void) -> Self;
+}
+
+impl<T> PointerWrapper for Box<T> {
+ type Borrowed = UnsafeReference<T>;
+
+ fn into_pointer(self) -> *const c_types::c_void {
+ Box::into_raw(self) as _
+ }
+
+ unsafe fn borrow(ptr: *const c_types::c_void) -> Self::Borrowed {
+ // SAFETY: The safety requirements for this function ensure that the object is still alive,
+ // so it is safe to dereference the raw pointer.
+ // The safety requirements also ensure that the object remains alive for the lifetime of
+ // the returned value.
+ unsafe { UnsafeReference::new(&*ptr.cast()) }
+ }
+
+ unsafe fn from_pointer(ptr: *const c_types::c_void) -> Self {
+ // SAFETY: The passed pointer comes from a previous call to [`Self::into_pointer()`].
+ unsafe { Box::from_raw(ptr as _) }
+ }
+}
+
+impl<T> PointerWrapper for Ref<T> {
+ type Borrowed = RefBorrow<T>;
+
+ fn into_pointer(self) -> *const c_types::c_void {
+ Ref::into_usize(self) as _
+ }
+
+ unsafe fn borrow(ptr: *const c_types::c_void) -> Self::Borrowed {
+ // SAFETY: The safety requirements for this function ensure that the underlying object
+ // remains valid for the lifetime of the returned value.
+ unsafe { Ref::borrow_usize(ptr as _) }
+ }
+
+ unsafe fn from_pointer(ptr: *const c_types::c_void) -> Self {
+ // SAFETY: The passed pointer comes from a previous call to [`Self::into_pointer()`].
+ unsafe { Ref::from_usize(ptr as _) }
+ }
+}
+
+impl<T> PointerWrapper for Arc<T> {
+ type Borrowed = UnsafeReference<T>;
+
+ fn into_pointer(self) -> *const c_types::c_void {
+ Arc::into_raw(self) as _
+ }
+
+ unsafe fn borrow(ptr: *const c_types::c_void) -> Self::Borrowed {
+ // SAFETY: The safety requirements for this function ensure that the object is still alive,
+ // so it is safe to dereference the raw pointer.
+ // The safety requirements also ensure that the object remains alive for the lifetime of
+ // the returned value.
+ unsafe { UnsafeReference::new(&*ptr.cast()) }
+ }
+
+ unsafe fn from_pointer(ptr: *const c_types::c_void) -> Self {
+ // SAFETY: The passed pointer comes from a previous call to [`Self::into_pointer()`].
+ unsafe { Arc::from_raw(ptr as _) }
+ }
+}
+
+/// A reference with manually-managed lifetime.
+///
+/// # Invariants
+///
+/// There are no mutable references to the underlying object, and it remains valid for the lifetime
+/// of the [`UnsafeReference`] instance.
+pub struct UnsafeReference<T: ?Sized> {
+ ptr: NonNull<T>,
+}
+
+impl<T: ?Sized> UnsafeReference<T> {
+ /// Creates a new [`UnsafeReference`] instance.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure the following for the lifetime of the returned [`UnsafeReference`]
+ /// instance:
+ /// 1. That `obj` remains valid;
+ /// 2. That no mutable references to `obj` are created.
+ unsafe fn new(obj: &T) -> Self {
+ // INVARIANT: The safety requirements of this function ensure that the invariants hold.
+ Self {
+ ptr: NonNull::from(obj),
+ }
+ }
+}
+
+impl<T: ?Sized> Deref for UnsafeReference<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariant, the object is still valid and alive, and there are no
+ // mutable references to it.
+ unsafe { self.ptr.as_ref() }
+ }
+}
+
+impl<T: PointerWrapper + Deref> PointerWrapper for Pin<T> {
+ type Borrowed = T::Borrowed;
+
+ fn into_pointer(self) -> *const c_types::c_void {
+ // SAFETY: We continue to treat the pointer as pinned by returning just a pointer to it to
+ // the caller.
+ let inner = unsafe { Pin::into_inner_unchecked(self) };
+ inner.into_pointer()
+ }
+
+ unsafe fn borrow(ptr: *const c_types::c_void) -> Self::Borrowed {
+ // SAFETY: The safety requirements for this function are the same as the ones for
+ // `T::borrow`.
+ unsafe { T::borrow(ptr) }
+ }
+
+ unsafe fn from_pointer(p: *const c_types::c_void) -> Self {
+ // SAFETY: The object was originally pinned.
+ // The passed pointer comes from a previous call to `inner::into_pointer()`.
+ unsafe { Pin::new_unchecked(T::from_pointer(p)) }
+ }
+}
+
+/// Runs a cleanup function/closure when dropped.
+///
+/// The [`ScopeGuard::dismiss`] function prevents the cleanup function from running.
+///
+/// # Examples
+///
+/// In the example below, we have multiple exit paths and we want to log regardless of which one is
+/// taken:
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::ScopeGuard;
+/// fn example1(arg: bool) {
+/// let _log = ScopeGuard::new(|| pr_info!("example1 completed\n"));
+///
+/// if arg {
+/// return;
+/// }
+///
+/// // Do something...
+/// }
+/// ```
+///
+/// In the example below, we want to log the same message on all early exits but a different one on
+/// the main exit path:
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::ScopeGuard;
+/// fn example2(arg: bool) {
+/// let log = ScopeGuard::new(|| pr_info!("example2 returned early\n"));
+///
+/// if arg {
+/// return;
+/// }
+///
+/// // (Other early returns...)
+///
+/// log.dismiss();
+/// pr_info!("example2 no early return\n");
+/// }
+/// ```
+pub struct ScopeGuard<T: FnOnce()> {
+ cleanup_func: Option<T>,
+}
+
+impl<T: FnOnce()> ScopeGuard<T> {
+ /// Creates a new cleanup object with the given cleanup function.
+ pub fn new(cleanup_func: T) -> Self {
+ Self {
+ cleanup_func: Some(cleanup_func),
+ }
+ }
+
+ /// Prevents the cleanup function from running.
+ pub fn dismiss(mut self) {
+ self.cleanup_func.take();
+ }
+}
+
+impl<T: FnOnce()> Drop for ScopeGuard<T> {
+ fn drop(&mut self) {
+ // Run the cleanup function if one is still present.
+ if let Some(cleanup) = self.cleanup_func.take() {
+ cleanup();
+ }
+ }
+}
diff --git a/rust/kernel/user_ptr.rs b/rust/kernel/user_ptr.rs
new file mode 100644
index 000000000000..71ec659bcee3
--- /dev/null
+++ b/rust/kernel/user_ptr.rs
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! User pointers.
+//!
+//! C header: [`include/linux/uaccess.h`](../../../../include/linux/uaccess.h)
+
+use crate::{
+ c_types,
+ error::Error,
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ Result,
+};
+use alloc::vec::Vec;
+
+extern "C" {
+ fn rust_helper_copy_from_user(
+ to: *mut c_types::c_void,
+ from: *const c_types::c_void,
+ n: c_types::c_ulong,
+ ) -> c_types::c_ulong;
+
+ fn rust_helper_copy_to_user(
+ to: *mut c_types::c_void,
+ from: *const c_types::c_void,
+ n: c_types::c_ulong,
+ ) -> c_types::c_ulong;
+
+ fn rust_helper_clear_user(to: *mut c_types::c_void, n: c_types::c_ulong) -> c_types::c_ulong;
+}
+
+/// A reference to an area in userspace memory, which can be either
+/// read-only or read-write.
+///
+/// All methods on this struct are safe: invalid pointers return
+/// `EFAULT`. Concurrent access, *including data races to/from userspace
+/// memory*, is permitted, because fundamentally another userspace
+/// thread/process could always be modifying memory at the same time
+/// (in the same way that userspace Rust's [`std::io`] permits data races
+/// with the contents of files on disk). In the presence of a race, the
+/// exact byte values read/written are unspecified but the operation is
+/// well-defined. Kernelspace code should validate its copy of data
+/// after completing a read, and not expect that multiple reads of the
+/// same address will return the same value.
+///
+/// All APIs enforce the invariant that a given byte of memory from userspace
+/// may only be read once. By preventing double-fetches we avoid TOCTOU
+/// vulnerabilities. This is accomplished by taking `self` by value to prevent
+/// obtaining multiple readers on a given [`UserSlicePtr`], and by having the
+/// readers permit only forward reads.
+///
+/// Constructing a [`UserSlicePtr`] performs no checks on the provided
+/// address and length; it can safely be constructed inside a kernel thread
+/// with no current userspace process. Reads and writes wrap the kernel APIs
+/// `copy_from_user` and `copy_to_user`, which check the memory map of the
+/// current process and enforce that the address range is within the user
+/// range (no additional calls to `access_ok` are needed).
+///
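+/// # Examples
+///
+/// A sketch of echoing a user buffer back to userspace; `read_all` and `write_slice` are
+/// provided by the [`IoBufferReader`] and [`IoBufferWriter`] traits:
+///
+/// ```ignore
+/// use kernel::io_buffer::{IoBufferReader, IoBufferWriter};
+/// use kernel::user_ptr::UserSlicePtr;
+///
+/// fn echo(data: UserSlicePtr) -> kernel::Result {
+///     let (mut reader, mut writer) = data.reader_writer();
+///     let bytes = reader.read_all()?;
+///     writer.write_slice(&bytes)
+/// }
+/// ```
+///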
+/// [`std::io`]: https://doc.rust-lang.org/std/io/index.html
+pub struct UserSlicePtr(*mut c_types::c_void, usize);
+
+impl UserSlicePtr {
+ /// Constructs a user slice from a raw pointer and a length in bytes.
+ ///
+ /// # Safety
+ ///
+ /// Callers must be careful to avoid time-of-check-time-of-use
+ /// (TOCTOU) issues. The simplest way is to create a single instance of
+ /// [`UserSlicePtr`] per user memory block as it reads each byte at
+ /// most once.
+ pub unsafe fn new(ptr: *mut c_types::c_void, length: usize) -> Self {
+ UserSlicePtr(ptr, length)
+ }
+
+ /// Reads the entirety of the user slice.
+ ///
+ /// Returns `EFAULT` if the address does not currently point to
+ /// mapped, readable memory.
+ pub fn read_all(self) -> Result<Vec<u8>> {
+ self.reader().read_all()
+ }
+
+ /// Constructs a [`UserSlicePtrReader`].
+ pub fn reader(self) -> UserSlicePtrReader {
+ UserSlicePtrReader(self.0, self.1)
+ }
+
+ /// Writes the provided slice into the user slice.
+ ///
+ /// Returns `EFAULT` if the address does not currently point to
+ /// mapped, writable memory (in which case some data from before the
+ /// fault may be written), or `data` is larger than the user slice
+ /// (in which case no data is written).
+ pub fn write_all(self, data: &[u8]) -> Result {
+ self.writer().write_slice(data)
+ }
+
+ /// Constructs a [`UserSlicePtrWriter`].
+ pub fn writer(self) -> UserSlicePtrWriter {
+ UserSlicePtrWriter(self.0, self.1)
+ }
+
+ /// Constructs both a [`UserSlicePtrReader`] and a [`UserSlicePtrWriter`].
+ pub fn reader_writer(self) -> (UserSlicePtrReader, UserSlicePtrWriter) {
+ (
+ UserSlicePtrReader(self.0, self.1),
+ UserSlicePtrWriter(self.0, self.1),
+ )
+ }
+}
+
+/// A reader for [`UserSlicePtr`].
+///
+/// Used to incrementally read from the user slice.
+pub struct UserSlicePtrReader(*mut c_types::c_void, usize);
+
+impl IoBufferReader for UserSlicePtrReader {
+ /// Returns the number of bytes left to be read from this reader.
+ ///
+ /// Note that even reading less than this number of bytes may fail.
+ fn len(&self) -> usize {
+ self.1
+ }
+
+ /// Reads raw data from the user slice into a raw kernel buffer.
+ ///
+ /// # Safety
+ ///
+ /// The output buffer must be valid.
+ unsafe fn read_raw(&mut self, out: *mut u8, len: usize) -> Result {
+ if len > self.1 || len > u32::MAX as usize {
+ return Err(Error::EFAULT);
+ }
+ let res = unsafe { rust_helper_copy_from_user(out as _, self.0, len as _) };
+ if res != 0 {
+ return Err(Error::EFAULT);
+ }
+ // Since this is not a pointer to a valid object in our program,
+ // we cannot use `add`, which has C-style rules for defined
+ // behavior.
+ self.0 = self.0.wrapping_add(len);
+ self.1 -= len;
+ Ok(())
+ }
+}
+
+/// A writer for [`UserSlicePtr`].
+///
+/// Used to incrementally write into the user slice.
+pub struct UserSlicePtrWriter(*mut c_types::c_void, usize);
+
+impl IoBufferWriter for UserSlicePtrWriter {
+ fn len(&self) -> usize {
+ self.1
+ }
+
+ fn clear(&mut self, mut len: usize) -> Result {
+ let mut ret = Ok(());
+ if len > self.1 {
+ ret = Err(Error::EFAULT);
+ len = self.1;
+ }
+
+ // SAFETY: The buffer will be validated by `clear_user`. We ensure that `len` is within
+ // bounds in the check above.
+ let left = unsafe { rust_helper_clear_user(self.0, len as _) } as usize;
+ if left != 0 {
+ ret = Err(Error::EFAULT);
+ len -= left;
+ }
+
+ self.0 = self.0.wrapping_add(len);
+ self.1 -= len;
+ ret
+ }
+
+ unsafe fn write_raw(&mut self, data: *const u8, len: usize) -> Result {
+ if len > self.1 || len > u32::MAX as usize {
+ return Err(Error::EFAULT);
+ }
+ let res = unsafe { rust_helper_copy_to_user(self.0, data as _, len as _) };
+ if res != 0 {
+ return Err(Error::EFAULT);
+ }
+ // Since this is not a pointer to a valid object in our program,
+ // we cannot use `add`, which has C-style rules for defined
+ // behavior.
+ self.0 = self.0.wrapping_add(len);
+ self.1 -= len;
+ Ok(())
+ }
+}
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
new file mode 100644
index 000000000000..cb7a4f12f3b4
--- /dev/null
+++ b/rust/macros/lib.rs
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Crate for all kernel procedural macros.
+
+mod module;
+
+use proc_macro::TokenStream;
+
+/// Declares a kernel module.
+///
+/// The `type` argument should be a type which implements the [`KernelModule`]
+/// trait. The macro also accepts various forms of kernel metadata.
+///
+/// C header: [`include/linux/moduleparam.h`](../../../include/linux/moduleparam.h)
+///
+/// [`KernelModule`]: ../kernel/trait.KernelModule.html
+///
+/// # Examples
+///
+/// ```ignore
+/// use kernel::prelude::*;
+///
+/// module!{
+/// type: MyKernelModule,
+/// name: b"my_kernel_module",
+/// author: b"Rust for Linux Contributors",
+/// description: b"My very own kernel module!",
+/// license: b"GPL v2",
+/// params: {
+/// my_i32: i32 {
+/// default: 42,
+/// permissions: 0o000,
+/// description: b"Example of i32",
+/// },
+/// writeable_i32: i32 {
+/// default: 42,
+/// permissions: 0o644,
+/// description: b"Example of i32",
+/// },
+/// },
+/// }
+///
+/// struct MyKernelModule;
+///
+/// impl KernelModule for MyKernelModule {
+/// fn init() -> Result<Self> {
+/// // If the parameter is writeable, then the kparam lock must be
+/// // taken to read the parameter:
+/// {
+/// let lock = THIS_MODULE.kernel_param_lock();
+/// pr_info!("i32 param is: {}\n", writeable_i32.read(&lock));
+/// }
+/// // If the parameter is read-only, it can be read without locking
+/// // the kernel parameters:
+/// pr_info!("i32 param is: {}\n", my_i32.read());
+/// Ok(MyKernelModule)
+/// }
+/// }
+/// ```
+///
+/// # Supported argument types
+/// - `type`: type which implements the [`KernelModule`] trait (required).
+/// - `name`: byte array of the name of the kernel module (required).
+/// - `author`: byte array of the author of the kernel module.
+/// - `description`: byte array of the description of the kernel module.
+/// - `license`: byte array of the license of the kernel module (required).
+/// - `alias`: byte array of alias name of the kernel module.
+/// - `alias_rtnl_link`: byte array of the `rtnl_link_alias` of the kernel module (mutually exclusive with `alias`).
+/// - `params`: parameters for the kernel module, as described below.
+///
+/// # Supported parameter types
+///
+/// - `bool`: Corresponds to C `bool` param type.
+/// - `i8`: No equivalent C param type.
+/// - `u8`: Corresponds to C `char` param type.
+/// - `i16`: Corresponds to C `short` param type.
+/// - `u16`: Corresponds to C `ushort` param type.
+/// - `i32`: Corresponds to C `int` param type.
+/// - `u32`: Corresponds to C `uint` param type.
+/// - `i64`: No equivalent C param type.
+/// - `u64`: Corresponds to C `ullong` param type.
+/// - `isize`: No equivalent C param type.
+/// - `usize`: No equivalent C param type.
+/// - `str`: Corresponds to C `charp` param type. Reading returns a byte slice.
+/// - `ArrayParam<T,N>`: Corresponds to C parameters created using `module_param_array`. An array
+/// of `T`'s of length at **most** `N`.
+///
+/// `invbool` is unsupported: it was only ever used in a few modules.
+/// Consider using a `bool` and inverting the logic instead.
+#[proc_macro]
+pub fn module(ts: TokenStream) -> TokenStream {
+ module::module(ts)
+}
+
+/// Declares a kernel module that exposes a single misc device.
+///
+/// The `type` argument should be a type which implements the [`FileOpener`] trait. The macro also
+/// accepts various forms of kernel metadata.
+///
+/// C header: [`include/linux/moduleparam.h`](../../../include/linux/moduleparam.h)
+///
+/// [`FileOpener`]: ../kernel/file_operations/trait.FileOpener.html
+///
+/// # Examples
+///
+/// ```ignore
+/// use kernel::prelude::*;
+///
+/// module_misc_device! {
+/// type: MyFile,
+/// name: b"my_miscdev_kernel_module",
+/// author: b"Rust for Linux Contributors",
+/// description: b"My very own misc device kernel module!",
+/// license: b"GPL v2",
+/// }
+///
+/// #[derive(Default)]
+/// struct MyFile;
+///
+/// impl kernel::file_operations::FileOperations for MyFile {
+/// kernel::declare_file_operations!();
+/// }
+/// ```
+#[proc_macro]
+pub fn module_misc_device(ts: TokenStream) -> TokenStream {
+ module::module_misc_device(ts)
+}
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
new file mode 100644
index 000000000000..1389c53aa228
--- /dev/null
+++ b/rust/macros/module.rs
@@ -0,0 +1,754 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use proc_macro::{token_stream, Delimiter, Group, Literal, TokenStream, TokenTree};
+
+fn try_ident(it: &mut token_stream::IntoIter) -> Option<String> {
+ if let Some(TokenTree::Ident(ident)) = it.next() {
+ Some(ident.to_string())
+ } else {
+ None
+ }
+}
+
+fn try_literal(it: &mut token_stream::IntoIter) -> Option<String> {
+ if let Some(TokenTree::Literal(literal)) = it.next() {
+ Some(literal.to_string())
+ } else {
+ None
+ }
+}
+
+fn try_byte_string(it: &mut token_stream::IntoIter) -> Option<String> {
+ try_literal(it).and_then(|byte_string| {
+ if byte_string.starts_with("b\"") && byte_string.ends_with('\"') {
+ Some(byte_string[2..byte_string.len() - 1].to_string())
+ } else {
+ None
+ }
+ })
+}
+
+fn expect_ident(it: &mut token_stream::IntoIter) -> String {
+ try_ident(it).expect("Expected Ident")
+}
+
+fn expect_punct(it: &mut token_stream::IntoIter) -> char {
+ if let TokenTree::Punct(punct) = it.next().expect("Reached end of token stream for Punct") {
+ punct.as_char()
+ } else {
+ panic!("Expected Punct");
+ }
+}
+
+fn expect_literal(it: &mut token_stream::IntoIter) -> String {
+ try_literal(it).expect("Expected Literal")
+}
+
+fn expect_group(it: &mut token_stream::IntoIter) -> Group {
+ if let TokenTree::Group(group) = it.next().expect("Reached end of token stream for Group") {
+ group
+ } else {
+ panic!("Expected Group");
+ }
+}
+
+fn expect_byte_string(it: &mut token_stream::IntoIter) -> String {
+ try_byte_string(it).expect("Expected byte string")
+}
+
+#[derive(Clone, PartialEq)]
+enum ParamType {
+ Ident(String),
+ Array { vals: String, max_length: usize },
+}
+
+fn expect_array_fields(it: &mut token_stream::IntoIter) -> ParamType {
+ assert_eq!(expect_punct(it), '<');
+ let vals = expect_ident(it);
+ assert_eq!(expect_punct(it), ',');
+ let max_length_str = expect_literal(it);
+ let max_length = max_length_str
+ .parse::<usize>()
+ .expect("Expected usize length");
+ assert_eq!(expect_punct(it), '>');
+ ParamType::Array { vals, max_length }
+}
+
+fn expect_type(it: &mut token_stream::IntoIter) -> ParamType {
+ if let TokenTree::Ident(ident) = it
+ .next()
+ .expect("Reached end of token stream for param type")
+ {
+ match ident.to_string().as_ref() {
+ "ArrayParam" => expect_array_fields(it),
+ _ => ParamType::Ident(ident.to_string()),
+ }
+ } else {
+ panic!("Expected Param Type")
+ }
+}
+
+fn expect_end(it: &mut token_stream::IntoIter) {
+ if it.next().is_some() {
+ panic!("Expected end");
+ }
+}
+
+fn get_literal(it: &mut token_stream::IntoIter, expected_name: &str) -> String {
+ assert_eq!(expect_ident(it), expected_name);
+ assert_eq!(expect_punct(it), ':');
+ let literal = expect_literal(it);
+ assert_eq!(expect_punct(it), ',');
+ literal
+}
+
+fn get_byte_string(it: &mut token_stream::IntoIter, expected_name: &str) -> String {
+ assert_eq!(expect_ident(it), expected_name);
+ assert_eq!(expect_punct(it), ':');
+ let byte_string = expect_byte_string(it);
+ assert_eq!(expect_punct(it), ',');
+ byte_string
+}
+
+struct ModInfoBuilder<'a> {
+ module: &'a str,
+ counter: usize,
+ buffer: String,
+}
+
+impl<'a> ModInfoBuilder<'a> {
+ fn new(module: &'a str) -> Self {
+ ModInfoBuilder {
+ module,
+ counter: 0,
+ buffer: String::new(),
+ }
+ }
+
+ fn emit_base(&mut self, field: &str, content: &str, builtin: bool) {
+ use std::fmt::Write;
+
+ let string = if builtin {
+ // Built-in modules prefix their modinfo strings by `module.`.
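+ // For example, a built-in `mymod` emits `mymod.author=...`, while the
+ // loadable version emits just `author=...`.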
+ format!(
+ "{module}.{field}={content}\0",
+ module = self.module,
+ field = field,
+ content = content
+ )
+ } else {
+ // Loadable modules' modinfo strings go as-is.
+ format!("{field}={content}\0", field = field, content = content)
+ };
+
+ write!(
+ &mut self.buffer,
+ "
+ {cfg}
+ #[doc(hidden)]
+ #[link_section = \".modinfo\"]
+ #[used]
+ pub static __{module}_{counter}: [u8; {length}] = *{string};
+ ",
+ cfg = if builtin {
+ "#[cfg(not(MODULE))]"
+ } else {
+ "#[cfg(MODULE)]"
+ },
+ module = self.module,
+ counter = self.counter,
+ length = string.len(),
+ string = Literal::byte_string(string.as_bytes()),
+ )
+ .unwrap();
+
+ self.counter += 1;
+ }
+
+ fn emit_only_builtin(&mut self, field: &str, content: &str) {
+ self.emit_base(field, content, true)
+ }
+
+ fn emit_only_loadable(&mut self, field: &str, content: &str) {
+ self.emit_base(field, content, false)
+ }
+
+ fn emit(&mut self, field: &str, content: &str) {
+ self.emit_only_builtin(field, content);
+ self.emit_only_loadable(field, content);
+ }
+
+ fn emit_param(&mut self, field: &str, param: &str, content: &str) {
+ let content = format!("{param}:{content}", param = param, content = content);
+ self.emit(field, &content);
+ }
+}
+
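+/// Returns `true` if the given permissions literal (decimal, or prefixed `0x`/`0o`/`0b`) has no
+/// write bits (`0o222`) set; literals that fail to parse are conservatively treated as writable.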
+fn permissions_are_readonly(perms: &str) -> bool {
+ let (radix, digits) = if let Some(n) = perms.strip_prefix("0x") {
+ (16, n)
+ } else if let Some(n) = perms.strip_prefix("0o") {
+ (8, n)
+ } else if let Some(n) = perms.strip_prefix("0b") {
+ (2, n)
+ } else {
+ (10, perms)
+ };
+ match u32::from_str_radix(digits, radix) {
+ Ok(perms) => perms & 0o222 == 0,
+ Err(_) => false,
+ }
+}
+
+fn param_ops_path(param_type: &str) -> &'static str {
+ match param_type {
+ "bool" => "kernel::module_param::PARAM_OPS_BOOL",
+ "i8" => "kernel::module_param::PARAM_OPS_I8",
+ "u8" => "kernel::module_param::PARAM_OPS_U8",
+ "i16" => "kernel::module_param::PARAM_OPS_I16",
+ "u16" => "kernel::module_param::PARAM_OPS_U16",
+ "i32" => "kernel::module_param::PARAM_OPS_I32",
+ "u32" => "kernel::module_param::PARAM_OPS_U32",
+ "i64" => "kernel::module_param::PARAM_OPS_I64",
+ "u64" => "kernel::module_param::PARAM_OPS_U64",
+ "isize" => "kernel::module_param::PARAM_OPS_ISIZE",
+ "usize" => "kernel::module_param::PARAM_OPS_USIZE",
+ "str" => "kernel::module_param::PARAM_OPS_STR",
+ t => panic!("Unrecognized type {}", t),
+ }
+}
+
+fn try_simple_param_val(
+ param_type: &str,
+) -> Box<dyn Fn(&mut token_stream::IntoIter) -> Option<String>> {
+ match param_type {
+ "bool" => Box::new(|param_it| try_ident(param_it)),
+ "str" => Box::new(|param_it| {
+ try_byte_string(param_it)
+ .map(|s| format!("kernel::module_param::StringParam::Ref(b\"{}\")", s))
+ }),
+ _ => Box::new(|param_it| try_literal(param_it)),
+ }
+}
+
+fn get_default(param_type: &ParamType, param_it: &mut token_stream::IntoIter) -> String {
+ let try_param_val = match param_type {
+ ParamType::Ident(ref param_type)
+ | ParamType::Array {
+ vals: ref param_type,
+ max_length: _,
+ } => try_simple_param_val(param_type),
+ };
+ assert_eq!(expect_ident(param_it), "default");
+ assert_eq!(expect_punct(param_it), ':');
+ let default = match param_type {
+ ParamType::Ident(_) => try_param_val(param_it).expect("Expected default param value"),
+ ParamType::Array {
+ vals: _,
+ max_length: _,
+ } => {
+ let group = expect_group(param_it);
+ assert_eq!(group.delimiter(), Delimiter::Bracket);
+ let mut default_vals = Vec::new();
+ let mut it = group.stream().into_iter();
+
+ while let Some(default_val) = try_param_val(&mut it) {
+ default_vals.push(default_val);
+ match it.next() {
+ Some(TokenTree::Punct(punct)) => assert_eq!(punct.as_char(), ','),
+ None => break,
+ _ => panic!("Expected ',' or end of array default values"),
+ }
+ }
+
+ let mut default_array = "kernel::module_param::ArrayParam::create(&[".to_string();
+ default_array.push_str(
+ &default_vals
+ .iter()
+ .map(|val| val.to_string())
+ .collect::<Vec<String>>()
+ .join(","),
+ );
+ default_array.push_str("])");
+ default_array
+ }
+ };
+ assert_eq!(expect_punct(param_it), ',');
+ default
+}
+
+fn generated_array_ops_name(vals: &str, max_length: usize) -> String {
+ format!(
+ "__generated_array_ops_{vals}_{max_length}",
+ vals = vals,
+ max_length = max_length
+ )
+}
+
+#[derive(Debug, Default)]
+struct ModuleInfo {
+ type_: String,
+ license: String,
+ name: String,
+ author: Option<String>,
+ description: Option<String>,
+ alias: Option<String>,
+ params: Option<Group>,
+}
+
+impl ModuleInfo {
+ fn parse(it: &mut token_stream::IntoIter) -> Self {
+ let mut info = ModuleInfo::default();
+
+ const EXPECTED_KEYS: &[&str] = &[
+ "type",
+ "name",
+ "author",
+ "description",
+ "license",
+ "alias",
+ "alias_rtnl_link",
+ "params",
+ ];
+ const REQUIRED_KEYS: &[&str] = &["type", "name", "license"];
+ let mut seen_keys = Vec::new();
+
+ loop {
+ let key = match it.next() {
+ Some(TokenTree::Ident(ident)) => ident.to_string(),
+ Some(_) => panic!("Expected Ident or end"),
+ None => break,
+ };
+
+ if seen_keys.contains(&key) {
+ panic!(
+ "Duplicated key \"{}\". Keys can only be specified once.",
+ key
+ );
+ }
+
+ assert_eq!(expect_punct(it), ':');
+
+ match key.as_str() {
+ "type" => info.type_ = expect_ident(it),
+ "name" => info.name = expect_byte_string(it),
+ "author" => info.author = Some(expect_byte_string(it)),
+ "description" => info.description = Some(expect_byte_string(it)),
+ "license" => info.license = expect_byte_string(it),
+ "alias" => info.alias = Some(expect_byte_string(it)),
+ "alias_rtnl_link" => {
+ info.alias = Some(format!("rtnl-link-{}", expect_byte_string(it)))
+ }
+ "params" => info.params = Some(expect_group(it)),
+ _ => panic!(
+ "Unknown key \"{}\". Valid keys are: {:?}.",
+ key, EXPECTED_KEYS
+ ),
+ }
+
+ assert_eq!(expect_punct(it), ',');
+
+ seen_keys.push(key);
+ }
+
+ expect_end(it);
+
+ for key in REQUIRED_KEYS {
+ if !seen_keys.iter().any(|e| e == key) {
+ panic!("Missing required key \"{}\".", key);
+ }
+ }
+
+ let mut ordered_keys: Vec<&str> = Vec::new();
+ for key in EXPECTED_KEYS {
+ if seen_keys.iter().any(|e| e == key) {
+ ordered_keys.push(key);
+ }
+ }
+
+ if seen_keys != ordered_keys {
+ panic!(
+ "Keys are not ordered as expected. Order them like: {:?}.",
+ ordered_keys
+ );
+ }
+
+ info
+ }
+}
+
+pub fn module(ts: TokenStream) -> TokenStream {
+ let mut it = ts.into_iter();
+
+ let info = ModuleInfo::parse(&mut it);
+
+ let name = info.name.clone();
+
+ let mut modinfo = ModInfoBuilder::new(&name);
+ if let Some(author) = info.author {
+ modinfo.emit("author", &author);
+ }
+ if let Some(description) = info.description {
+ modinfo.emit("description", &description);
+ }
+ modinfo.emit("license", &info.license);
+ if let Some(alias) = info.alias {
+ modinfo.emit("alias", &alias);
+ }
+
+ // Built-in modules also export the `file` modinfo string
+ let file =
+ std::env::var("RUST_MODFILE").expect("Unable to fetch RUST_MODFILE environmental variable");
+ modinfo.emit_only_builtin("file", &file);
+
+ let mut array_types_to_generate = Vec::new();
+ if let Some(params) = info.params {
+ assert_eq!(params.delimiter(), Delimiter::Brace);
+
+ let mut it = params.stream().into_iter();
+
+ loop {
+ let param_name = match it.next() {
+ Some(TokenTree::Ident(ident)) => ident.to_string(),
+ Some(_) => panic!("Expected Ident or end"),
+ None => break,
+ };
+
+ assert_eq!(expect_punct(&mut it), ':');
+ let param_type = expect_type(&mut it);
+ let group = expect_group(&mut it);
+ assert_eq!(expect_punct(&mut it), ',');
+
+ assert_eq!(group.delimiter(), Delimiter::Brace);
+
+ let mut param_it = group.stream().into_iter();
+ let param_default = get_default(&param_type, &mut param_it);
+ let param_permissions = get_literal(&mut param_it, "permissions");
+ let param_description = get_byte_string(&mut param_it, "description");
+ expect_end(&mut param_it);
+
+ // TODO: more primitive types
+ // TODO: other kinds: unsafes, etc.
+ let (param_kernel_type, ops): (String, _) = match param_type {
+ ParamType::Ident(ref param_type) => (
+ param_type.to_string(),
+ param_ops_path(param_type).to_string(),
+ ),
+ ParamType::Array {
+ ref vals,
+ max_length,
+ } => {
+ array_types_to_generate.push((vals.clone(), max_length));
+ (
+ format!("__rust_array_param_{}_{}", vals, max_length),
+ generated_array_ops_name(vals, max_length),
+ )
+ }
+ };
+
+ modinfo.emit_param("parmtype", &param_name, &param_kernel_type);
+ modinfo.emit_param("parm", &param_name, &param_description);
+ let param_type_internal = match param_type {
+ ParamType::Ident(ref param_type) => match param_type.as_ref() {
+ "str" => "kernel::module_param::StringParam".to_string(),
+ other => other.to_string(),
+ },
+ ParamType::Array {
+ ref vals,
+ max_length,
+ } => format!(
+ "kernel::module_param::ArrayParam<{vals}, {max_length}>",
+ vals = vals,
+ max_length = max_length
+ ),
+ };
+ let read_func = if permissions_are_readonly(&param_permissions) {
+ format!(
+ "
+ fn read(&self) -> &<{param_type_internal} as kernel::module_param::ModuleParam>::Value {{
+ // SAFETY: Parameters do not need to be locked because they are read only or sysfs is not enabled.
+ unsafe {{ <{param_type_internal} as kernel::module_param::ModuleParam>::value(&__{name}_{param_name}_value) }}
+ }}
+ ",
+ name = name,
+ param_name = param_name,
+ param_type_internal = param_type_internal,
+ )
+ } else {
+ format!(
+ "
+ fn read<'lck>(&self, lock: &'lck kernel::KParamGuard) -> &'lck <{param_type_internal} as kernel::module_param::ModuleParam>::Value {{
+ // SAFETY: Parameters are locked by `KParamGuard`.
+ unsafe {{ <{param_type_internal} as kernel::module_param::ModuleParam>::value(&__{name}_{param_name}_value) }}
+ }}
+ ",
+ name = name,
+ param_name = param_name,
+ param_type_internal = param_type_internal,
+ )
+ };
+ let kparam = format!(
+ "
+ kernel::bindings::kernel_param__bindgen_ty_1 {{
+ arg: unsafe {{ &__{name}_{param_name}_value }} as *const _ as *mut kernel::c_types::c_void,
+ }},
+ ",
+ name = name,
+ param_name = param_name,
+ );
+ modinfo.buffer.push_str(
+ &format!(
+ "
+ static mut __{name}_{param_name}_value: {param_type_internal} = {param_default};
+
+ struct __{name}_{param_name};
+
+ impl __{name}_{param_name} {{ {read_func} }}
+
+ const {param_name}: __{name}_{param_name} = __{name}_{param_name};
+
+ // Note: the C macro that generates the static structs for the `__param` section
+ // asks for them to be `aligned(sizeof(void *))`. However, that was put in place
+ // in 2003 in commit 38d5b085d2 (\"[PATCH] Fix over-alignment problem on x86-64\")
+ // to undo GCC over-alignment of static structs of >32 bytes. It seems that is
+ // no longer the case, so we simplify to a transparent representation here in
+ // the expectation that the extra alignment is not needed.
+ // TODO: revisit this to confirm the above and remove the comment if confirmed.
+ #[repr(transparent)]
+ struct __{name}_{param_name}_RacyKernelParam(kernel::bindings::kernel_param);
+
+ unsafe impl Sync for __{name}_{param_name}_RacyKernelParam {{
+ }}
+
+ #[cfg(not(MODULE))]
+ const __{name}_{param_name}_name: *const kernel::c_types::c_char = b\"{name}.{param_name}\\0\" as *const _ as *const kernel::c_types::c_char;
+
+ #[cfg(MODULE)]
+ const __{name}_{param_name}_name: *const kernel::c_types::c_char = b\"{param_name}\\0\" as *const _ as *const kernel::c_types::c_char;
+
+ #[link_section = \"__param\"]
+ #[used]
+ static __{name}_{param_name}_struct: __{name}_{param_name}_RacyKernelParam = __{name}_{param_name}_RacyKernelParam(kernel::bindings::kernel_param {{
+ name: __{name}_{param_name}_name,
+ // SAFETY: `__this_module` is constructed by the kernel at load time and will not be freed until the module is unloaded.
+ #[cfg(MODULE)]
+ mod_: unsafe {{ &kernel::bindings::__this_module as *const _ as *mut _ }},
+ #[cfg(not(MODULE))]
+ mod_: core::ptr::null_mut(),
+ ops: unsafe {{ &{ops} }} as *const kernel::bindings::kernel_param_ops,
+ perm: {permissions},
+ level: -1,
+ flags: 0,
+ __bindgen_anon_1: {kparam}
+ }});
+ ",
+ name = name,
+ param_type_internal = param_type_internal,
+ read_func = read_func,
+ param_default = param_default,
+ param_name = param_name,
+ ops = ops,
+ permissions = param_permissions,
+ kparam = kparam,
+ )
+ );
+ }
+ }
+
+ let mut generated_array_types = String::new();
+
+ for (vals, max_length) in array_types_to_generate {
+ let ops_name = generated_array_ops_name(&vals, max_length);
+ generated_array_types.push_str(&format!(
+ "
+ kernel::make_param_ops!(
+ {ops_name},
+ kernel::module_param::ArrayParam<{vals}, {{ {max_length} }}>
+ );
+ ",
+ ops_name = ops_name,
+ vals = vals,
+ max_length = max_length,
+ ));
+ }
+
+ format!(
+ "
+ /// The module name.
+ ///
+ /// Used by the printing macros, e.g. [`info!`].
+ const __LOG_PREFIX: &[u8] = b\"{name}\\0\";
+
+ static mut __MOD: Option<{type_}> = None;
+
+ // SAFETY: `__this_module` is constructed by the kernel at load time and will not be freed until the module is unloaded.
+ #[cfg(MODULE)]
+ static THIS_MODULE: kernel::ThisModule = unsafe {{ kernel::ThisModule::from_ptr(&kernel::bindings::__this_module as *const _ as *mut _) }};
+ #[cfg(not(MODULE))]
+ static THIS_MODULE: kernel::ThisModule = unsafe {{ kernel::ThisModule::from_ptr(core::ptr::null_mut()) }};
+
+ // Loadable modules need to export the `{{init,cleanup}}_module` identifiers
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn init_module() -> kernel::c_types::c_int {{
+ __init()
+ }}
+
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn cleanup_module() {{
+ __exit()
+ }}
+
+ // Built-in modules are initialized through an initcall pointer
+ // and the identifiers need to be unique
+ #[cfg(not(MODULE))]
+ #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
+ #[doc(hidden)]
+ #[link_section = \"{initcall_section}\"]
+ #[used]
+ pub static __{name}_initcall: extern \"C\" fn() -> kernel::c_types::c_int = __{name}_init;
+
+ #[cfg(not(MODULE))]
+ #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
+ global_asm!(
+ r#\".section \"{initcall_section}\", \"a\"
+ __{name}_initcall:
+ .long __{name}_init - .
+ .previous
+ \"#
+ );
+
+ #[cfg(not(MODULE))]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn __{name}_init() -> kernel::c_types::c_int {{
+ __init()
+ }}
+
+ #[cfg(not(MODULE))]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn __{name}_exit() {{
+ __exit()
+ }}
+
+ fn __init() -> kernel::c_types::c_int {{
+ match <{type_} as kernel::KernelModule>::init() {{
+ Ok(m) => {{
+ unsafe {{
+ __MOD = Some(m);
+ }}
+ return 0;
+ }}
+ Err(e) => {{
+ return e.to_kernel_errno();
+ }}
+ }}
+ }}
+
+ fn __exit() {{
+ unsafe {{
+ // Invokes `drop()` on `__MOD`, which should be used for cleanup.
+ __MOD = None;
+ }}
+ }}
+
+ {modinfo}
+
+ {generated_array_types}
+ ",
+ type_ = info.type_,
+ name = info.name,
+ modinfo = modinfo.buffer,
+ generated_array_types = generated_array_types,
+ initcall_section = ".initcall6.init"
+ ).parse().expect("Error parsing formatted string into token stream.")
+}
+
+pub fn module_misc_device(ts: TokenStream) -> TokenStream {
+ let mut it = ts.into_iter();
+
+ let info = ModuleInfo::parse(&mut it);
+
+ let module = format!("__internal_ModuleFor{}", info.type_);
+
+ format!(
+ "
+ #[doc(hidden)]
+ struct {module} {{
+ _dev: core::pin::Pin<alloc::boxed::Box<kernel::miscdev::Registration>>,
+ }}
+
+ impl kernel::KernelModule for {module} {{
+ fn init() -> kernel::Result<Self> {{
+ Ok(Self {{
+ _dev: kernel::miscdev::Registration::new_pinned::<{type_}>(
+ kernel::c_str!(\"{name}\"),
+ None,
+ (),
+ )?,
+ }})
+ }}
+ }}
+
+ kernel::prelude::module! {{
+ type: {module},
+ name: b\"{name}\",
+ {author}
+ {description}
+ license: b\"{license}\",
+ {alias}
+ }}
+ ",
+ module = module,
+ type_ = info.type_,
+ name = info.name,
+ author = info
+ .author
+ .map(|v| format!("author: b\"{}\",", v))
+ .unwrap_or_else(|| "".to_string()),
+ description = info
+ .description
+ .map(|v| format!("description: b\"{}\",", v))
+ .unwrap_or_else(|| "".to_string()),
+ alias = info
+ .alias
+ .map(|v| format!("alias: b\"{}\",", v))
+ .unwrap_or_else(|| "".to_string()),
+ license = info.license
+ )
+ .parse()
+ .expect("Error parsing formatted string into token stream.")
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_permissions_are_readonly() {
+ assert!(permissions_are_readonly("0b000000000"));
+ assert!(permissions_are_readonly("0o000"));
+ assert!(permissions_are_readonly("000"));
+ assert!(permissions_are_readonly("0x000"));
+
+ assert!(!permissions_are_readonly("0b111111111"));
+ assert!(!permissions_are_readonly("0o777"));
+ assert!(!permissions_are_readonly("511"));
+ assert!(!permissions_are_readonly("0x1ff"));
+
+ assert!(permissions_are_readonly("0o014"));
+ assert!(permissions_are_readonly("0o015"));
+
+ assert!(!permissions_are_readonly("0o214"));
+ assert!(!permissions_are_readonly("0o024"));
+ assert!(!permissions_are_readonly("0o012"));
+
+ assert!(!permissions_are_readonly("0o315"));
+ assert!(!permissions_are_readonly("0o065"));
+ assert!(!permissions_are_readonly("0o017"));
+ }
+}
diff --git a/samples/Kconfig b/samples/Kconfig
index b0503ef058d3..8cbd6490823f 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -223,4 +223,6 @@ config SAMPLE_WATCH_QUEUE
Build example userspace program to use the new mount_notify(),
sb_notify() syscalls and the KEYCTL_WATCH_KEY keyctl() function.
+source "samples/rust/Kconfig"
+
endif # SAMPLES
diff --git a/samples/Makefile b/samples/Makefile
index 087e0988ccc5..291663e56a3c 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_SAMPLE_INTEL_MEI) += mei/
subdir-$(CONFIG_SAMPLE_WATCHDOG) += watchdog
subdir-$(CONFIG_SAMPLE_WATCH_QUEUE) += watch_queue
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak/
+obj-$(CONFIG_SAMPLES_RUST) += rust/
diff --git a/samples/rust/Kconfig b/samples/rust/Kconfig
new file mode 100644
index 000000000000..183a3c4dc80c
--- /dev/null
+++ b/samples/rust/Kconfig
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig SAMPLES_RUST
+ bool "Rust samples"
+ depends on RUST
+ help
+ You can build sample Rust kernel code here.
+
+ If unsure, say N.
+
+if SAMPLES_RUST
+
+config SAMPLE_RUST_MINIMAL
+ tristate "Minimal"
+ help
+ This option builds the Rust minimal module sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_minimal.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_PRINT
+ tristate "Printing macros"
+ help
+ This option builds the Rust printing macros sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_print.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_MODULE_PARAMETERS
+ tristate "Module parameters"
+ help
+ This option builds the Rust module parameters sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_module_parameters.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_SYNC
+ tristate "Synchronisation primitives"
+ help
+ This option builds the Rust synchronisation primitives sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_sync.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_CHRDEV
+ tristate "Character device"
+ help
+ This option builds the Rust character device sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_chrdev.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_MISCDEV
+ tristate "Miscellaneous device"
+ help
+ This option builds the Rust miscellaneous device sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_miscdev.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_STACK_PROBING
+ tristate "Stack probing"
+ help
+ This option builds the Rust stack probing sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_stack_probing.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_SEMAPHORE
+ tristate "Semaphore"
+ help
+ This option builds the Rust semaphore sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_semaphore.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_SEMAPHORE_C
+ tristate "Semaphore (in C, for comparison)"
+ help
+ This option builds the Rust semaphore sample (in C, for comparison).
+
+ To compile this as a module, choose M here:
+ the module will be called rust_semaphore_c.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_RANDOM
+ tristate "Random"
+ help
+ This option builds the Rust random sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_random.
+
+ If unsure, say N.
+
+endif # SAMPLES_RUST
diff --git a/samples/rust/Makefile b/samples/rust/Makefile
new file mode 100644
index 000000000000..48bc871ea1f8
--- /dev/null
+++ b/samples/rust/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SAMPLE_RUST_MINIMAL) += rust_minimal.o
+obj-$(CONFIG_SAMPLE_RUST_PRINT) += rust_print.o
+obj-$(CONFIG_SAMPLE_RUST_MODULE_PARAMETERS) += rust_module_parameters.o
+obj-$(CONFIG_SAMPLE_RUST_SYNC) += rust_sync.o
+obj-$(CONFIG_SAMPLE_RUST_CHRDEV) += rust_chrdev.o
+obj-$(CONFIG_SAMPLE_RUST_MISCDEV) += rust_miscdev.o
+obj-$(CONFIG_SAMPLE_RUST_STACK_PROBING) += rust_stack_probing.o
+obj-$(CONFIG_SAMPLE_RUST_SEMAPHORE) += rust_semaphore.o
+obj-$(CONFIG_SAMPLE_RUST_SEMAPHORE_C) += rust_semaphore_c.o
+obj-$(CONFIG_SAMPLE_RUST_RANDOM) += rust_random.o
diff --git a/samples/rust/rust_chrdev.rs b/samples/rust/rust_chrdev.rs
new file mode 100644
index 000000000000..ccdfb2da7855
--- /dev/null
+++ b/samples/rust/rust_chrdev.rs
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust character device sample
+
+#![no_std]
+#![feature(allocator_api, global_asm)]
+
+use kernel::prelude::*;
+use kernel::{c_str, chrdev, file_operations::FileOperations};
+
+module! {
+ type: RustChrdev,
+ name: b"rust_chrdev",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust character device sample",
+ license: b"GPL v2",
+}
+
+#[derive(Default)]
+struct RustFile;
+
+impl FileOperations for RustFile {
+ kernel::declare_file_operations!();
+}
+
+struct RustChrdev {
+ _dev: Pin<Box<chrdev::Registration<2>>>,
+}
+
+impl KernelModule for RustChrdev {
+ fn init() -> Result<Self> {
+ pr_info!("Rust character device sample (init)\n");
+
+ let mut chrdev_reg =
+ chrdev::Registration::new_pinned(c_str!("rust_chrdev"), 0, &THIS_MODULE)?;
+
+ // Register the same kind of device twice; we're just demonstrating
+ // that you can use multiple minors. There are two minors in this case
+ // because its type is `chrdev::Registration<2>`.
+ chrdev_reg.as_mut().register::<RustFile>()?;
+ chrdev_reg.as_mut().register::<RustFile>()?;
+
+ Ok(RustChrdev { _dev: chrdev_reg })
+ }
+}
+
+impl Drop for RustChrdev {
+ fn drop(&mut self) {
+ pr_info!("Rust character device sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_minimal.rs b/samples/rust/rust_minimal.rs
new file mode 100644
index 000000000000..49cfd8cf3aad
--- /dev/null
+++ b/samples/rust/rust_minimal.rs
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust minimal sample
+
+#![no_std]
+#![feature(allocator_api, global_asm)]
+
+use kernel::prelude::*;
+
+module! {
+ type: RustMinimal,
+ name: b"rust_minimal",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust minimal sample",
+ license: b"GPL v2",
+}
+
+struct RustMinimal {
+ message: String,
+}
+
+impl KernelModule for RustMinimal {
+ fn init() -> Result<Self> {
+ pr_info!("Rust minimal sample (init)\n");
+ pr_info!("Am I built-in? {}\n", !cfg!(MODULE));
+
+ Ok(RustMinimal {
+ message: "on the heap!".try_to_owned()?,
+ })
+ }
+}
+
+impl Drop for RustMinimal {
+ fn drop(&mut self) {
+ pr_info!("My message is {}\n", self.message);
+ pr_info!("Rust minimal sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_miscdev.rs b/samples/rust/rust_miscdev.rs
new file mode 100644
index 000000000000..df8681af9cc2
--- /dev/null
+++ b/samples/rust/rust_miscdev.rs
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust miscellaneous device sample
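+//!
+//! A token bucket exposed as a misc device: each write adds a token (up to
+//! `MAX_TOKENS`), and each read blocks until a token is available and then
+//! consumes it.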
+
+#![no_std]
+#![feature(allocator_api, global_asm)]
+
+use kernel::prelude::*;
+use kernel::{
+ c_str,
+ file::File,
+ file_operations::{FileOpener, FileOperations},
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ miscdev,
+ sync::{CondVar, Mutex, Ref},
+};
+
+module! {
+ type: RustMiscdev,
+ name: b"rust_miscdev",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust miscellaneous device sample",
+ license: b"GPL v2",
+}
+
+const MAX_TOKENS: usize = 3;
+
+struct SharedStateInner {
+ token_count: usize,
+}
+
+struct SharedState {
+ state_changed: CondVar,
+ inner: Mutex<SharedStateInner>,
+}
+
+impl SharedState {
+ fn try_new() -> Result<Pin<Ref<Self>>> {
+ Ok(Ref::pinned(Ref::try_new_and_init(
+ Self {
+ // SAFETY: `condvar_init!` is called below.
+ state_changed: unsafe { CondVar::new() },
+ // SAFETY: `mutex_init!` is called below.
+ inner: unsafe { Mutex::new(SharedStateInner { token_count: 0 }) },
+ },
+ |mut state| {
+ // SAFETY: `state_changed` is pinned when `state` is.
+ let pinned = unsafe { state.as_mut().map_unchecked_mut(|s| &mut s.state_changed) };
+ kernel::condvar_init!(pinned, "SharedState::state_changed");
+ // SAFETY: `inner` is pinned when `state` is.
+ let pinned = unsafe { state.as_mut().map_unchecked_mut(|s| &mut s.inner) };
+ kernel::mutex_init!(pinned, "SharedState::inner");
+ },
+ )?))
+ }
+}
+
+struct Token;
+
+impl FileOpener<Pin<Ref<SharedState>>> for Token {
+ fn open(shared: &Pin<Ref<SharedState>>) -> Result<Self::Wrapper> {
+ Ok(shared.clone())
+ }
+}
+
+impl FileOperations for Token {
+ type Wrapper = Pin<Ref<SharedState>>;
+
+ kernel::declare_file_operations!(read, write);
+
+ fn read<T: IoBufferWriter>(
+ shared: &Ref<SharedState>,
+ _: &File,
+ data: &mut T,
+ offset: u64,
+ ) -> Result<usize> {
+ // Succeed if the caller doesn't provide a buffer or is not reading from the start.
+ if data.is_empty() || offset != 0 {
+ return Ok(0);
+ }
+
+ {
+ let mut inner = shared.inner.lock();
+
+ // Wait until we are allowed to decrement the token count or a signal arrives.
+ while inner.token_count == 0 {
+ if shared.state_changed.wait(&mut inner) {
+ return Err(Error::EINTR);
+ }
+ }
+
+ // Consume a token.
+ inner.token_count -= 1;
+ }
+
+ // Notify a writer that may be waiting.
+ shared.state_changed.notify_all();
+
+ // Write a single byte with value 1 to the reader.
+ data.write_slice(&[1u8; 1])?;
+ Ok(1)
+ }
+
+ fn write<T: IoBufferReader>(
+ shared: &Ref<SharedState>,
+ _: &File,
+ data: &mut T,
+ _offset: u64,
+ ) -> Result<usize> {
+ {
+ let mut inner = shared.inner.lock();
+
+ // Wait until we are allowed to increment the token count or a signal arrives.
+ while inner.token_count == MAX_TOKENS {
+ if shared.state_changed.wait(&mut inner) {
+ return Err(Error::EINTR);
+ }
+ }
+
+ // Increment the token count so that a reader can be released.
+ inner.token_count += 1;
+ }
+
+ // Notify a reader that may be waiting.
+ shared.state_changed.notify_all();
+ Ok(data.len())
+ }
+}
+
+struct RustMiscdev {
+ _dev: Pin<Box<miscdev::Registration<Pin<Ref<SharedState>>>>>,
+}
+
+impl KernelModule for RustMiscdev {
+ fn init() -> Result<Self> {
+ pr_info!("Rust miscellaneous device sample (init)\n");
+
+ let state = SharedState::try_new()?;
+
+ Ok(RustMiscdev {
+ _dev: miscdev::Registration::new_pinned::<Token>(c_str!("rust_miscdev"), None, state)?,
+ })
+ }
+}
+
+impl Drop for RustMiscdev {
+ fn drop(&mut self) {
+ pr_info!("Rust miscellaneous device sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_module_parameters.rs b/samples/rust/rust_module_parameters.rs
new file mode 100644
index 000000000000..57e59e805027
--- /dev/null
+++ b/samples/rust/rust_module_parameters.rs
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust module parameters sample
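+//!
+//! The parameters declared below can be set at load time, e.g. with a
+//! hypothetical invocation such as
+//! `insmod rust_module_parameters.ko my_i32=345 my_usize=84`.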
+
+#![no_std]
+#![feature(allocator_api, global_asm)]
+
+use kernel::prelude::*;
+
+module! {
+ type: RustModuleParameters,
+ name: b"rust_module_parameters",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust module parameters sample",
+ license: b"GPL v2",
+ params: {
+ my_bool: bool {
+ default: true,
+ permissions: 0,
+ description: b"Example of bool",
+ },
+ my_i32: i32 {
+ default: 42,
+ permissions: 0o644,
+ description: b"Example of i32",
+ },
+ my_str: str {
+ default: b"default str val",
+ permissions: 0o644,
+ description: b"Example of a string param",
+ },
+ my_usize: usize {
+ default: 42,
+ permissions: 0o644,
+ description: b"Example of usize",
+ },
+ my_array: ArrayParam<i32, 3> {
+ default: [0, 1],
+ permissions: 0,
+ description: b"Example of array",
+ },
+ },
+}
+
+struct RustModuleParameters;
+
+impl KernelModule for RustModuleParameters {
+ fn init() -> Result<Self> {
+ pr_info!("Rust module parameters sample (init)\n");
+
+ {
+ let lock = THIS_MODULE.kernel_param_lock();
+ pr_info!("Parameters:\n");
+ pr_info!(" my_bool: {}\n", my_bool.read());
+ pr_info!(" my_i32: {}\n", my_i32.read(&lock));
+ pr_info!(
+ " my_str: {}\n",
+ core::str::from_utf8(my_str.read(&lock))?
+ );
+ pr_info!(" my_usize: {}\n", my_usize.read(&lock));
+ pr_info!(" my_array: {:?}\n", my_array.read());
+ }
+
+ Ok(RustModuleParameters)
+ }
+}
+
+impl Drop for RustModuleParameters {
+ fn drop(&mut self) {
+ pr_info!("Rust module parameters sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_print.rs b/samples/rust/rust_print.rs
new file mode 100644
index 000000000000..c43338fca507
--- /dev/null
+++ b/samples/rust/rust_print.rs
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust printing macros sample
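+//!
+//! Exercises each printing level from `pr_emerg!` down to `pr_info!`, plus
+//! `pr_cont!` for continuation lines, both with and without format arguments.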
+
+#![no_std]
+#![feature(allocator_api, global_asm)]
+
+use kernel::pr_cont;
+use kernel::prelude::*;
+
+module! {
+ type: RustPrint,
+ name: b"rust_print",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust printing macros sample",
+ license: b"GPL v2",
+}
+
+struct RustPrint;
+
+impl KernelModule for RustPrint {
+ fn init() -> Result<Self> {
+ pr_info!("Rust printing macros sample (init)\n");
+
+ pr_emerg!("Emergency message (level 0) without args\n");
+ pr_alert!("Alert message (level 1) without args\n");
+ pr_crit!("Critical message (level 2) without args\n");
+ pr_err!("Error message (level 3) without args\n");
+ pr_warn!("Warning message (level 4) without args\n");
+ pr_notice!("Notice message (level 5) without args\n");
+ pr_info!("Info message (level 6) without args\n");
+
+ pr_info!("A line that");
+ pr_cont!(" is continued");
+ pr_cont!(" without args\n");
+
+ pr_emerg!("{} message (level {}) with args\n", "Emergency", 0);
+ pr_alert!("{} message (level {}) with args\n", "Alert", 1);
+ pr_crit!("{} message (level {}) with args\n", "Critical", 2);
+ pr_err!("{} message (level {}) with args\n", "Error", 3);
+ pr_warn!("{} message (level {}) with args\n", "Warning", 4);
+ pr_notice!("{} message (level {}) with args\n", "Notice", 5);
+ pr_info!("{} message (level {}) with args\n", "Info", 6);
+
+ pr_info!("A {} that", "line");
+ pr_cont!(" is {}", "continued");
+ pr_cont!(" with {}\n", "args");
+
+ Ok(RustPrint)
+ }
+}
+
+impl Drop for RustPrint {
+ fn drop(&mut self) {
+ pr_info!("Rust printing macros sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_random.rs b/samples/rust/rust_random.rs
new file mode 100644
index 000000000000..135325b6ca0a
--- /dev/null
+++ b/samples/rust/rust_random.rs
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust random device
+//!
+//! Adapted from Alex Gaynor's original available at
+//! <https://github.com/alex/just-use/blob/master/src/lib.rs>.
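+//!
+//! Reads return bytes from the kernel's random number generator (blocking or
+//! non-blocking depending on the file's mode); writes mix the written bytes
+//! back into the kernel's random pool.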
+
+#![no_std]
+#![feature(allocator_api, global_asm)]
+
+use kernel::{
+ file::File,
+ file_operations::FileOperations,
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ prelude::*,
+};
+
+#[derive(Default)]
+struct RandomFile;
+
+impl FileOperations for RandomFile {
+ kernel::declare_file_operations!(read, write, read_iter, write_iter);
+
+ fn read<T: IoBufferWriter>(_this: &Self, file: &File, buf: &mut T, _: u64) -> Result<usize> {
+ let total_len = buf.len();
+ let mut chunkbuf = [0; 256];
+
+ while !buf.is_empty() {
+ let len = chunkbuf.len().min(buf.len());
+ let chunk = &mut chunkbuf[0..len];
+
+ if file.is_blocking() {
+ kernel::random::getrandom(chunk)?;
+ } else {
+ kernel::random::getrandom_nonblock(chunk)?;
+ }
+ buf.write_slice(chunk)?;
+ }
+ Ok(total_len)
+ }
+
+ fn write<T: IoBufferReader>(_this: &Self, _file: &File, buf: &mut T, _: u64) -> Result<usize> {
+ let total_len = buf.len();
+ let mut chunkbuf = [0; 256];
+ while !buf.is_empty() {
+ let len = chunkbuf.len().min(buf.len());
+ let chunk = &mut chunkbuf[0..len];
+ buf.read_slice(chunk)?;
+ kernel::random::add_randomness(chunk);
+ }
+ Ok(total_len)
+ }
+}
+
+module_misc_device! {
+ type: RandomFile,
+ name: b"rust_random",
+ author: b"Rust for Linux Contributors",
+ description: b"Just use /dev/urandom: Now with early-boot safety",
+ license: b"GPL v2",
+}
diff --git a/samples/rust/rust_semaphore.rs b/samples/rust/rust_semaphore.rs
new file mode 100644
index 000000000000..1408d5095b88
--- /dev/null
+++ b/samples/rust/rust_semaphore.rs
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust semaphore sample
+//!
+//! A counting semaphore that can be used by userspace.
+//!
+//! The count is incremented by writes to the device. A write of `n` bytes results in an increment
+//! of `n`. It is decremented by reads; each read results in the count being decremented by 1. If
+//! the count is already zero, a read will block until another write increments it.
+//!
+//! For example, this can be used from a user-space shell as follows (assuming a device node
+//! called `semaphore`): `cat semaphore` decrements the count by 1 (waiting for it to become
+//! non-zero before decrementing); `echo -n 123 > semaphore` writes 3 bytes and thus increments
+//! the count by 3, potentially unblocking up to 3 blocked readers.
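+//!
+//! Each open file also keeps a per-file read count; the `ioctl` commands
+//! defined at the bottom of this file retrieve (`IOCTL_GET_READ_COUNT`) and
+//! overwrite (`IOCTL_SET_READ_COUNT`) that counter.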
+
+#![no_std]
+#![feature(allocator_api, global_asm)]
+
+use core::sync::atomic::{AtomicU64, Ordering};
+use kernel::{
+ c_str, condvar_init, declare_file_operations,
+ file::File,
+ file_operations::{FileOpener, FileOperations, IoctlCommand, IoctlHandler},
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ miscdev::Registration,
+ mutex_init,
+ prelude::*,
+ sync::{CondVar, Mutex, Ref},
+ user_ptr::{UserSlicePtrReader, UserSlicePtrWriter},
+};
+
+module! {
+ type: RustSemaphore,
+ name: b"rust_semaphore",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust semaphore sample",
+ license: b"GPL v2",
+}
+
+struct SemaphoreInner {
+ count: usize,
+ max_seen: usize,
+}
+
+struct Semaphore {
+ changed: CondVar,
+ inner: Mutex<SemaphoreInner>,
+}
+
+struct FileState {
+ read_count: AtomicU64,
+ shared: Ref<Semaphore>,
+}
+
+impl FileState {
+ fn consume(&self) -> Result {
+ let mut inner = self.shared.inner.lock();
+ while inner.count == 0 {
+ if self.shared.changed.wait(&mut inner) {
+ return Err(Error::EINTR);
+ }
+ }
+ inner.count -= 1;
+ Ok(())
+ }
+}
+
+impl FileOpener<Ref<Semaphore>> for FileState {
+ fn open(shared: &Ref<Semaphore>) -> Result<Box<Self>> {
+ Ok(Box::try_new(Self {
+ read_count: AtomicU64::new(0),
+ shared: shared.clone(),
+ })?)
+ }
+}
+
+impl FileOperations for FileState {
+ declare_file_operations!(read, write, ioctl);
+
+ fn read<T: IoBufferWriter>(this: &Self, _: &File, data: &mut T, offset: u64) -> Result<usize> {
+ if data.is_empty() || offset > 0 {
+ return Ok(0);
+ }
+ this.consume()?;
+ data.write_slice(&[0u8; 1])?;
+ this.read_count.fetch_add(1, Ordering::Relaxed);
+ Ok(1)
+ }
+
+ fn write<T: IoBufferReader>(this: &Self, _: &File, data: &mut T, _offs: u64) -> Result<usize> {
+ {
+ let mut inner = this.shared.inner.lock();
+ inner.count = inner.count.saturating_add(data.len());
+ if inner.count > inner.max_seen {
+ inner.max_seen = inner.count;
+ }
+ }
+
+ this.shared.changed.notify_all();
+ Ok(data.len())
+ }
+
+ fn ioctl(this: &Self, file: &File, cmd: &mut IoctlCommand) -> Result<i32> {
+ cmd.dispatch::<Self>(this, file)
+ }
+}
+
+struct RustSemaphore {
+ _dev: Pin<Box<Registration<Ref<Semaphore>>>>,
+}
+
+impl KernelModule for RustSemaphore {
+ fn init() -> Result<Self> {
+ pr_info!("Rust semaphore sample (init)\n");
+
+ let sema = Ref::try_new_and_init(
+ Semaphore {
+ // SAFETY: `condvar_init!` is called below.
+ changed: unsafe { CondVar::new() },
+
+ // SAFETY: `mutex_init!` is called below.
+ inner: unsafe {
+ Mutex::new(SemaphoreInner {
+ count: 0,
+ max_seen: 0,
+ })
+ },
+ },
+ |mut sema| {
+ // SAFETY: `changed` is pinned when `sema` is.
+ let pinned = unsafe { sema.as_mut().map_unchecked_mut(|s| &mut s.changed) };
+ condvar_init!(pinned, "Semaphore::changed");
+
+ // SAFETY: `inner` is pinned when `sema` is.
+ let pinned = unsafe { sema.as_mut().map_unchecked_mut(|s| &mut s.inner) };
+ mutex_init!(pinned, "Semaphore::inner");
+ },
+ )?;
+
+ Ok(Self {
+ _dev: Registration::new_pinned::<FileState>(c_str!("rust_semaphore"), None, sema)?,
+ })
+ }
+}
+
+impl Drop for RustSemaphore {
+ fn drop(&mut self) {
+ pr_info!("Rust semaphore sample (exit)\n");
+ }
+}
+
+const IOCTL_GET_READ_COUNT: u32 = 0x80086301;
+const IOCTL_SET_READ_COUNT: u32 = 0x40086301;
+
+impl IoctlHandler for FileState {
+ type Target = Self;
+
+ fn read(this: &Self, _: &File, cmd: u32, writer: &mut UserSlicePtrWriter) -> Result<i32> {
+ match cmd {
+ IOCTL_GET_READ_COUNT => {
+ writer.write(&this.read_count.load(Ordering::Relaxed))?;
+ Ok(0)
+ }
+ _ => Err(Error::EINVAL),
+ }
+ }
+
+ fn write(this: &Self, _: &File, cmd: u32, reader: &mut UserSlicePtrReader) -> Result<i32> {
+ match cmd {
+ IOCTL_SET_READ_COUNT => {
+ this.read_count.store(reader.read()?, Ordering::Relaxed);
+ Ok(0)
+ }
+ _ => Err(Error::EINVAL),
+ }
+ }
+}
diff --git a/samples/rust/rust_semaphore_c.c b/samples/rust/rust_semaphore_c.c
new file mode 100644
index 000000000000..cdc121d4030d
--- /dev/null
+++ b/samples/rust/rust_semaphore_c.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rust semaphore sample (in C, for comparison)
+ *
+ * This is a C implementation of `rust_semaphore.rs`. Refer to the description
+ * in that file for details on the device.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/refcount.h>
+#include <linux/wait.h>
+
+#define IOCTL_GET_READ_COUNT _IOR('c', 1, u64)
+#define IOCTL_SET_READ_COUNT _IOW('c', 1, u64)
+
+struct semaphore_state {
+ struct kref ref;
+ struct miscdevice miscdev;
+ wait_queue_head_t changed;
+ struct mutex mutex;
+ size_t count;
+ size_t max_seen;
+};
+
+struct file_state {
+ atomic64_t read_count;
+ struct semaphore_state *shared;
+};
+
+static int semaphore_consume(struct semaphore_state *state)
+{
+ DEFINE_WAIT(wait);
+
+ mutex_lock(&state->mutex);
+ while (state->count == 0) {
+ prepare_to_wait(&state->changed, &wait, TASK_INTERRUPTIBLE);
+ mutex_unlock(&state->mutex);
+ schedule();
+ finish_wait(&state->changed, &wait);
+ if (signal_pending(current))
+ return -EINTR;
+ mutex_lock(&state->mutex);
+ }
+
+ state->count--;
+ mutex_unlock(&state->mutex);
+
+ return 0;
+}
+
+static int semaphore_open(struct inode *nodp, struct file *filp)
+{
+ struct semaphore_state *shared =
+ container_of(filp->private_data, struct semaphore_state, miscdev);
+ struct file_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ kref_get(&shared->ref);
+ state->shared = shared;
+ atomic64_set(&state->read_count, 0);
+
+ filp->private_data = state;
+
+ return 0;
+}
+
+static ssize_t semaphore_write(struct file *filp, const char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct file_state *state = filp->private_data;
+ struct semaphore_state *shared = state->shared;
+
+ mutex_lock(&shared->mutex);
+
+ shared->count += count;
+ if (shared->count < count)
+ shared->count = SIZE_MAX;
+
+ if (shared->count > shared->max_seen)
+ shared->max_seen = shared->count;
+
+ mutex_unlock(&shared->mutex);
+
+ wake_up_all(&shared->changed);
+
+ return count;
+}
+
+static ssize_t semaphore_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct file_state *state = filp->private_data;
+ char c = 0;
+ int ret;
+
+ if (count == 0 || *ppos > 0)
+ return 0;
+
+ ret = semaphore_consume(state->shared);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buffer, &c, sizeof(c)))
+ return -EFAULT;
+
+ atomic64_add(1, &state->read_count);
+ *ppos += 1;
+ return 1;
+}
+
+static long semaphore_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct file_state *state = filp->private_data;
+ void __user *buffer = (void __user *)arg;
+ u64 value;
+
+ switch (cmd) {
+ case IOCTL_GET_READ_COUNT:
+ value = atomic64_read(&state->read_count);
+ if (copy_to_user(buffer, &value, sizeof(value)))
+ return -EFAULT;
+ return 0;
+ case IOCTL_SET_READ_COUNT:
+ if (copy_from_user(&value, buffer, sizeof(value)))
+ return -EFAULT;
+ atomic64_set(&state->read_count, value);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void semaphore_free(struct kref *kref)
+{
+ struct semaphore_state *device;
+
+ device = container_of(kref, struct semaphore_state, ref);
+ kfree(device);
+}
+
+static int semaphore_release(struct inode *nodp, struct file *filp)
+{
+ struct file_state *state = filp->private_data;
+
+ kref_put(&state->shared->ref, semaphore_free);
+ kfree(state);
+ return 0;
+}
+
+static const struct file_operations semaphore_fops = {
+ .owner = THIS_MODULE,
+ .open = semaphore_open,
+ .read = semaphore_read,
+ .write = semaphore_write,
+ .unlocked_ioctl = semaphore_ioctl,
+ .compat_ioctl = semaphore_ioctl,
+ .release = semaphore_release,
+};
+
+static struct semaphore_state *device;
+
+static int __init semaphore_init(void)
+{
+ int ret;
+ struct semaphore_state *state;
+
+ pr_info("Rust semaphore sample (in C, for comparison) (init)\n");
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->mutex);
+ kref_init(&state->ref);
+ init_waitqueue_head(&state->changed);
+
+ state->miscdev.fops = &semaphore_fops;
+ state->miscdev.minor = MISC_DYNAMIC_MINOR;
+ state->miscdev.name = "semaphore";
+
+ ret = misc_register(&state->miscdev);
+ if (ret < 0) {
+ kfree(state);
+ return ret;
+ }
+
+ device = state;
+
+ return 0;
+}
+
+static void __exit semaphore_exit(void)
+{
+ pr_info("Rust semaphore sample (in C, for comparison) (exit)\n");
+
+ misc_deregister(&device->miscdev);
+ kref_put(&device->ref, semaphore_free);
+}
+
+module_init(semaphore_init);
+module_exit(semaphore_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rust for Linux Contributors");
+MODULE_DESCRIPTION("Rust semaphore sample (in C, for comparison)");
diff --git a/samples/rust/rust_stack_probing.rs b/samples/rust/rust_stack_probing.rs
new file mode 100644
index 000000000000..fcb87a53ce7f
--- /dev/null
+++ b/samples/rust/rust_stack_probing.rs
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust stack probing sample
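+//!
+//! Allocates a large array on the stack so that the compiler emits a call to
+//! `__rust_probestack` on architectures that support stack probing.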
+
+#![no_std]
+#![feature(allocator_api, global_asm)]
+#![feature(bench_black_box)]
+
+use kernel::prelude::*;
+
+module! {
+ type: RustStackProbing,
+ name: b"rust_stack_probing",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust stack probing sample",
+ license: b"GPL v2",
+}
+
+struct RustStackProbing;
+
+impl KernelModule for RustStackProbing {
+ fn init() -> Result<Self> {
+ pr_info!("Rust stack probing sample (init)\n");
+
+ // Placing this large variable on the stack will trigger
+ // stack probing on the supported architectures.
+ // This verifies that stack probing does not lead to
+ // any errors when `__rust_probestack` needs to be linked in.
+ let x: [u64; 514] = core::hint::black_box([5; 514]);
+ pr_info!("Large array has length: {}\n", x.len());
+
+ Ok(RustStackProbing)
+ }
+}
+
+impl Drop for RustStackProbing {
+ fn drop(&mut self) {
+ pr_info!("Rust stack probing sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_sync.rs b/samples/rust/rust_sync.rs
new file mode 100644
index 000000000000..40bbfec9ad27
--- /dev/null
+++ b/samples/rust/rust_sync.rs
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust synchronisation primitives sample
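+//!
+//! Exercises `Mutex`, `SpinLock` and `CondVar` from within the kernel; the
+//! module has no user-space interface.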
+
+#![no_std]
+#![feature(allocator_api, global_asm)]
+
+use kernel::prelude::*;
+use kernel::{
+ condvar_init, mutex_init, spinlock_init,
+ sync::{CondVar, Mutex, SpinLock},
+};
+
+module! {
+ type: RustSync,
+ name: b"rust_sync",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust synchronisation primitives sample",
+ license: b"GPL v2",
+}
+
+struct RustSync;
+
+impl KernelModule for RustSync {
+ fn init() -> Result<Self> {
+ pr_info!("Rust synchronisation primitives sample (init)\n");
+
+ // Test mutexes.
+ {
+ // SAFETY: `init` is called below.
+ let mut data = Pin::from(Box::try_new(unsafe { Mutex::new(0) })?);
+ mutex_init!(data.as_mut(), "RustSync::init::data1");
+ *data.lock() = 10;
+ pr_info!("Value: {}\n", *data.lock());
+
+ // SAFETY: `init` is called below.
+ let mut cv = Pin::from(Box::try_new(unsafe { CondVar::new() })?);
+ condvar_init!(cv.as_mut(), "RustSync::init::cv1");
+
+ {
+ let mut guard = data.lock();
+ while *guard != 10 {
+ let _ = cv.wait(&mut guard);
+ }
+ }
+ cv.notify_one();
+ cv.notify_all();
+ cv.free_waiters();
+ }
+
+ // Test spinlocks.
+ {
+ // SAFETY: `init` is called below.
+ let mut data = Pin::from(Box::try_new(unsafe { SpinLock::new(0) })?);
+ spinlock_init!(data.as_mut(), "RustSync::init::data2");
+ *data.lock() = 10;
+ pr_info!("Value: {}\n", *data.lock());
+
+ // SAFETY: `init` is called below.
+ let mut cv = Pin::from(Box::try_new(unsafe { CondVar::new() })?);
+ condvar_init!(cv.as_mut(), "RustSync::init::cv2");
+ {
+ let mut guard = data.lock();
+ while *guard != 10 {
+ let _ = cv.wait(&mut guard);
+ }
+ }
+ cv.notify_one();
+ cv.notify_all();
+ cv.free_waiters();
+ }
+
+ Ok(RustSync)
+ }
+}
+
+impl Drop for RustSync {
+ fn drop(&mut self) {
+ pr_info!("Rust synchronisation primitives sample (exit)\n");
+ }
+}
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 02197cb8e3a7..7b0defd0c180 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -26,6 +26,7 @@ EXTRA_CPPFLAGS :=
EXTRA_LDFLAGS :=
asflags-y :=
ccflags-y :=
+rustflags-y :=
cppflags-y :=
ldflags-y :=
@@ -287,6 +288,27 @@ quiet_cmd_cc_lst_c = MKLST $@
$(obj)/%.lst: $(src)/%.c FORCE
$(call if_changed_dep,cc_lst_c)
+# Compile Rust sources (.rs)
+# ---------------------------------------------------------------------------
+
+# Need to use absolute path here and have symbolic links resolved;
+# otherwise rustdoc and rustc compute different hashes for the target.
+rust_cross_flags := --target=$(realpath $(KBUILD_RUST_TARGET))
+
+quiet_cmd_rustc_o_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
+ cmd_rustc_o_rs = \
+ RUST_MODFILE=$(modfile) \
+ $(RUSTC_OR_CLIPPY) $(rust_flags) $(rust_cross_flags) \
+ -Zallow-features=allocator_api,bench_black_box,concat_idents,global_asm,try_reserve \
+ --extern alloc --extern kernel \
+ --crate-type rlib --out-dir $(obj) -L $(objtree)/rust/ \
+ --crate-name $(patsubst %.o,%,$(notdir $@)) $<; \
+ mv $(obj)/$(subst .o,,$(notdir $@)).d $(depfile); \
+ sed -i '/^\#/d' $(depfile)
+
+$(obj)/%.o: $(src)/%.rs FORCE
+ $(call if_changed_dep,rustc_o_rs)
+
# Compile assembler sources (.S)
# ---------------------------------------------------------------------------
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 4917c4848e12..a329bc117aca 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -8,6 +8,7 @@ ldflags-y += $(EXTRA_LDFLAGS)
# flags that take effect in current and sub directories
KBUILD_AFLAGS += $(subdir-asflags-y)
KBUILD_CFLAGS += $(subdir-ccflags-y)
+KBUILD_RUSTFLAGS += $(subdir-rustflags-y)
# Figure out what we need to build from the various variables
# ===========================================================================
@@ -133,6 +134,10 @@ _c_flags = $(filter-out $(CFLAGS_REMOVE_$(target-stem).o), \
$(filter-out $(ccflags-remove-y), \
$(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(ccflags-y)) \
$(CFLAGS_$(target-stem).o))
+_rust_flags = $(filter-out $(RUSTFLAGS_REMOVE_$(target-stem).o), \
+ $(filter-out $(rustflags-remove-y), \
+ $(KBUILD_RUSTFLAGS) $(rustflags-y)) \
+ $(RUSTFLAGS_$(target-stem).o))
_a_flags = $(filter-out $(AFLAGS_REMOVE_$(target-stem).o), \
$(filter-out $(asflags-remove-y), \
$(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) $(asflags-y)) \
@@ -212,6 +217,11 @@ modkern_cflags = \
$(KBUILD_CFLAGS_MODULE) $(CFLAGS_MODULE), \
$(KBUILD_CFLAGS_KERNEL) $(CFLAGS_KERNEL) $(modfile_flags))
+modkern_rustflags = \
+ $(if $(part-of-module), \
+ $(KBUILD_RUSTFLAGS_MODULE) $(RUSTFLAGS_MODULE), \
+ $(KBUILD_RUSTFLAGS_KERNEL) $(RUSTFLAGS_KERNEL))
+
modkern_aflags = $(if $(part-of-module), \
$(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE), \
$(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL))
@@ -221,6 +231,8 @@ c_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(_c_flags) $(modkern_cflags) \
$(basename_flags) $(modname_flags)
+rust_flags = $(_rust_flags) $(modkern_rustflags) @$(objtree)/include/generated/rustc_cfg
+
a_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(_a_flags) $(modkern_aflags)
diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py
new file mode 100755
index 000000000000..72c453e1aea0
--- /dev/null
+++ b/scripts/generate_rust_analyzer.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+"""generate_rust_analyzer - Generates the `rust-project.json` file for `rust-analyzer`.
+"""
+
+import argparse
+import json
+import logging
+import pathlib
+import sys
+
+def generate_crates(srctree, objtree, sysroot_src, bindings_file):
+ # Generate the configuration list.
+ cfg = []
+ with open(objtree / "include" / "generated" / "rustc_cfg") as fd:
+ for line in fd:
+ line = line.replace("--cfg=", "")
+ line = line.replace("\n", "")
+ cfg.append(line)
+
+ # Now fill the crates list -- dependencies need to come first.
+ #
+ # Avoid O(n^2) iterations by keeping a map of indexes.
+ crates = []
+ crates_indexes = {}
+
+ def append_crate(display_name, root_module, is_workspace_member, deps, cfg):
+ crates_indexes[display_name] = len(crates)
+ crates.append({
+ "display_name": display_name,
+ "root_module": str(root_module),
+ "is_workspace_member": is_workspace_member,
+ "deps": [{"crate": crates_indexes[dep], "name": dep} for dep in deps],
+ "cfg": cfg,
+ "edition": "2018",
+ "env": {
+ "RUST_MODFILE": "This is only for rust-analyzer"
+ }
+ })
+
+ # First, the ones in `rust/` since they are a bit special.
+ append_crate(
+ "core",
+ sysroot_src / "core" / "src" / "lib.rs",
+ False,
+ [],
+ [],
+ )
+
+ append_crate(
+ "compiler_builtins",
+ srctree / "rust" / "compiler_builtins.rs",
+ True,
+ [],
+ [],
+ )
+
+ append_crate(
+ "alloc",
+ srctree / "rust" / "alloc" / "lib.rs",
+ True,
+ ["core", "compiler_builtins"],
+ [],
+ )
+
+ append_crate(
+ "macros",
+ srctree / "rust" / "macros" / "lib.rs",
+ True,
+ [],
+ [],
+ )
+ crates[-1]["proc_macro_dylib_path"] = "rust/libmacros.so"
+
+ append_crate(
+ "build_error",
+ srctree / "rust" / "build_error.rs",
+ True,
+ ["core", "compiler_builtins"],
+ [],
+ )
+
+ append_crate(
+ "kernel",
+ srctree / "rust" / "kernel" / "lib.rs",
+ True,
+ ["core", "alloc", "macros", "build_error"],
+ cfg,
+ )
+ crates[-1]["env"]["RUST_BINDINGS_FILE"] = str(bindings_file.resolve(True))
+ crates[-1]["source"] = {
+ "include_dirs": [
+ str(srctree / "rust" / "kernel"),
+ str(objtree / "rust")
+ ],
+ "exclude_dirs": [],
+ }
+
+ # Then, the rest outside of `rust/`.
+ #
+ # We explicitly mention the top-level folders we want to cover.
+ for folder in ("samples", "drivers"):
+ for path in (srctree / folder).rglob("*.rs"):
+ logging.info("Checking %s", path)
+ name = path.name.replace(".rs", "")
+
+ # Skip those that are not crate roots.
+ if f"{name}.o" not in open(path.parent / "Makefile").read():
+ continue
+
+ logging.info("Adding %s", name)
+ append_crate(
+ name,
+ path,
+ True,
+ ["core", "alloc", "kernel"],
+ cfg,
+ )
+
+ return crates
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--verbose', '-v', action='store_true')
+ parser.add_argument("srctree", type=pathlib.Path)
+ parser.add_argument("objtree", type=pathlib.Path)
+ parser.add_argument("sysroot_src", type=pathlib.Path)
+ parser.add_argument("bindings_file", type=pathlib.Path)
+ args = parser.parse_args()
+
+ logging.basicConfig(
+ format="[%(asctime)s] [%(levelname)s] %(message)s",
+ level=logging.INFO if args.verbose else logging.WARNING
+ )
+
+ rust_project = {
+ "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src, args.bindings_file),
+ "sysroot_src": str(args.sysroot_src),
+ }
+
+ json.dump(rust_project, sys.stdout, sort_keys=True, indent=4)
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 54ad86d13784..9bab5f55ade3 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -27,7 +27,7 @@
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
-#define KSYM_NAME_LEN 128
+#define KSYM_NAME_LEN 512
struct sym_entry {
unsigned long long addr;
@@ -470,12 +470,37 @@ static void write_src(void)
if ((i & 0xFF) == 0)
markers[i >> 8] = off;
- printf("\t.byte 0x%02x", table[i]->len);
+ /*
+ * There cannot be any symbol of length zero -- we use that
+ * to mark a "big" symbol (and it doesn't make sense anyway).
+ */
+ if (table[i]->len == 0) {
+ fprintf(stderr, "kallsyms failure: "
+ "unexpected zero symbol length\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Only lengths that fit in up to two bytes are supported. */
+ if (table[i]->len > 0xFFFF) {
+ fprintf(stderr, "kallsyms failure: "
+ "unexpected huge symbol length\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (table[i]->len <= 0xFF) {
+ /* Most symbols use a single byte for the length. */
+ printf("\t.byte 0x%02x", table[i]->len);
+ off += table[i]->len + 1;
+ } else {
+ /* "Big" symbols use a zero and then two bytes. */
+ printf("\t.byte 0x00, 0x%02x, 0x%02x",
+ (table[i]->len >> 8) & 0xFF,
+ table[i]->len & 0xFF);
+ off += table[i]->len + 3;
+ }
for (k = 0; k < table[i]->len; k++)
printf(", 0x%02x", table[i]->sym[k]);
printf("\n");
-
- off += table[i]->len + 1;
}
printf("\n");
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index cf72680cd769..d9fc638dfa86 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -652,6 +652,56 @@ static struct conf_printer kconfig_printer_cb =
};
/*
+ * rustc cfg printer
+ *
+ * This printer is used when generating the resulting rustc configuration
+ * after kconfig invocation and `defconfig` files.
+ */
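+/*
+ * For example, `CONFIG_EXAMPLE=y` yields both `--cfg=CONFIG_EXAMPLE` (the
+ * `IS_ENABLED()` analogue) and `--cfg=CONFIG_EXAMPLE="y"` (usable like
+ * `IS_BUILTIN()`); here `EXAMPLE` is just a placeholder name.
+ */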
+static void rustc_cfg_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg)
+{
+ const char *str;
+
+ switch (sym->type) {
+ case S_INT:
+ case S_HEX:
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ str = sym_escape_string_value(value);
+
+ /*
+ * We don't care about disabled symbols, i.e. there is no need
+ * for what other printers emit as "comments".
+ */
+ if (*value == 'n')
+ return;
+
+ /*
+ * To have similar functionality to the C macro `IS_ENABLED()`
+ * we provide an empty `--cfg CONFIG_X` here in both `y`
+ * and `m` cases.
+ *
+ * Then, the common `fprintf()` below will also give us
+ * a `--cfg CONFIG_X="y"` or `--cfg CONFIG_X="m"`, which can
+ * be used as the equivalent of `IS_BUILTIN()`/`IS_MODULE()`.
+ */
+ if (*value == 'y' || *value == 'm')
+ fprintf(fp, "--cfg=%s%s\n", CONFIG_, sym->name);
+
+ break;
+ default:
+ str = value;
+ break;
+ }
+
+ fprintf(fp, "--cfg=%s%s=%s\n", CONFIG_, sym->name, str);
+}
+
+static struct conf_printer rustc_cfg_printer_cb =
+{
+ .print_symbol = rustc_cfg_print_symbol,
+};
+
+/*
* Header printer
*
* This printer is used when generating the `include/generated/autoconf.h' file.
@@ -1058,7 +1108,7 @@ int conf_write_autoconf(int overwrite)
struct symbol *sym;
const char *name;
const char *autoconf_name = conf_get_autoconfig_name();
- FILE *out, *out_h;
+ FILE *out, *out_h, *out_rustc_cfg;
int i;
if (!overwrite && is_present(autoconf_name))
@@ -1079,6 +1129,13 @@ int conf_write_autoconf(int overwrite)
return 1;
}
+ out_rustc_cfg = fopen(".tmp_rustc_cfg", "w");
+ if (!out_rustc_cfg) {
+ fclose(out);
+ fclose(out_h);
+ return 1;
+ }
+
conf_write_heading(out, &kconfig_printer_cb, NULL);
conf_write_heading(out_h, &header_printer_cb, NULL);
@@ -1090,9 +1147,11 @@ int conf_write_autoconf(int overwrite)
/* write symbols to auto.conf and autoconf.h */
conf_write_symbol(out, sym, &kconfig_printer_cb, (void *)1);
conf_write_symbol(out_h, sym, &header_printer_cb, NULL);
+ conf_write_symbol(out_rustc_cfg, sym, &rustc_cfg_printer_cb, NULL);
}
fclose(out);
fclose(out_h);
+ fclose(out_rustc_cfg);
name = getenv("KCONFIG_AUTOHEADER");
if (!name)
@@ -1111,6 +1170,12 @@ int conf_write_autoconf(int overwrite)
if (rename(".tmpconfig", autoconf_name))
return 1;
+ name = "include/generated/rustc_cfg";
+ if (make_parent_dir(name))
+ return 1;
+ if (rename(".tmp_rustc_cfg", name))
+ return 1;
+
return 0;
}
diff --git a/scripts/rust-version.sh b/scripts/rust-version.sh
new file mode 100755
index 000000000000..67b6d31688e2
--- /dev/null
+++ b/scripts/rust-version.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# rust-version rust-command
+#
+# Print the compiler version of `rust-command' in a 5 or 6-digit form
+# such as `14502' for rustc 1.45.2.
+#
+# Prints 0 if the compiler is not found (so that Kconfig does not complain).
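+#
+# Example (assuming rustc 1.53.0 is installed):
+#   $ scripts/rust-version.sh rustc
+#   15300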
+compiler="$*"
+
+if [ ${#compiler} -eq 0 ]; then
+ echo "Error: No compiler specified." >&2
+ printf "Usage:\n\t$0 <rust-command>\n" >&2
+ exit 1
+fi
+
+if ! command -v $compiler >/dev/null 2>&1; then
+ echo 0
+ exit 0
+fi
+
+VERSION=$($compiler --version | cut -f2 -d' ')
+
+# Cut suffix if any (e.g. `-dev`)
+VERSION=$(echo $VERSION | cut -f1 -d'-')
+
+MAJOR=$(echo $VERSION | cut -f1 -d'.')
+MINOR=$(echo $VERSION | cut -f2 -d'.')
+PATCHLEVEL=$(echo $VERSION | cut -f3 -d'.')
+printf "%d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL
diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h
index efb6c3f5f2a9..5a37ccbec54f 100644
--- a/tools/include/linux/kallsyms.h
+++ b/tools/include/linux/kallsyms.h
@@ -6,7 +6,7 @@
#include <stdio.h>
#include <unistd.h>
-#define KSYM_NAME_LEN 128
+#define KSYM_NAME_LEN 512
struct module;
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
index e56997288f2b..d9c163f3ab24 100644
--- a/tools/include/linux/lockdep.h
+++ b/tools/include/linux/lockdep.h
@@ -47,7 +47,7 @@ static inline int debug_locks_off(void)
#define task_pid_nr(tsk) ((tsk)->pid)
-#define KSYM_NAME_LEN 128
+#define KSYM_NAME_LEN 512
#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#define pr_warn pr_err
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index 4d0c02ba3f7d..095d60144a70 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -95,7 +95,7 @@ struct perf_record_throttle {
};
#ifndef KSYM_NAME_LEN
-#define KSYM_NAME_LEN 256
+#define KSYM_NAME_LEN 512
#endif
struct perf_record_ksymbol {
diff --git a/tools/lib/symbol/kallsyms.h b/tools/lib/symbol/kallsyms.h
index 72ab9870454b..542f9b059c3b 100644
--- a/tools/lib/symbol/kallsyms.h
+++ b/tools/lib/symbol/kallsyms.h
@@ -7,7 +7,7 @@
#include <linux/types.h>
#ifndef KSYM_NAME_LEN
-#define KSYM_NAME_LEN 256
+#define KSYM_NAME_LEN 512
#endif
static inline u8 kallsyms2elf_binding(char type)