Migrate 25 crates to the android-crates-io monorepo.

sec1
semver
serde_cbor
serde_derive
shared_library
slab
smallvec
smccc
socket2
spin
spki
strsim
strum
strum_macros
sync_wrapper
syn-mid
synstructure
tempfile
termcolor
termtree
textwrap
thiserror
thread_local
tinytemplate
tokio-macros
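
The moved crates now live under external/rust/android-crates-io, so
visibility entries in dependent Android.bp files are rewritten from the
old //external/rust/crates/<crate> paths to the monorepo paths. The
pattern (illustrative, matching the der and pkcs8 changes in this CL):

    visibility: [
        // before: "//external/rust/crates/sec1:__subpackages__",
        "//external/rust/android-crates-io/crates/sec1:__subpackages__",
    ],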

Bug: 339424309
Test: treehugger
Change-Id: I40e64049d46a8a400520355d7804f64a5e0e5d2c
diff --git a/crates/der/Android.bp b/crates/der/Android.bp
index 5658e47..327328c 100644
--- a/crates/der/Android.bp
+++ b/crates/der/Android.bp
@@ -41,8 +41,8 @@
     product_available: true,
     vendor_available: true,
     visibility: [
-        "//external/rust/crates/sec1:__subpackages__",
-        "//external/rust/crates/spki:__subpackages__",
+        "//external/rust/android-crates-io/crates/sec1:__subpackages__",
+        "//external/rust/android-crates-io/crates/spki:__subpackages__",
         "//external/rust/android-crates-io/crates/pkcs1:__subpackages__",
         "//external/rust/android-crates-io/crates/pkcs8:__subpackages__",
         "//external/rust/crates/x509-cert:__subpackages__",
@@ -86,8 +86,8 @@
     product_available: true,
     vendor_available: true,
     visibility: [
-        "//external/rust/crates/sec1:__subpackages__",
-        "//external/rust/crates/spki:__subpackages__",
+        "//external/rust/android-crates-io/crates/sec1:__subpackages__",
+        "//external/rust/android-crates-io/crates/spki:__subpackages__",
         "//external/rust/android-crates-io/crates/pkcs1:__subpackages__",
         "//external/rust/android-crates-io/crates/pkcs8:__subpackages__",
         "//external/rust/crates/x509-cert:__subpackages__",
diff --git a/crates/der/cargo2android_viz.bp b/crates/der/cargo2android_viz.bp
index e9d8bf7..4cb13b9 100644
--- a/crates/der/cargo2android_viz.bp
+++ b/crates/der/cargo2android_viz.bp
@@ -1,6 +1,6 @@
 visibility: [
-     "//external/rust/crates/sec1:__subpackages__",
-     "//external/rust/crates/spki:__subpackages__",
+     "//external/rust/android-crates-io/crates/sec1:__subpackages__",
+     "//external/rust/android-crates-io/crates/spki:__subpackages__",
      "//external/rust/android-crates-io/crates/pkcs1:__subpackages__",
      "//external/rust/android-crates-io/crates/pkcs8:__subpackages__",
      "//external/rust/crates/x509-cert:__subpackages__",
diff --git a/crates/pkcs8/Android.bp b/crates/pkcs8/Android.bp
index b6227bc..8356af6 100644
--- a/crates/pkcs8/Android.bp
+++ b/crates/pkcs8/Android.bp
@@ -33,8 +33,8 @@
     product_available: true,
     vendor_available: true,
     visibility: [
-        "//external/rust/crates/pkcs1:__subpackages__",
-        "//external/rust/crates/sec1:__subpackages__",
+        "//external/rust/android-crates-io/crates/pkcs1:__subpackages__",
+        "//external/rust/android-crates-io/crates/sec1:__subpackages__",
         "//packages/modules/Virtualization:__subpackages__",
         "//system/keymint:__subpackages__",
     ],
@@ -66,8 +66,8 @@
     product_available: true,
     vendor_available: true,
     visibility: [
-        "//external/rust/crates/pkcs1:__subpackages__",
-        "//external/rust/crates/sec1:__subpackages__",
+        "//external/rust/android-crates-io/crates/pkcs1:__subpackages__",
+        "//external/rust/android-crates-io/crates/sec1:__subpackages__",
         "//packages/modules/Virtualization:__subpackages__",
         "//system/keymint:__subpackages__",
     ],
diff --git a/crates/pkcs8/cargo2android_viz.bp b/crates/pkcs8/cargo2android_viz.bp
index b7d85b3..178a6bb 100644
--- a/crates/pkcs8/cargo2android_viz.bp
+++ b/crates/pkcs8/cargo2android_viz.bp
@@ -1,6 +1,6 @@
 visibility: [
-     "//external/rust/crates/pkcs1:__subpackages__",
-     "//external/rust/crates/sec1:__subpackages__",
+     "//external/rust/android-crates-io/crates/pkcs1:__subpackages__",
+     "//external/rust/android-crates-io/crates/sec1:__subpackages__",
      "//packages/modules/Virtualization:__subpackages__",
      "//system/keymint:__subpackages__",
 ]
diff --git a/crates/sec1/.cargo-checksum.json b/crates/sec1/.cargo-checksum.json
new file mode 100644
index 0000000..ab3fe63
--- /dev/null
+++ b/crates/sec1/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"2b3e288a2da4733e1286829facc55fef0466ec25dd896079855d7cb930ce624b","Cargo.toml":"24a70b7095f9fd000e904aea11d5c67846e1aa252962d73c2d85085da49593aa","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"4a883ecc3bb1010faed542bf63d53e530fea5e5e12cf676aed588784298ba929","README.md":"f4cb78d53d52f2a44d2336e4c3dc29f10f62f2636f6db0757e0b6edb94d6e52b","src/error.rs":"a59b7e2881f9caf4f932d11eedaedaa9eb34797e652c6d363b11b604848f7d2d","src/lib.rs":"43415382857e83d425069163323a341624ae80a7282ab01c2a43519aee4472db","src/parameters.rs":"f186cde9c122319b4d408b1ad8090cbc9f6bc817ee7c761c0dbf6868131e2259","src/point.rs":"9bb2bcf504594606ae21914349f3e78e1c63c174a8f4b9312f22a6069ee226ac","src/private_key.rs":"daac1e9b5e87af5f7bcde728b6711cae5b04188ae683aea43478a85e53b76276","src/traits.rs":"b554960b3ceb586871f2febd403288bbc4c104fa1ad3a14b67b3f91a9dfb2f3b","tests/examples/p256-priv.der":"36186d76a14000b87c31162269207a757dc147668219c1adcdcdc25fa6f04a8d","tests/examples/p256-priv.pem":"7f9b6b52c303da1ad081a416e3b159109d158338374de902099877dbd1102dc8","tests/private_key.rs":"9a43eb7673d0acde453c47581eec93463b7b2adca41bf210154695a971966de1","tests/traits.rs":"837859875b3d6cabd6d2e1d16c6bb686be0be838f11b924adbfad0793b54f6fb"},"package":"d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc"}
\ No newline at end of file
diff --git a/crates/sec1/Android.bp b/crates/sec1/Android.bp
new file mode 100644
index 0000000..579fce2
--- /dev/null
+++ b/crates/sec1/Android.bp
@@ -0,0 +1,83 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_sec1_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_sec1_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libsec1",
+    host_supported: true,
+    crate_name: "sec1",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.7.3",
+    crate_root: "src/lib.rs",
+    edition: "2021",
+    features: [
+        "alloc",
+        "der",
+        "pkcs8",
+        "zeroize",
+    ],
+    rustlibs: [
+        "libder",
+        "libpkcs8",
+        "libzeroize",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.virt",
+    ],
+    product_available: true,
+    vendor_available: true,
+    visibility: [
+        "//packages/modules/Virtualization:__subpackages__",
+        "//system/keymint:__subpackages__",
+    ],
+
+}
+
+rust_library_rlib {
+    name: "libsec1_nostd",
+    crate_name: "sec1",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.7.3",
+    crate_root: "src/lib.rs",
+    edition: "2021",
+    features: [
+        "alloc",
+        "der",
+        "pkcs8",
+        "zeroize",
+    ],
+    rustlibs: [
+        "libder_nostd",
+        "libpkcs8_nostd",
+        "libzeroize_nostd",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.virt",
+    ],
+    prefer_rlib: true,
+    no_stdlibs: true,
+    stdlibs: [
+        "libcompiler_builtins.rust_sysroot",
+        "libcore.rust_sysroot",
+    ],
+    product_available: true,
+    vendor_available: true,
+    visibility: [
+        "//packages/modules/Virtualization:__subpackages__",
+        "//system/keymint:__subpackages__",
+    ],
+
+}
diff --git a/crates/sec1/CHANGELOG.md b/crates/sec1/CHANGELOG.md
new file mode 100644
index 0000000..44a0fc9
--- /dev/null
+++ b/crates/sec1/CHANGELOG.md
@@ -0,0 +1,92 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## 0.7.3 (2023-07-16)
+### Added
+- Impl `Hash` for `EncodedPoint` ([#1102])
+
+[#1102]: https://github.com/RustCrypto/formats/pull/1102
+
+## 0.7.2 (2023-04-09)
+### Added
+- Impl `ModulusSize` for `U24` ([#995])
+
+[#995]: https://github.com/RustCrypto/formats/pull/995
+
+## 0.7.1 (2023-02-27)
+### Fixed
+- Encode `ECPrivateKey` version ([#908])
+
+[#908]: https://github.com/RustCrypto/formats/pull/908
+
+## 0.7.0 (2023-02-26) [YANKED]
+### Changed
+- MSRV 1.65 ([#805])
+- Bump `serdect` to v0.2 ([#893])
+- Bump `der` dependency to v0.7 ([#899])
+- Bump `spki` dependency to v0.7 ([#900])
+- Bump `pkcs8` to v0.10 ([#902])
+
+[#805]: https://github.com/RustCrypto/formats/pull/805
+[#893]: https://github.com/RustCrypto/formats/pull/893
+[#899]: https://github.com/RustCrypto/formats/pull/899
+[#900]: https://github.com/RustCrypto/formats/pull/900
+[#902]: https://github.com/RustCrypto/formats/pull/902
+
+## 0.6.0 (Skipped)
+- Skipped to synchronize version number with `der` and `spki`
+
+## 0.5.0 (Skipped)
+- Skipped to synchronize version number with `der` and `spki`
+
+## 0.4.0 (Skipped)
+- Skipped to synchronize version number with `der` and `spki`
+
+## 0.3.0 (2022-05-08)
+### Added
+- Make `der` feature optional but on-by-default ([#497])
+- Make `point` feature optional but on-by-default ([#516])
+
+### Changed
+- Use `base16ct` and `serdect` crates ([#648])
+- Bump `der` to v0.6 ([#653])
+- Bump `pkcs8` to v0.9 ([#656])
+
+[#497]: https://github.com/RustCrypto/formats/pull/497
+[#516]: https://github.com/RustCrypto/formats/pull/516
+[#648]: https://github.com/RustCrypto/formats/pull/648
+[#653]: https://github.com/RustCrypto/formats/pull/653
+[#656]: https://github.com/RustCrypto/formats/pull/656
+
+## 0.2.1 (2021-11-18)
+### Added
+- `serde` feature ([#248])
+- Hexadecimal serialization/deserialization support for `EncodedPoint` ([#248])
+
+[#248]: https://github.com/RustCrypto/formats/pull/248
+
+## 0.2.0 (2021-11-17) [YANKED]
+### Added
+- `pkcs8` feature ([#229])
+
+### Changed
+- Rename `From/ToEcPrivateKey` => `DecodeEcPrivateKey`/`EncodeEcPrivateKey` ([#122])
+- Use `der::Document` to impl `EcPrivateKeyDocument` ([#133])
+- Rust 2021 edition upgrade; MSRV 1.56 ([#136])
+- Bump `der` crate dependency to v0.5 ([#222])
+
+### Removed
+- I/O related errors ([#158])
+
+[#122]: https://github.com/RustCrypto/formats/pull/122
+[#133]: https://github.com/RustCrypto/formats/pull/133
+[#136]: https://github.com/RustCrypto/formats/pull/136
+[#158]: https://github.com/RustCrypto/formats/pull/158
+[#222]: https://github.com/RustCrypto/formats/pull/222
+[#229]: https://github.com/RustCrypto/formats/pull/229
+
+## 0.1.0 (2021-09-22)
+- Initial release
diff --git a/crates/sec1/Cargo.lock b/crates/sec1/Cargo.lock
new file mode 100644
index 0000000..ed266d7
--- /dev/null
+++ b/crates/sec1/Cargo.lock
@@ -0,0 +1,335 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "base16ct"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf"
+
+[[package]]
+name = "base64ct"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
+
+[[package]]
+name = "bitflags"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "const-oid"
+version = "0.9.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
+
+[[package]]
+name = "der"
+version = "0.7.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0"
+dependencies = [
+ "const-oid",
+ "pem-rfc7468",
+ "zeroize",
+]
+
+[[package]]
+name = "errno"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba"
+dependencies = [
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "fastrand"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
+
+[[package]]
+name = "generic-array"
+version = "0.14.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
+dependencies = [
+ "typenum",
+ "version_check",
+]
+
+[[package]]
+name = "hex-literal"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46"
+
+[[package]]
+name = "libc"
+version = "0.2.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
+
+[[package]]
+name = "once_cell"
+version = "1.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+
+[[package]]
+name = "pem-rfc7468"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412"
+dependencies = [
+ "base64ct",
+]
+
+[[package]]
+name = "pkcs8"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
+dependencies = [
+ "der",
+ "spki",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rustix"
+version = "0.38.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a85d50532239da68e9addb745ba38ff4612a242c1c7ceea689c4bc7c2f43c36f"
+dependencies = [
+ "bitflags",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "sec1"
+version = "0.7.3"
+dependencies = [
+ "base16ct",
+ "der",
+ "generic-array",
+ "hex-literal",
+ "pkcs8",
+ "serdect",
+ "subtle",
+ "tempfile",
+ "zeroize",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serdect"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177"
+dependencies = [
+ "base16ct",
+ "serde",
+]
+
+[[package]]
+name = "spki"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d"
+dependencies = [
+ "base64ct",
+ "der",
+]
+
+[[package]]
+name = "subtle"
+version = "2.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64"
+dependencies = [
+ "cfg-if",
+ "fastrand",
+ "once_cell",
+ "rustix",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "typenum"
+version = "1.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "version_check"
+version = "0.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
+
+[[package]]
+name = "zeroize"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
diff --git a/crates/sec1/Cargo.toml b/crates/sec1/Cargo.toml
new file mode 100644
index 0000000..c640dea
--- /dev/null
+++ b/crates/sec1/Cargo.toml
@@ -0,0 +1,120 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.65"
+name = "sec1"
+version = "0.7.3"
+authors = ["RustCrypto Developers"]
+description = """
+Pure Rust implementation of SEC1: Elliptic Curve Cryptography encoding formats
+including ASN.1 DER-serialized private keys as well as the
+Elliptic-Curve-Point-to-Octet-String encoding
+"""
+readme = "README.md"
+keywords = [
+    "crypto",
+    "key",
+    "elliptic-curve",
+    "secg",
+]
+categories = [
+    "cryptography",
+    "data-structures",
+    "encoding",
+    "no-std",
+    "parser-implementations",
+]
+license = "Apache-2.0 OR MIT"
+repository = "https://github.com/RustCrypto/formats/tree/master/sec1"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = [
+    "--cfg",
+    "docsrs",
+]
+
+[dependencies.base16ct]
+version = "0.2"
+optional = true
+default-features = false
+
+[dependencies.der]
+version = "0.7"
+features = ["oid"]
+optional = true
+
+[dependencies.generic-array]
+version = "0.14.7"
+optional = true
+default-features = false
+
+[dependencies.pkcs8]
+version = "0.10"
+optional = true
+default-features = false
+
+[dependencies.serdect]
+version = "0.2"
+features = ["alloc"]
+optional = true
+default-features = false
+
+[dependencies.subtle]
+version = "2"
+optional = true
+default-features = false
+
+[dependencies.zeroize]
+version = "1"
+optional = true
+default-features = false
+
+[dev-dependencies.hex-literal]
+version = "0.4"
+
+[dev-dependencies.tempfile]
+version = "3"
+
+[features]
+alloc = [
+    "der?/alloc",
+    "pkcs8?/alloc",
+    "zeroize?/alloc",
+]
+default = [
+    "der",
+    "point",
+]
+der = [
+    "dep:der",
+    "zeroize",
+]
+pem = [
+    "alloc",
+    "der/pem",
+    "pkcs8/pem",
+]
+point = [
+    "dep:base16ct",
+    "dep:generic-array",
+]
+serde = ["dep:serdect"]
+std = [
+    "alloc",
+    "der?/std",
+]
+zeroize = [
+    "dep:zeroize",
+    "der?/zeroize",
+]
diff --git a/crates/sec1/LICENSE b/crates/sec1/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/sec1/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/sec1/LICENSE-APACHE b/crates/sec1/LICENSE-APACHE
new file mode 100644
index 0000000..78173fa
--- /dev/null
+++ b/crates/sec1/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/crates/sec1/LICENSE-MIT b/crates/sec1/LICENSE-MIT
new file mode 100644
index 0000000..68ddaa3
--- /dev/null
+++ b/crates/sec1/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2021-2022 The RustCrypto Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/sec1/METADATA b/crates/sec1/METADATA
new file mode 100644
index 0000000..309de68
--- /dev/null
+++ b/crates/sec1/METADATA
@@ -0,0 +1,20 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/sec1
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
+
+name: "sec1"
+description: "Pure Rust implementation of SEC1: Elliptic Curve Cryptography encoding formats including ASN.1 DER-serialized private keys (also described in RFC5915) as well as the Elliptic-Curve-Point-to-Octet-String and Octet-String-to-Elliptic Curve-Point encoding algorithms."
+third_party {
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2023
+    month: 12
+    day: 15
+  }
+  homepage: "https://crates.io/crates/sec1"
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/sec1/sec1-0.7.3.crate"
+    version: "0.7.3"
+  }
+}
diff --git a/crates/sec1/MODULE_LICENSE_APACHE2 b/crates/sec1/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/sec1/MODULE_LICENSE_APACHE2
diff --git a/crates/sec1/README.md b/crates/sec1/README.md
new file mode 100644
index 0000000..5678de6
--- /dev/null
+++ b/crates/sec1/README.md
@@ -0,0 +1,58 @@
+# [RustCrypto]: SEC1 Elliptic Curve Cryptography Formats
+
+[![crate][crate-image]][crate-link]
+[![Docs][docs-image]][docs-link]
+[![Build Status][build-image]][build-link]
+![Apache2/MIT licensed][license-image]
+![Rust Version][rustc-image]
+[![Project Chat][chat-image]][chat-link]
+
+[Documentation][docs-link]
+
+## About
+
+Pure Rust implementation of [SEC1: Elliptic Curve Cryptography] encoding
+formats including ASN.1 DER-serialized private keys (also described in
+[RFC5915]) as well as the `Elliptic-Curve-Point-to-Octet-String` and
+`Octet-String-to-Elliptic-Curve-Point` encoding algorithms.
+
+## Minimum Supported Rust Version
+
+This crate requires **Rust 1.65** at a minimum.
+
+We may change the MSRV in the future, but it will be accompanied by a minor
+version bump.
+
+## License
+
+Licensed under either of:
+
+ * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+ * [MIT license](http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
+
+[//]: # (badges)
+
+[crate-image]: https://buildstats.info/crate/sec1
+[crate-link]: https://crates.io/crates/sec1
+[docs-image]: https://docs.rs/sec1/badge.svg
+[docs-link]: https://docs.rs/sec1/
+[license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg
+[rustc-image]: https://img.shields.io/badge/rustc-1.65+-blue.svg
+[chat-image]: https://img.shields.io/badge/zulip-join_chat-blue.svg
+[chat-link]: https://rustcrypto.zulipchat.com/#narrow/stream/300570-formats
+[build-image]: https://github.com/RustCrypto/formats/workflows/sec1/badge.svg?branch=master&event=push
+[build-link]: https://github.com/RustCrypto/formats/actions
+
+[//]: # (links)
+
+[RustCrypto]: https://github.com/rustcrypto
+[SEC1: Elliptic Curve Cryptography]: https://www.secg.org/sec1-v2.pdf
+[RFC5915]: https://datatracker.ietf.org/doc/html/rfc5915
diff --git a/crates/sec1/cargo2android_viz.bp b/crates/sec1/cargo2android_viz.bp
new file mode 100644
index 0000000..8ce03b7
--- /dev/null
+++ b/crates/sec1/cargo2android_viz.bp
@@ -0,0 +1,4 @@
+visibility: [
+     "//packages/modules/Virtualization:__subpackages__",
+     "//system/keymint:__subpackages__",
+]
diff --git a/crates/sec1/cargo_embargo.json b/crates/sec1/cargo_embargo.json
new file mode 100644
index 0000000..0d9e163
--- /dev/null
+++ b/crates/sec1/cargo_embargo.json
@@ -0,0 +1,37 @@
+{
+  "apex_available": [
+    "//apex_available:platform",
+    "com.android.virt"
+  ],
+  "features": [
+    "alloc",
+    "der",
+    "pkcs8"
+  ],
+  "run_cargo": false,
+  "variants": [
+    {
+      "package": {
+        "sec1": {
+          "add_module_block": "cargo2android_viz.bp"
+        }
+      }
+    },
+    {
+      "module_name_overrides": {
+        "libder": "libder_nostd",
+        "libpkcs8": "libpkcs8_nostd",
+        "libsec1": "libsec1_nostd",
+        "libzeroize": "libzeroize_nostd"
+      },
+      "package": {
+        "sec1": {
+          "add_module_block": "cargo2android_viz.bp",
+          "force_rlib": true,
+          "host_supported": false,
+          "no_std": true
+        }
+      }
+    }
+  ]
+}
diff --git a/crates/sec1/patches/rules.mk.diff b/crates/sec1/patches/rules.mk.diff
new file mode 100644
index 0000000..0556b5b
--- /dev/null
+++ b/crates/sec1/patches/rules.mk.diff
@@ -0,0 +1,17 @@
+diff --git b/rules.mk a/rules.mk
+index 4e31c7c..17355aa 100644
+--- b/rules.mk
++++ a/rules.mk
+@@ -15,9 +15,8 @@ MODULE_RUSTFLAGS += \
+ 	--cfg 'feature="zeroize"' \
+ 
+ MODULE_LIBRARY_DEPS := \
+-	external/rust/crates/base16ct \
+-	external/rust/crates/generic-array \
+-	external/rust/crates/serdect \
+-	external/rust/crates/subtle \
++	external/rust/crates/der \
++	external/rust/crates/pkcs8 \
++	external/rust/crates/zeroize \
+ 
+ include make/library.mk
diff --git a/crates/sec1/patches/std.diff b/crates/sec1/patches/std.diff
new file mode 100644
index 0000000..93800a5
--- /dev/null
+++ b/crates/sec1/patches/std.diff
@@ -0,0 +1,15 @@
+diff --git a/src/lib.rs b/src/lib.rs
+index 8e01b1f..6b658b6 100644
+--- a/src/lib.rs
++++ b/src/lib.rs
+@@ -17,6 +17,10 @@
+ //! serializers/deserializers will autodetect if a "human friendly" textual
+ //! encoding is being used, and if so encode the points as hexadecimal.
+ 
++/// Local Android change: Use std to allow building as a dylib.
++#[cfg(android_dylib)]
++extern crate std;
++
+ #[cfg(feature = "alloc")]
+ #[allow(unused_extern_crates)]
+ extern crate alloc;
diff --git a/crates/sec1/src/error.rs b/crates/sec1/src/error.rs
new file mode 100644
index 0000000..0d8bc8b
--- /dev/null
+++ b/crates/sec1/src/error.rs
@@ -0,0 +1,82 @@
+//! Error types
+
+use core::fmt;
+
+#[cfg(feature = "pem")]
+use der::pem;
+
+/// Result type with `sec1` crate's [`Error`] type.
+pub type Result<T> = core::result::Result<T, Error>;
+
+/// Error type
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+#[non_exhaustive]
+pub enum Error {
+    /// ASN.1 DER-related errors.
+    #[cfg(feature = "der")]
+    Asn1(der::Error),
+
+    /// Cryptographic errors.
+    ///
+    /// These can be used by EC implementations to signal that a key is
+    /// invalid for cryptographic reasons. This means the document parsed
+    /// correctly, but one of the values contained within was invalid, e.g.
+    /// a number expected to be a prime was not a prime.
+    Crypto,
+
+    /// PKCS#8 errors.
+    #[cfg(feature = "pkcs8")]
+    Pkcs8(pkcs8::Error),
+
+    /// Errors relating to the `Elliptic-Curve-Point-to-Octet-String` or
+    /// `Octet-String-to-Elliptic-Curve-Point` encodings.
+    PointEncoding,
+
+    /// Version errors
+    Version,
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            #[cfg(feature = "der")]
+            Error::Asn1(err) => write!(f, "SEC1 ASN.1 error: {}", err),
+            Error::Crypto => f.write_str("SEC1 cryptographic error"),
+            #[cfg(feature = "pkcs8")]
+            Error::Pkcs8(err) => write!(f, "{}", err),
+            Error::PointEncoding => f.write_str("elliptic curve point encoding error"),
+            Error::Version => f.write_str("SEC1 version error"),
+        }
+    }
+}
+
+#[cfg(feature = "der")]
+impl From<der::Error> for Error {
+    fn from(err: der::Error) -> Error {
+        Error::Asn1(err)
+    }
+}
+
+#[cfg(feature = "pem")]
+impl From<pem::Error> for Error {
+    fn from(err: pem::Error) -> Error {
+        der::Error::from(err).into()
+    }
+}
+
+#[cfg(feature = "pkcs8")]
+impl From<pkcs8::Error> for Error {
+    fn from(err: pkcs8::Error) -> Error {
+        Error::Pkcs8(err)
+    }
+}
+
+#[cfg(feature = "pkcs8")]
+impl From<pkcs8::spki::Error> for Error {
+    fn from(err: pkcs8::spki::Error) -> Error {
+        Error::Pkcs8(pkcs8::Error::PublicKey(err))
+    }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for Error {}
diff --git a/crates/sec1/src/lib.rs b/crates/sec1/src/lib.rs
new file mode 100644
index 0000000..6b44d3d
--- /dev/null
+++ b/crates/sec1/src/lib.rs
@@ -0,0 +1,81 @@
+#![no_std]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
+    html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg"
+)]
+#![forbid(unsafe_code)]
+#![warn(
+    clippy::mod_module_files,
+    clippy::unwrap_used,
+    missing_docs,
+    rust_2018_idioms,
+    unused_qualifications
+)]
+
+//! ## `serde` support
+//!
+//! When the `serde` feature of this crate is enabled, the [`EncodedPoint`]
+//! type receives impls of [`serde::Serialize`] and [`serde::Deserialize`].
+//!
+//! Additionally, when both the `alloc` and `serde` features are enabled, the
+//! serializers/deserializers will autodetect if a "human friendly" textual
+//! encoding is being used, and if so encode the points as hexadecimal.
+
+/// Local Android change: Use std to allow building as a dylib.
+#[cfg(android_dylib)]
+extern crate std;
+
+#[cfg(feature = "alloc")]
+#[allow(unused_extern_crates)]
+extern crate alloc;
+#[cfg(feature = "std")]
+extern crate std;
+
+#[cfg(feature = "point")]
+pub mod point;
+
+mod error;
+#[cfg(feature = "der")]
+mod parameters;
+#[cfg(feature = "der")]
+mod private_key;
+#[cfg(feature = "der")]
+mod traits;
+
+#[cfg(feature = "der")]
+pub use der;
+
+pub use crate::error::{Error, Result};
+
+#[cfg(feature = "point")]
+pub use crate::point::EncodedPoint;
+
+#[cfg(feature = "point")]
+pub use generic_array::typenum::consts;
+
+#[cfg(feature = "der")]
+pub use crate::{parameters::EcParameters, private_key::EcPrivateKey, traits::DecodeEcPrivateKey};
+
+#[cfg(all(feature = "alloc", feature = "der"))]
+pub use crate::traits::EncodeEcPrivateKey;
+
+#[cfg(feature = "pem")]
+pub use der::pem::{self, LineEnding};
+
+#[cfg(feature = "pkcs8")]
+pub use pkcs8;
+
+#[cfg(feature = "pkcs8")]
+use pkcs8::ObjectIdentifier;
+
+#[cfg(all(doc, feature = "serde"))]
+use serdect::serde;
+
+/// Algorithm [`ObjectIdentifier`] for elliptic curve public key cryptography
+/// (`id-ecPublicKey`).
+///
+/// <http://oid-info.com/get/1.2.840.10045.2.1>
+#[cfg(feature = "pkcs8")]
+pub const ALGORITHM_OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.2.840.10045.2.1");
diff --git a/crates/sec1/src/parameters.rs b/crates/sec1/src/parameters.rs
new file mode 100644
index 0000000..20458e6
--- /dev/null
+++ b/crates/sec1/src/parameters.rs
@@ -0,0 +1,75 @@
+use der::{
+    asn1::{AnyRef, ObjectIdentifier},
+    DecodeValue, EncodeValue, FixedTag, Header, Length, Reader, Tag, Writer,
+};
+
+/// Elliptic curve parameters as described in
+/// [RFC5480 Section 2.1.1](https://datatracker.ietf.org/doc/html/rfc5480#section-2.1.1):
+///
+/// ```text
+/// ECParameters ::= CHOICE {
+///   namedCurve         OBJECT IDENTIFIER
+///   -- implicitCurve   NULL
+///   -- specifiedCurve  SpecifiedECDomain
+/// }
+///   -- implicitCurve and specifiedCurve MUST NOT be used in PKIX.
+///   -- Details for SpecifiedECDomain can be found in [X9.62].
+///   -- Any future additions to this CHOICE should be coordinated
+///   -- with ANSI X9.
+/// ```
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum EcParameters {
+    /// Elliptic curve named by a particular OID.
+    ///
+    /// > namedCurve identifies all the required values for a particular
+    /// > set of elliptic curve domain parameters to be represented by an
+    /// > object identifier.
+    NamedCurve(ObjectIdentifier),
+}
+
+impl<'a> DecodeValue<'a> for EcParameters {
+    fn decode_value<R: Reader<'a>>(decoder: &mut R, header: Header) -> der::Result<Self> {
+        ObjectIdentifier::decode_value(decoder, header).map(Self::NamedCurve)
+    }
+}
+
+impl EncodeValue for EcParameters {
+    fn value_len(&self) -> der::Result<Length> {
+        match self {
+            Self::NamedCurve(oid) => oid.value_len(),
+        }
+    }
+
+    fn encode_value(&self, writer: &mut impl Writer) -> der::Result<()> {
+        match self {
+            Self::NamedCurve(oid) => oid.encode_value(writer),
+        }
+    }
+}
+
+impl EcParameters {
+    /// Obtain the `namedCurve` OID.
+    pub fn named_curve(self) -> Option<ObjectIdentifier> {
+        match self {
+            Self::NamedCurve(oid) => Some(oid),
+        }
+    }
+}
+
+impl<'a> From<&'a EcParameters> for AnyRef<'a> {
+    fn from(params: &'a EcParameters) -> AnyRef<'a> {
+        match params {
+            EcParameters::NamedCurve(oid) => oid.into(),
+        }
+    }
+}
+
+impl From<ObjectIdentifier> for EcParameters {
+    fn from(oid: ObjectIdentifier) -> EcParameters {
+        EcParameters::NamedCurve(oid)
+    }
+}
+
+impl FixedTag for EcParameters {
+    const TAG: Tag = Tag::ObjectIdentifier;
+}
diff --git a/crates/sec1/src/point.rs b/crates/sec1/src/point.rs
new file mode 100644
index 0000000..818f5bd
--- /dev/null
+++ b/crates/sec1/src/point.rs
@@ -0,0 +1,776 @@
+//! Support for the SEC1 `Elliptic-Curve-Point-to-Octet-String` and
+//! `Octet-String-to-Elliptic-Curve-Point` encoding algorithms.
+//!
+//! Described in [SEC1: Elliptic Curve Cryptography] (Version 2.0) section 2.3.3 (p.10).
+//!
+//! [SEC1: Elliptic Curve Cryptography]: https://www.secg.org/sec1-v2.pdf
+
+use crate::{Error, Result};
+use base16ct::HexDisplay;
+use core::{
+    cmp::Ordering,
+    fmt::{self, Debug},
+    hash::{Hash, Hasher},
+    ops::Add,
+    str,
+};
+use generic_array::{
+    typenum::{U1, U24, U28, U32, U48, U66},
+    ArrayLength, GenericArray,
+};
+
+#[cfg(feature = "alloc")]
+use alloc::boxed::Box;
+
+#[cfg(feature = "serde")]
+use serdect::serde::{de, ser, Deserialize, Serialize};
+
+#[cfg(feature = "subtle")]
+use subtle::{Choice, ConditionallySelectable};
+
+#[cfg(feature = "zeroize")]
+use zeroize::Zeroize;
+
+/// Trait for supported modulus sizes which precomputes the typenums for
+/// various point encodings so they don't need to be included as bounds.
+// TODO(tarcieri): replace this all with const generic expressions.
+pub trait ModulusSize: 'static + ArrayLength<u8> + Copy + Debug {
+    /// Size of a compressed point for the given elliptic curve when encoded
+    /// using the SEC1 `Elliptic-Curve-Point-to-Octet-String` algorithm
+    /// (including leading `0x02` or `0x03` tag byte).
+    type CompressedPointSize: 'static + ArrayLength<u8> + Copy + Debug;
+
+    /// Size of an uncompressed point for the given elliptic curve when encoded
+    /// using the SEC1 `Elliptic-Curve-Point-to-Octet-String` algorithm
+    /// (including leading `0x04` tag byte).
+    type UncompressedPointSize: 'static + ArrayLength<u8> + Copy + Debug;
+
+    /// Size of an untagged point for given elliptic curve, i.e. size of two
+    /// serialized base field elements.
+    type UntaggedPointSize: 'static + ArrayLength<u8> + Copy + Debug;
+}
+
+macro_rules! impl_modulus_size {
+    ($($size:ty),+) => {
+        $(impl ModulusSize for $size {
+            type CompressedPointSize = <$size as Add<U1>>::Output;
+            type UncompressedPointSize = <Self::UntaggedPointSize as Add<U1>>::Output;
+            type UntaggedPointSize = <$size as Add>::Output;
+        })+
+    }
+}
+
+impl_modulus_size!(U24, U28, U32, U48, U66);
+
+/// SEC1 encoded curve point.
+///
+/// This type is an enum over the compressed and uncompressed encodings,
+/// useful for cases where either encoding can be supported, or conversions
+/// between the two forms.
+#[derive(Clone, Default)]
+pub struct EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    bytes: GenericArray<u8, Size::UncompressedPointSize>,
+}
+
+#[allow(clippy::len_without_is_empty)]
+impl<Size> EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    /// Decode elliptic curve point (compressed or uncompressed) from the
+    /// `Elliptic-Curve-Point-to-Octet-String` encoding described in
+    /// SEC 1: Elliptic Curve Cryptography (Version 2.0) section
+    /// 2.3.3 (page 10).
+    ///
+    /// <http://www.secg.org/sec1-v2.pdf>
+    pub fn from_bytes(input: impl AsRef<[u8]>) -> Result<Self> {
+        let input = input.as_ref();
+
+        // Validate tag
+        let tag = input
+            .first()
+            .cloned()
+            .ok_or(Error::PointEncoding)
+            .and_then(Tag::from_u8)?;
+
+        // Validate length
+        let expected_len = tag.message_len(Size::to_usize());
+
+        if input.len() != expected_len {
+            return Err(Error::PointEncoding);
+        }
+
+        let mut bytes = GenericArray::default();
+        bytes[..expected_len].copy_from_slice(input);
+        Ok(Self { bytes })
+    }
+
+    /// Decode elliptic curve point from raw uncompressed coordinates, i.e.
+    /// encoded as the concatenated `x || y` coordinates with no leading SEC1
+    /// tag byte (which would otherwise be `0x04` for an uncompressed point).
+    pub fn from_untagged_bytes(bytes: &GenericArray<u8, Size::UntaggedPointSize>) -> Self {
+        let (x, y) = bytes.split_at(Size::to_usize());
+        Self::from_affine_coordinates(x.into(), y.into(), false)
+    }
+
+    /// Encode an elliptic curve point from big endian serialized coordinates
+    /// (with optional point compression)
+    pub fn from_affine_coordinates(
+        x: &GenericArray<u8, Size>,
+        y: &GenericArray<u8, Size>,
+        compress: bool,
+    ) -> Self {
+        let tag = if compress {
+            Tag::compress_y(y.as_slice())
+        } else {
+            Tag::Uncompressed
+        };
+
+        let mut bytes = GenericArray::default();
+        bytes[0] = tag.into();
+        bytes[1..(Size::to_usize() + 1)].copy_from_slice(x);
+
+        if !compress {
+            bytes[(Size::to_usize() + 1)..].copy_from_slice(y);
+        }
+
+        Self { bytes }
+    }
+
+    /// Return [`EncodedPoint`] representing the additive identity
+    /// (a.k.a. point at infinity)
+    pub fn identity() -> Self {
+        Self::default()
+    }
+
+    /// Get the length of the encoded point in bytes
+    pub fn len(&self) -> usize {
+        self.tag().message_len(Size::to_usize())
+    }
+
+    /// Get byte slice containing the serialized [`EncodedPoint`].
+    pub fn as_bytes(&self) -> &[u8] {
+        &self.bytes[..self.len()]
+    }
+
+    /// Get boxed byte slice containing the serialized [`EncodedPoint`]
+    #[cfg(feature = "alloc")]
+    pub fn to_bytes(&self) -> Box<[u8]> {
+        self.as_bytes().to_vec().into_boxed_slice()
+    }
+
+    /// Is this [`EncodedPoint`] compact?
+    pub fn is_compact(&self) -> bool {
+        self.tag().is_compact()
+    }
+
+    /// Is this [`EncodedPoint`] compressed?
+    pub fn is_compressed(&self) -> bool {
+        self.tag().is_compressed()
+    }
+
+    /// Is this [`EncodedPoint`] the additive identity? (a.k.a. point at infinity)
+    pub fn is_identity(&self) -> bool {
+        self.tag().is_identity()
+    }
+
+    /// Compress this [`EncodedPoint`], returning a new [`EncodedPoint`].
+    pub fn compress(&self) -> Self {
+        match self.coordinates() {
+            Coordinates::Compressed { .. }
+            | Coordinates::Compact { .. }
+            | Coordinates::Identity => self.clone(),
+            Coordinates::Uncompressed { x, y } => Self::from_affine_coordinates(x, y, true),
+        }
+    }
+
+    /// Get the SEC1 tag for this [`EncodedPoint`]
+    pub fn tag(&self) -> Tag {
+        // Tag is ensured valid by the constructor
+        Tag::from_u8(self.bytes[0]).expect("invalid tag")
+    }
+
+    /// Get the [`Coordinates`] for this [`EncodedPoint`].
+    #[inline]
+    pub fn coordinates(&self) -> Coordinates<'_, Size> {
+        if self.is_identity() {
+            return Coordinates::Identity;
+        }
+
+        let (x, y) = self.bytes[1..].split_at(Size::to_usize());
+
+        if self.is_compressed() {
+            Coordinates::Compressed {
+                x: x.into(),
+                y_is_odd: self.tag() as u8 & 1 == 1,
+            }
+        } else if self.is_compact() {
+            Coordinates::Compact { x: x.into() }
+        } else {
+            Coordinates::Uncompressed {
+                x: x.into(),
+                y: y.into(),
+            }
+        }
+    }
+
+    /// Get the x-coordinate for this [`EncodedPoint`].
+    ///
+    /// Returns `None` if this point is the identity point.
+    pub fn x(&self) -> Option<&GenericArray<u8, Size>> {
+        match self.coordinates() {
+            Coordinates::Identity => None,
+            Coordinates::Compressed { x, .. } => Some(x),
+            Coordinates::Uncompressed { x, .. } => Some(x),
+            Coordinates::Compact { x } => Some(x),
+        }
+    }
+
+    /// Get the y-coordinate for this [`EncodedPoint`].
+    ///
+    /// Returns `None` if this point is compressed or the identity point.
+    pub fn y(&self) -> Option<&GenericArray<u8, Size>> {
+        match self.coordinates() {
+            Coordinates::Compressed { .. } | Coordinates::Identity => None,
+            Coordinates::Uncompressed { y, .. } => Some(y),
+            Coordinates::Compact { .. } => None,
+        }
+    }
+}
+
+impl<Size> AsRef<[u8]> for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    #[inline]
+    fn as_ref(&self) -> &[u8] {
+        self.as_bytes()
+    }
+}
+
+#[cfg(feature = "subtle")]
+impl<Size> ConditionallySelectable for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+    <Size::UncompressedPointSize as ArrayLength<u8>>::ArrayType: Copy,
+{
+    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+        let mut bytes = GenericArray::default();
+
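+        // Select byte-by-byte so the result never depends on `choice` via a
+        // branch, preserving the constant-time behavior provided by `subtle`.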
+        for (i, byte) in bytes.iter_mut().enumerate() {
+            *byte = u8::conditional_select(&a.bytes[i], &b.bytes[i], choice);
+        }
+
+        Self { bytes }
+    }
+}
+
+impl<Size> Copy for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+    <Size::UncompressedPointSize as ArrayLength<u8>>::ArrayType: Copy,
+{
+}
+
+impl<Size> Debug for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "EncodedPoint({:?})", self.coordinates())
+    }
+}
+
+impl<Size: ModulusSize> Eq for EncodedPoint<Size> {}
+
+impl<Size> PartialEq for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    fn eq(&self, other: &Self) -> bool {
+        self.as_bytes() == other.as_bytes()
+    }
+}
+
+impl<Size> Hash for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.as_bytes().hash(state)
+    }
+}
+
+impl<Size> PartialOrd for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl<Size> Ord for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.as_bytes().cmp(other.as_bytes())
+    }
+}
+
+impl<Size> TryFrom<&[u8]> for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    type Error = Error;
+
+    fn try_from(bytes: &[u8]) -> Result<Self> {
+        Self::from_bytes(bytes)
+    }
+}
+
+#[cfg(feature = "zeroize")]
+impl<Size> Zeroize for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    fn zeroize(&mut self) {
+        self.bytes.zeroize();
+        *self = Self::identity();
+    }
+}
+
+impl<Size> fmt::Display for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:X}", self)
+    }
+}
+
+impl<Size> fmt::LowerHex for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:x}", HexDisplay(self.as_bytes()))
+    }
+}
+
+impl<Size> fmt::UpperHex for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:X}", HexDisplay(self.as_bytes()))
+    }
+}
+
+/// Decode a SEC1-encoded point from hexadecimal.
+///
+/// Upper and lower case hexadecimal are both accepted; however, mixed case is
+/// rejected.
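+///
+/// Illustrative sketch (added here, not upstream documentation), assuming the
+/// 32-byte `U32` modulus size used in this module's tests:
+///
+/// ```ignore
+/// use core::str::FromStr;
+///
+/// let hex = "021111111111111111111111111111111111111111111111111111111111111111";
+/// let point = EncodedPoint::<U32>::from_str(hex).unwrap();
+/// assert!(point.is_compressed());
+/// ```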
+impl<Size> str::FromStr for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    type Err = Error;
+
+    fn from_str(hex: &str) -> Result<Self> {
+        let mut buf = GenericArray::<u8, Size::UncompressedPointSize>::default();
+        base16ct::mixed::decode(hex, &mut buf)
+            .map_err(|_| Error::PointEncoding)
+            .and_then(Self::from_bytes)
+    }
+}
+
+#[cfg(feature = "serde")]
+impl<Size> Serialize for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    fn serialize<S>(&self, serializer: S) -> core::result::Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        serdect::slice::serialize_hex_upper_or_bin(&self.as_bytes(), serializer)
+    }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, Size> Deserialize<'de> for EncodedPoint<Size>
+where
+    Size: ModulusSize,
+{
+    fn deserialize<D>(deserializer: D) -> core::result::Result<Self, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        let bytes = serdect::slice::deserialize_hex_or_bin_vec(deserializer)?;
+        Self::from_bytes(bytes).map_err(de::Error::custom)
+    }
+}
+
+/// Enum representing the coordinates of either compressed or uncompressed
+/// SEC1-encoded elliptic curve points.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum Coordinates<'a, Size: ModulusSize> {
+    /// Identity point (a.k.a. point at infinity)
+    Identity,
+
+    /// Compact curve point
+    Compact {
+        /// x-coordinate
+        x: &'a GenericArray<u8, Size>,
+    },
+
+    /// Compressed curve point
+    Compressed {
+        /// x-coordinate
+        x: &'a GenericArray<u8, Size>,
+
+        /// Is the y-coordinate odd?
+        y_is_odd: bool,
+    },
+
+    /// Uncompressed curve point
+    Uncompressed {
+        /// x-coordinate
+        x: &'a GenericArray<u8, Size>,
+
+        /// y-coordinate
+        y: &'a GenericArray<u8, Size>,
+    },
+}
+
+impl<'a, Size: ModulusSize> Coordinates<'a, Size> {
+    /// Get the tag octet needed to encode this set of [`Coordinates`]
+    pub fn tag(&self) -> Tag {
+        match self {
+            Coordinates::Compact { .. } => Tag::Compact,
+            Coordinates::Compressed { y_is_odd, .. } => {
+                if *y_is_odd {
+                    Tag::CompressedOddY
+                } else {
+                    Tag::CompressedEvenY
+                }
+            }
+            Coordinates::Identity => Tag::Identity,
+            Coordinates::Uncompressed { .. } => Tag::Uncompressed,
+        }
+    }
+}
+
+/// Tag byte used by the `Elliptic-Curve-Point-to-Octet-String` encoding.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+#[repr(u8)]
+pub enum Tag {
+    /// Identity point (`0x00`)
+    Identity = 0,
+
+    /// Compressed point with even y-coordinate (`0x02`)
+    CompressedEvenY = 2,
+
+    /// Compressed point with odd y-coordinate (`0x03`)
+    CompressedOddY = 3,
+
+    /// Uncompressed point (`0x04`)
+    Uncompressed = 4,
+
+    /// Compact point (`0x05`)
+    Compact = 5,
+}
+
+impl Tag {
+    /// Parse a tag value from a byte
+    pub fn from_u8(byte: u8) -> Result<Self> {
+        match byte {
+            0 => Ok(Tag::Identity),
+            2 => Ok(Tag::CompressedEvenY),
+            3 => Ok(Tag::CompressedOddY),
+            4 => Ok(Tag::Uncompressed),
+            5 => Ok(Tag::Compact),
+            _ => Err(Error::PointEncoding),
+        }
+    }
+
+    /// Is this point compact?
+    pub fn is_compact(self) -> bool {
+        matches!(self, Tag::Compact)
+    }
+
+    /// Is this point compressed?
+    pub fn is_compressed(self) -> bool {
+        matches!(self, Tag::CompressedEvenY | Tag::CompressedOddY)
+    }
+
+    /// Is this point the identity point?
+    pub fn is_identity(self) -> bool {
+        self == Tag::Identity
+    }
+
+    /// Compute the expected total message length for a message prefixed
+    /// with this tag (including the tag byte), given the field element size
+    /// (in bytes) for a particular elliptic curve.
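+    ///
+    /// For example, with 32-byte field elements: the identity encodes to
+    /// 1 byte, compressed and compact points to 1 + 32 = 33 bytes, and
+    /// uncompressed points to 1 + 2 * 32 = 65 bytes.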
+    pub fn message_len(self, field_element_size: usize) -> usize {
+        1 + match self {
+            Tag::Identity => 0,
+            Tag::CompressedEvenY | Tag::CompressedOddY => field_element_size,
+            Tag::Uncompressed => field_element_size * 2,
+            Tag::Compact => field_element_size,
+        }
+    }
+
+    /// Compress the given y-coordinate, returning a `Tag::Compressed*` value
+    fn compress_y(y: &[u8]) -> Self {
+        // Is the y-coordinate odd in the SEC1 sense: `self mod 2 == 1`?
+        if y.as_ref().last().expect("empty y-coordinate") & 1 == 1 {
+            Tag::CompressedOddY
+        } else {
+            Tag::CompressedEvenY
+        }
+    }
+}
+
+impl TryFrom<u8> for Tag {
+    type Error = Error;
+
+    fn try_from(byte: u8) -> Result<Self> {
+        Self::from_u8(byte)
+    }
+}
+
+impl From<Tag> for u8 {
+    fn from(tag: Tag) -> u8 {
+        tag as u8
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{Coordinates, Tag};
+    use core::str::FromStr;
+    use generic_array::{typenum::U32, GenericArray};
+    use hex_literal::hex;
+
+    #[cfg(feature = "alloc")]
+    use alloc::string::ToString;
+
+    #[cfg(feature = "subtle")]
+    use subtle::ConditionallySelectable;
+
+    type EncodedPoint = super::EncodedPoint<U32>;
+
+    /// Identity point
+    const IDENTITY_BYTES: [u8; 1] = [0];
+
+    /// Example uncompressed point
+    const UNCOMPRESSED_BYTES: [u8; 65] = hex!("0411111111111111111111111111111111111111111111111111111111111111112222222222222222222222222222222222222222222222222222222222222222");
+
+    /// Example compressed point: `UNCOMPRESSED_BYTES` after point compression
+    const COMPRESSED_BYTES: [u8; 33] =
+        hex!("021111111111111111111111111111111111111111111111111111111111111111");
+
+    #[test]
+    fn decode_compressed_point() {
+        // Even y-coordinate
+        let compressed_even_y_bytes =
+            hex!("020100000000000000000000000000000000000000000000000000000000000000");
+
+        let compressed_even_y = EncodedPoint::from_bytes(&compressed_even_y_bytes[..]).unwrap();
+
+        assert!(compressed_even_y.is_compressed());
+        assert_eq!(compressed_even_y.tag(), Tag::CompressedEvenY);
+        assert_eq!(compressed_even_y.len(), 33);
+        assert_eq!(compressed_even_y.as_bytes(), &compressed_even_y_bytes[..]);
+
+        assert_eq!(
+            compressed_even_y.coordinates(),
+            Coordinates::Compressed {
+                x: &hex!("0100000000000000000000000000000000000000000000000000000000000000").into(),
+                y_is_odd: false
+            }
+        );
+
+        assert_eq!(
+            compressed_even_y.x().unwrap(),
+            &hex!("0100000000000000000000000000000000000000000000000000000000000000").into()
+        );
+        assert_eq!(compressed_even_y.y(), None);
+
+        // Odd y-coordinate
+        let compressed_odd_y_bytes =
+            hex!("030200000000000000000000000000000000000000000000000000000000000000");
+
+        let compressed_odd_y = EncodedPoint::from_bytes(&compressed_odd_y_bytes[..]).unwrap();
+
+        assert!(compressed_odd_y.is_compressed());
+        assert_eq!(compressed_odd_y.tag(), Tag::CompressedOddY);
+        assert_eq!(compressed_odd_y.len(), 33);
+        assert_eq!(compressed_odd_y.as_bytes(), &compressed_odd_y_bytes[..]);
+
+        assert_eq!(
+            compressed_odd_y.coordinates(),
+            Coordinates::Compressed {
+                x: &hex!("0200000000000000000000000000000000000000000000000000000000000000").into(),
+                y_is_odd: true
+            }
+        );
+
+        assert_eq!(
+            compressed_odd_y.x().unwrap(),
+            &hex!("0200000000000000000000000000000000000000000000000000000000000000").into()
+        );
+        assert_eq!(compressed_odd_y.y(), None);
+    }
+
+    #[test]
+    fn decode_uncompressed_point() {
+        let uncompressed_point = EncodedPoint::from_bytes(&UNCOMPRESSED_BYTES[..]).unwrap();
+
+        assert!(!uncompressed_point.is_compressed());
+        assert_eq!(uncompressed_point.tag(), Tag::Uncompressed);
+        assert_eq!(uncompressed_point.len(), 65);
+        assert_eq!(uncompressed_point.as_bytes(), &UNCOMPRESSED_BYTES[..]);
+
+        assert_eq!(
+            uncompressed_point.coordinates(),
+            Coordinates::Uncompressed {
+                x: &hex!("1111111111111111111111111111111111111111111111111111111111111111").into(),
+                y: &hex!("2222222222222222222222222222222222222222222222222222222222222222").into()
+            }
+        );
+
+        assert_eq!(
+            uncompressed_point.x().unwrap(),
+            &hex!("1111111111111111111111111111111111111111111111111111111111111111").into()
+        );
+        assert_eq!(
+            uncompressed_point.y().unwrap(),
+            &hex!("2222222222222222222222222222222222222222222222222222222222222222").into()
+        );
+    }
+
+    #[test]
+    fn decode_identity() {
+        let identity_point = EncodedPoint::from_bytes(&IDENTITY_BYTES[..]).unwrap();
+        assert!(identity_point.is_identity());
+        assert_eq!(identity_point.tag(), Tag::Identity);
+        assert_eq!(identity_point.len(), 1);
+        assert_eq!(identity_point.as_bytes(), &IDENTITY_BYTES[..]);
+        assert_eq!(identity_point.coordinates(), Coordinates::Identity);
+        assert_eq!(identity_point.x(), None);
+        assert_eq!(identity_point.y(), None);
+    }
+
+    #[test]
+    fn decode_invalid_tag() {
+        let mut compressed_bytes = COMPRESSED_BYTES;
+        let mut uncompressed_bytes = UNCOMPRESSED_BYTES;
+
+        for bytes in &mut [&mut compressed_bytes[..], &mut uncompressed_bytes[..]] {
+            for tag in 0..=0xFF {
+                // valid tags
+                if tag == 2 || tag == 3 || tag == 4 || tag == 5 {
+                    continue;
+                }
+
+                (*bytes)[0] = tag;
+                let decode_result = EncodedPoint::from_bytes(&*bytes);
+                assert!(decode_result.is_err());
+            }
+        }
+    }
+
+    #[test]
+    fn decode_truncated_point() {
+        for bytes in &[&COMPRESSED_BYTES[..], &UNCOMPRESSED_BYTES[..]] {
+            for len in 0..bytes.len() {
+                let decode_result = EncodedPoint::from_bytes(&bytes[..len]);
+                assert!(decode_result.is_err());
+            }
+        }
+    }
+
+    #[test]
+    fn from_untagged_point() {
+        let untagged_bytes = hex!("11111111111111111111111111111111111111111111111111111111111111112222222222222222222222222222222222222222222222222222222222222222");
+        let uncompressed_point =
+            EncodedPoint::from_untagged_bytes(GenericArray::from_slice(&untagged_bytes[..]));
+        assert_eq!(uncompressed_point.as_bytes(), &UNCOMPRESSED_BYTES[..]);
+    }
+
+    #[test]
+    fn from_affine_coordinates() {
+        let x = hex!("1111111111111111111111111111111111111111111111111111111111111111");
+        let y = hex!("2222222222222222222222222222222222222222222222222222222222222222");
+
+        let uncompressed_point = EncodedPoint::from_affine_coordinates(&x.into(), &y.into(), false);
+        assert_eq!(uncompressed_point.as_bytes(), &UNCOMPRESSED_BYTES[..]);
+
+        let compressed_point = EncodedPoint::from_affine_coordinates(&x.into(), &y.into(), true);
+        assert_eq!(compressed_point.as_bytes(), &COMPRESSED_BYTES[..]);
+    }
+
+    #[test]
+    fn compress() {
+        let uncompressed_point = EncodedPoint::from_bytes(&UNCOMPRESSED_BYTES[..]).unwrap();
+        let compressed_point = uncompressed_point.compress();
+        assert_eq!(compressed_point.as_bytes(), &COMPRESSED_BYTES[..]);
+    }
+
+    #[cfg(feature = "subtle")]
+    #[test]
+    fn conditional_select() {
+        let a = EncodedPoint::from_bytes(&COMPRESSED_BYTES[..]).unwrap();
+        let b = EncodedPoint::from_bytes(&UNCOMPRESSED_BYTES[..]).unwrap();
+
+        let a_selected = EncodedPoint::conditional_select(&a, &b, 0.into());
+        assert_eq!(a, a_selected);
+
+        let b_selected = EncodedPoint::conditional_select(&a, &b, 1.into());
+        assert_eq!(b, b_selected);
+    }
+
+    #[test]
+    fn identity() {
+        let identity_point = EncodedPoint::identity();
+        assert_eq!(identity_point.tag(), Tag::Identity);
+        assert_eq!(identity_point.len(), 1);
+        assert_eq!(identity_point.as_bytes(), &IDENTITY_BYTES[..]);
+
+        // identity is default
+        assert_eq!(identity_point, EncodedPoint::default());
+    }
+
+    #[test]
+    fn decode_hex() {
+        let point = EncodedPoint::from_str(
+            "021111111111111111111111111111111111111111111111111111111111111111",
+        )
+        .unwrap();
+        assert_eq!(point.as_bytes(), COMPRESSED_BYTES);
+    }
+
+    #[cfg(feature = "alloc")]
+    #[test]
+    fn to_bytes() {
+        let uncompressed_point = EncodedPoint::from_bytes(&UNCOMPRESSED_BYTES[..]).unwrap();
+        assert_eq!(&*uncompressed_point.to_bytes(), &UNCOMPRESSED_BYTES[..]);
+    }
+
+    #[cfg(feature = "alloc")]
+    #[test]
+    fn to_string() {
+        let point = EncodedPoint::from_bytes(&COMPRESSED_BYTES[..]).unwrap();
+        assert_eq!(
+            point.to_string(),
+            "021111111111111111111111111111111111111111111111111111111111111111"
+        );
+    }
+}
diff --git a/crates/sec1/src/private_key.rs b/crates/sec1/src/private_key.rs
new file mode 100644
index 0000000..5315799
--- /dev/null
+++ b/crates/sec1/src/private_key.rs
@@ -0,0 +1,177 @@
+//! SEC1 elliptic curve private key support.
+//!
+//! Support for ASN.1 DER-encoded elliptic curve private keys as described in
+//! SEC1: Elliptic Curve Cryptography (Version 2.0) Appendix C.4 (p.108):
+//!
+//! <https://www.secg.org/sec1-v2.pdf>
+
+use crate::{EcParameters, Error, Result};
+use core::fmt;
+use der::{
+    asn1::{BitStringRef, ContextSpecific, ContextSpecificRef, OctetStringRef},
+    Decode, DecodeValue, Encode, EncodeValue, Header, Length, Reader, Sequence, Tag, TagMode,
+    TagNumber, Writer,
+};
+
+#[cfg(all(feature = "alloc", feature = "zeroize"))]
+use der::SecretDocument;
+
+#[cfg(feature = "pem")]
+use der::pem::PemLabel;
+
+/// `ECPrivateKey` version.
+///
+/// From [RFC5915 Section 3]:
+/// > version specifies the syntax version number of the elliptic curve
+/// > private key structure.  For this version of the document, it SHALL
+/// > be set to ecPrivkeyVer1, which is of type INTEGER and whose value
+/// > is one (1).
+///
+/// [RFC5915 Section 3]: https://datatracker.ietf.org/doc/html/rfc5915#section-3
+const VERSION: u8 = 1;
+
+/// Context-specific tag number for the elliptic curve parameters.
+const EC_PARAMETERS_TAG: TagNumber = TagNumber::new(0);
+
+/// Context-specific tag number for the public key.
+const PUBLIC_KEY_TAG: TagNumber = TagNumber::new(1);
+
+/// SEC1 elliptic curve private key.
+///
+/// Described in [SEC1: Elliptic Curve Cryptography (Version 2.0)]
+/// Appendix C.4 (p.108) and also [RFC5915 Section 3]:
+///
+/// ```text
+/// ECPrivateKey ::= SEQUENCE {
+///   version        INTEGER { ecPrivkeyVer1(1) } (ecPrivkeyVer1),
+///   privateKey     OCTET STRING,
+///   parameters [0] ECParameters {{ NamedCurve }} OPTIONAL,
+///   publicKey  [1] BIT STRING OPTIONAL
+/// }
+/// ```
+///
+/// When encoded as PEM (text), keys in this format begin with the following:
+///
+/// ```text
+/// -----BEGIN EC PRIVATE KEY-----
+/// ```
+///
+/// [SEC1: Elliptic Curve Cryptography (Version 2.0)]: https://www.secg.org/sec1-v2.pdf
+/// [RFC5915 Section 3]: https://datatracker.ietf.org/doc/html/rfc5915#section-3
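+///
+/// Illustrative decoding sketch (not from the upstream docs); `der_bytes` is a
+/// placeholder for DER-encoded key material such as the bundled
+/// `tests/examples/p256-priv.der`:
+///
+/// ```ignore
+/// let key = EcPrivateKey::try_from(der_bytes)?;
+/// assert_eq!(key.private_key.len(), 32); // scalar size for NIST P-256
+/// ```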
+#[derive(Clone)]
+pub struct EcPrivateKey<'a> {
+    /// Private key data.
+    pub private_key: &'a [u8],
+
+    /// Elliptic curve parameters.
+    pub parameters: Option<EcParameters>,
+
+    /// Public key data (optional).
+    pub public_key: Option<&'a [u8]>,
+}
+
+impl<'a> EcPrivateKey<'a> {
+    fn context_specific_parameters(&self) -> Option<ContextSpecificRef<'_, EcParameters>> {
+        self.parameters.as_ref().map(|params| ContextSpecificRef {
+            tag_number: EC_PARAMETERS_TAG,
+            tag_mode: TagMode::Explicit,
+            value: params,
+        })
+    }
+
+    fn context_specific_public_key(
+        &self,
+    ) -> der::Result<Option<ContextSpecific<BitStringRef<'a>>>> {
+        self.public_key
+            .map(|pk| {
+                BitStringRef::from_bytes(pk).map(|value| ContextSpecific {
+                    tag_number: PUBLIC_KEY_TAG,
+                    tag_mode: TagMode::Explicit,
+                    value,
+                })
+            })
+            .transpose()
+    }
+}
+
+impl<'a> DecodeValue<'a> for EcPrivateKey<'a> {
+    fn decode_value<R: Reader<'a>>(reader: &mut R, header: Header) -> der::Result<Self> {
+        reader.read_nested(header.length, |reader| {
+            if u8::decode(reader)? != VERSION {
+                return Err(der::Tag::Integer.value_error());
+            }
+
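+            // Decode the remaining `ECPrivateKey` fields in SEQUENCE order:
+            // the mandatory privateKey OCTET STRING, then the OPTIONAL
+            // EXPLICIT [0] curve parameters and EXPLICIT [1] public key
+            // BIT STRING.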
+            let private_key = OctetStringRef::decode(reader)?.as_bytes();
+            let parameters = reader.context_specific(EC_PARAMETERS_TAG, TagMode::Explicit)?;
+            let public_key = reader
+                .context_specific::<BitStringRef<'_>>(PUBLIC_KEY_TAG, TagMode::Explicit)?
+                .map(|bs| bs.as_bytes().ok_or_else(|| Tag::BitString.value_error()))
+                .transpose()?;
+
+            Ok(EcPrivateKey {
+                private_key,
+                parameters,
+                public_key,
+            })
+        })
+    }
+}
+
+impl EncodeValue for EcPrivateKey<'_> {
+    fn value_len(&self) -> der::Result<Length> {
+        VERSION.encoded_len()?
+            + OctetStringRef::new(self.private_key)?.encoded_len()?
+            + self.context_specific_parameters().encoded_len()?
+            + self.context_specific_public_key()?.encoded_len()?
+    }
+
+    fn encode_value(&self, writer: &mut impl Writer) -> der::Result<()> {
+        VERSION.encode(writer)?;
+        OctetStringRef::new(self.private_key)?.encode(writer)?;
+        self.context_specific_parameters().encode(writer)?;
+        self.context_specific_public_key()?.encode(writer)?;
+        Ok(())
+    }
+}
+
+impl<'a> Sequence<'a> for EcPrivateKey<'a> {}
+
+impl<'a> TryFrom<&'a [u8]> for EcPrivateKey<'a> {
+    type Error = Error;
+
+    fn try_from(bytes: &'a [u8]) -> Result<EcPrivateKey<'a>> {
+        Ok(Self::from_der(bytes)?)
+    }
+}
+
+impl<'a> fmt::Debug for EcPrivateKey<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("EcPrivateKey")
+            .field("parameters", &self.parameters)
+            .field("public_key", &self.public_key)
+            .finish_non_exhaustive()
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl TryFrom<EcPrivateKey<'_>> for SecretDocument {
+    type Error = Error;
+
+    fn try_from(private_key: EcPrivateKey<'_>) -> Result<Self> {
+        SecretDocument::try_from(&private_key)
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl TryFrom<&EcPrivateKey<'_>> for SecretDocument {
+    type Error = Error;
+
+    fn try_from(private_key: &EcPrivateKey<'_>) -> Result<Self> {
+        Ok(Self::encode_msg(private_key)?)
+    }
+}
+
+#[cfg(feature = "pem")]
+impl PemLabel for EcPrivateKey<'_> {
+    const PEM_LABEL: &'static str = "EC PRIVATE KEY";
+}
diff --git a/crates/sec1/src/traits.rs b/crates/sec1/src/traits.rs
new file mode 100644
index 0000000..304019e
--- /dev/null
+++ b/crates/sec1/src/traits.rs
@@ -0,0 +1,122 @@
+//! Traits for parsing objects from SEC1 encoded documents
+
+use crate::Result;
+
+#[cfg(feature = "alloc")]
+use der::SecretDocument;
+
+#[cfg(feature = "pem")]
+use {crate::LineEnding, alloc::string::String, der::pem::PemLabel};
+
+#[cfg(feature = "pkcs8")]
+use {
+    crate::{EcPrivateKey, ALGORITHM_OID},
+    der::Decode,
+};
+
+#[cfg(feature = "std")]
+use std::path::Path;
+
+#[cfg(feature = "pem")]
+use zeroize::Zeroizing;
+
+/// Parse an [`EcPrivateKey`] from a SEC1-encoded document.
+pub trait DecodeEcPrivateKey: Sized {
+    /// Deserialize SEC1 private key from ASN.1 DER-encoded data
+    /// (binary format).
+    fn from_sec1_der(bytes: &[u8]) -> Result<Self>;
+
+    /// Deserialize SEC1-encoded private key from PEM.
+    ///
+    /// Keys in this format begin with the following:
+    ///
+    /// ```text
+    /// -----BEGIN EC PRIVATE KEY-----
+    /// ```
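+    ///
+    /// Hypothetical usage (`MyKey` stands in for any implementor of this
+    /// trait):
+    ///
+    /// ```ignore
+    /// let key = MyKey::from_sec1_pem(pem_str)?;
+    /// ```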
+    #[cfg(feature = "pem")]
+    fn from_sec1_pem(s: &str) -> Result<Self> {
+        let (label, doc) = SecretDocument::from_pem(s)?;
+        EcPrivateKey::validate_pem_label(label)?;
+        Self::from_sec1_der(doc.as_bytes())
+    }
+
+    /// Load SEC1 private key from an ASN.1 DER-encoded file on the local
+    /// filesystem (binary format).
+    #[cfg(feature = "std")]
+    fn read_sec1_der_file(path: impl AsRef<Path>) -> Result<Self> {
+        Self::from_sec1_der(SecretDocument::read_der_file(path)?.as_bytes())
+    }
+
+    /// Load SEC1 private key from a PEM-encoded file on the local filesystem.
+    #[cfg(all(feature = "pem", feature = "std"))]
+    fn read_sec1_pem_file(path: impl AsRef<Path>) -> Result<Self> {
+        let (label, doc) = SecretDocument::read_pem_file(path)?;
+        EcPrivateKey::validate_pem_label(&label)?;
+        Self::from_sec1_der(doc.as_bytes())
+    }
+}
+
+/// Serialize an [`EcPrivateKey`] to a SEC1-encoded document.
+#[cfg(feature = "alloc")]
+pub trait EncodeEcPrivateKey {
+    /// Serialize a [`SecretDocument`] containing a SEC1-encoded private key.
+    fn to_sec1_der(&self) -> Result<SecretDocument>;
+
+    /// Serialize this private key as PEM-encoded SEC1 with the given [`LineEnding`].
+    ///
+    /// To use the OS's native line endings, pass `Default::default()`.
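+    ///
+    /// Hypothetical usage (`key` is any value implementing this trait):
+    ///
+    /// ```ignore
+    /// let pem = key.to_sec1_pem(LineEnding::LF)?;
+    /// ```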
+    #[cfg(feature = "pem")]
+    fn to_sec1_pem(&self, line_ending: LineEnding) -> Result<Zeroizing<String>> {
+        let doc = self.to_sec1_der()?;
+        Ok(doc.to_pem(EcPrivateKey::PEM_LABEL, line_ending)?)
+    }
+
+    /// Write ASN.1 DER-encoded SEC1 private key to the given path.
+    #[cfg(feature = "std")]
+    fn write_sec1_der_file(&self, path: impl AsRef<Path>) -> Result<()> {
+        Ok(self.to_sec1_der()?.write_der_file(path)?)
+    }
+
+    /// Write PEM-encoded SEC1 private key to the given path.
+    #[cfg(all(feature = "pem", feature = "std"))]
+    fn write_sec1_pem_file(&self, path: impl AsRef<Path>, line_ending: LineEnding) -> Result<()> {
+        let doc = self.to_sec1_der()?;
+        Ok(doc.write_pem_file(path, EcPrivateKey::PEM_LABEL, line_ending)?)
+    }
+}
+
+#[cfg(feature = "pkcs8")]
+impl<T> DecodeEcPrivateKey for T
+where
+    T: for<'a> TryFrom<pkcs8::PrivateKeyInfo<'a>, Error = pkcs8::Error>,
+{
+    fn from_sec1_der(private_key: &[u8]) -> Result<Self> {
+        let params_oid = EcPrivateKey::from_der(private_key)?
+            .parameters
+            .and_then(|params| params.named_curve());
+
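+        // Wrap the raw SEC1 document in a PKCS#8 `PrivateKeyInfo`: the
+        // crate-level elliptic curve ALGORITHM_OID becomes the algorithm, and
+        // the named curve (if present) is carried as its parameters.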
+        let algorithm = pkcs8::AlgorithmIdentifierRef {
+            oid: ALGORITHM_OID,
+            parameters: params_oid.as_ref().map(Into::into),
+        };
+
+        Ok(Self::try_from(pkcs8::PrivateKeyInfo {
+            algorithm,
+            private_key,
+            public_key: None,
+        })?)
+    }
+}
+
+#[cfg(all(feature = "alloc", feature = "pkcs8"))]
+impl<T: pkcs8::EncodePrivateKey> EncodeEcPrivateKey for T {
+    fn to_sec1_der(&self) -> Result<SecretDocument> {
+        let doc = self.to_pkcs8_der()?;
+        let pkcs8_key = pkcs8::PrivateKeyInfo::from_der(doc.as_bytes())?;
+        pkcs8_key.algorithm.assert_algorithm_oid(ALGORITHM_OID)?;
+
+        let mut ec_private_key = EcPrivateKey::from_der(pkcs8_key.private_key)?;
+        ec_private_key.parameters = Some(pkcs8_key.algorithm.parameters_oid()?.into());
+        ec_private_key.try_into()
+    }
+}
diff --git a/crates/sec1/tests/examples/p256-priv.der b/crates/sec1/tests/examples/p256-priv.der
new file mode 100644
index 0000000..c8528c3
--- /dev/null
+++ b/crates/sec1/tests/examples/p256-priv.der
Binary files differ
diff --git a/crates/sec1/tests/examples/p256-priv.pem b/crates/sec1/tests/examples/p256-priv.pem
new file mode 100644
index 0000000..d5a1c1a
--- /dev/null
+++ b/crates/sec1/tests/examples/p256-priv.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIGliQXFWGmM0DeDn2GnyoFSSVY4aBIaLap+FSoZniBiNoAoGCCqGSM49
+AwEHoUQDQgAEHKz/tV8vLO/YnYnrN0smgRUkUoAt7qCZFgaBN9g5z3/EgaREkjBN
+fvZqwRe+/oOo0I8VXytS+fYY3URwKQSODw==
+-----END EC PRIVATE KEY-----
diff --git a/crates/sec1/tests/private_key.rs b/crates/sec1/tests/private_key.rs
new file mode 100644
index 0000000..224a947
--- /dev/null
+++ b/crates/sec1/tests/private_key.rs
@@ -0,0 +1,43 @@
+//! SEC1 private key tests
+
+#![cfg(feature = "der")]
+
+use der::asn1::ObjectIdentifier;
+use hex_literal::hex;
+use sec1::{EcParameters, EcPrivateKey};
+
+#[cfg(feature = "alloc")]
+use der::Encode;
+
+/// NIST P-256 SEC1 private key encoded as ASN.1 DER.
+///
+/// Note: this key is extracted from the corresponding `p256-priv.der`
+/// example key in the `pkcs8` crate.
+const P256_DER_EXAMPLE: &[u8] = include_bytes!("examples/p256-priv.der");
+
+#[test]
+fn decode_p256_der() {
+    let key = EcPrivateKey::try_from(P256_DER_EXAMPLE).unwrap();
+
+    // Extracted using:
+    // $ openssl asn1parse -in tests/examples/p256-priv.pem
+    assert_eq!(
+        key.private_key,
+        hex!("69624171561A63340DE0E7D869F2A05492558E1A04868B6A9F854A866788188D")
+    );
+    assert_eq!(
+        key.parameters,
+        Some(EcParameters::NamedCurve(
+            ObjectIdentifier::new("1.2.840.10045.3.1.7").unwrap()
+        ))
+    );
+    assert_eq!(key.public_key, Some(hex!("041CACFFB55F2F2CEFD89D89EB374B2681152452802DEEA09916068137D839CF7FC481A44492304D7EF66AC117BEFE83A8D08F155F2B52F9F618DD447029048E0F").as_ref()));
+}
+
+#[cfg(feature = "alloc")]
+#[test]
+fn encode_p256_der() {
+    let key = EcPrivateKey::try_from(P256_DER_EXAMPLE).unwrap();
+    let key_encoded = key.to_der().unwrap();
+    assert_eq!(P256_DER_EXAMPLE, key_encoded);
+}
diff --git a/crates/sec1/tests/traits.rs b/crates/sec1/tests/traits.rs
new file mode 100644
index 0000000..ab6e09a
--- /dev/null
+++ b/crates/sec1/tests/traits.rs
@@ -0,0 +1,100 @@
+//! Tests for SEC1 encoding/decoding traits.
+
+#![cfg(any(feature = "pem", all(feature = "der", feature = "std")))]
+
+use der::SecretDocument;
+use sec1::{DecodeEcPrivateKey, EncodeEcPrivateKey, Result};
+
+#[cfg(feature = "pem")]
+use sec1::der::pem::LineEnding;
+
+#[cfg(feature = "std")]
+use tempfile::tempdir;
+
+#[cfg(all(feature = "pem", feature = "std"))]
+use std::fs;
+
+/// SEC1 `EcPrivateKey` encoded as ASN.1 DER
+const P256_DER_EXAMPLE: &[u8] = include_bytes!("examples/p256-priv.der");
+
+/// SEC1 `EcPrivateKey` encoded as PEM
+#[cfg(feature = "pem")]
+const P256_PEM_EXAMPLE: &str = include_str!("examples/p256-priv.pem");
+
+/// Mock private key type for testing trait impls against.
+pub struct MockPrivateKey(Vec<u8>);
+
+impl AsRef<[u8]> for MockPrivateKey {
+    fn as_ref(&self) -> &[u8] {
+        self.0.as_ref()
+    }
+}
+
+impl DecodeEcPrivateKey for MockPrivateKey {
+    fn from_sec1_der(bytes: &[u8]) -> Result<MockPrivateKey> {
+        Ok(MockPrivateKey(bytes.to_vec()))
+    }
+}
+
+impl EncodeEcPrivateKey for MockPrivateKey {
+    fn to_sec1_der(&self) -> Result<SecretDocument> {
+        Ok(SecretDocument::try_from(self.as_ref())?)
+    }
+}
+
+#[cfg(feature = "pem")]
+#[test]
+fn from_sec1_pem() {
+    let key = MockPrivateKey::from_sec1_pem(P256_PEM_EXAMPLE).unwrap();
+    assert_eq!(key.as_ref(), P256_DER_EXAMPLE);
+}
+
+#[cfg(feature = "std")]
+#[test]
+fn read_sec1_der_file() {
+    let key = MockPrivateKey::read_sec1_der_file("tests/examples/p256-priv.der").unwrap();
+    assert_eq!(key.as_ref(), P256_DER_EXAMPLE);
+}
+
+#[cfg(all(feature = "pem", feature = "std"))]
+#[test]
+fn read_sec1_pem_file() {
+    let key = MockPrivateKey::read_sec1_pem_file("tests/examples/p256-priv.pem").unwrap();
+    assert_eq!(key.as_ref(), P256_DER_EXAMPLE);
+}
+
+#[cfg(feature = "pem")]
+#[test]
+fn to_sec1_pem() {
+    let pem = MockPrivateKey(P256_DER_EXAMPLE.to_vec())
+        .to_sec1_pem(LineEnding::LF)
+        .unwrap();
+
+    assert_eq!(&*pem, P256_PEM_EXAMPLE);
+}
+
+#[cfg(feature = "std")]
+#[test]
+fn write_sec1_der_file() {
+    let dir = tempdir().unwrap();
+    let path = dir.path().join("example.der");
+    MockPrivateKey(P256_DER_EXAMPLE.to_vec())
+        .write_sec1_der_file(&path)
+        .unwrap();
+
+    let key = MockPrivateKey::read_sec1_der_file(&path).unwrap();
+    assert_eq!(key.as_ref(), P256_DER_EXAMPLE);
+}
+
+#[cfg(all(feature = "pem", feature = "std"))]
+#[test]
+fn write_sec1_pem_file() {
+    let dir = tempdir().unwrap();
+    let path = dir.path().join("example.pem");
+    MockPrivateKey(P256_DER_EXAMPLE.to_vec())
+        .write_sec1_pem_file(&path, LineEnding::LF)
+        .unwrap();
+
+    let pem = fs::read_to_string(path).unwrap();
+    assert_eq!(&pem, P256_PEM_EXAMPLE);
+}
diff --git a/crates/semver/.cargo-checksum.json b/crates/semver/.cargo-checksum.json
new file mode 100644
index 0000000..0652e35
--- /dev/null
+++ b/crates/semver/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"c1e3fce5c6ba7d099dd20efc20f68bfc3d937e63d87d0edcc84ad24829b9672b","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"de1a03443ab8f147676199856a975ec00f3f7334fc5d5d5e056ec8f3fcb61dd5","benches/parse.rs":"6531f66f80ce2fc83878f9bf84f94c42e96f1e709466f2b88be8d95a3cec1511","build.rs":"9a3d42e37b665745044b5d91c6e02dd458152e336a7013654972f4a1a0b562d9","src/backport.rs":"66db55d15d0e2808bffe4cde7cd1d99bda999b26cbe40bb6b5e43b94f9b631d2","src/display.rs":"9ba42f7a6579aa9c7dd72f2380036f5c9664592f3eacd09ea25cef291a3e64e5","src/error.rs":"895e11a9e454cab207415186b15a29dac1f0a5bf9b8dba7bd45da5bb5a6ace31","src/eval.rs":"b7e7ec976051b9f87ddf5cfdbaad64654d98d86ae0763f7d88b14eeaeac6013c","src/identifier.rs":"459725383cbd0e2d769aa947decd1f031bdc8732339783ad24eb2b44f0f5d040","src/impls.rs":"48baf552d28f6d0ce63da2d2d53a710222ed87834d44e8b0493a0827e1d718f2","src/lib.rs":"0b7e6389abeac2ce5fd7179eb5abfe120e52b9f7bf2b1e2717b061b54ab9a3e8","src/parse.rs":"920f8327a1ba0c16a28f518f2d50998faf1d6a8db379b92cb72976f176650f09","src/serde.rs":"e2a9b9dc3cd2cccc250eaffad049de418ef791bf8c4a34111a48f068353e0a37","tests/node/mod.rs":"2710d9b8daace2038b66db0f8f4cc522dee938e7cbc42d7739c31995343c32f4","tests/test_autotrait.rs":"2631b78c9c63f1829c814fa88fcab0c5d77eb4e2d9c0c418d50c33e12a511e90","tests/test_identifier.rs":"fba0428bf5c43c733d17ca30952822ed6e1af6de1e5ef11bd8fb46c6b10f65fd","tests/test_version.rs":"9587d2c397524e1d33499d9bb2725fe746406fa4d162713b7f56029942a4a84b","tests/test_version_req.rs":"1aa4ad4f58f1ec3226b334d0f82b6ebae732585dd9b2c35328b576a1b3638a8d","tests/util/mod.rs":"b35cd4e7fcc9a6fa2496d163d716155ddd379e027ef9516c1af05ce0cf605ae1"},"package":"b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0"}
\ No newline at end of file
diff --git a/crates/semver/Android.bp b/crates/semver/Android.bp
new file mode 100644
index 0000000..c55e571
--- /dev/null
+++ b/crates/semver/Android.bp
@@ -0,0 +1,35 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_semver_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_semver_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libsemver",
+    crate_name: "semver",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.0.21",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    features: [
+        "default",
+        "serde",
+        "std",
+    ],
+    rustlibs: ["libserde"],
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.virt",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
diff --git a/crates/semver/Cargo.lock b/crates/semver/Cargo.lock
new file mode 100644
index 0000000..38f9da6
--- /dev/null
+++ b/crates/semver/Cargo.lock
@@ -0,0 +1,65 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-ident 1.0.12 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.86 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "semver"
+version = "1.0.21"
+dependencies = [
+ "serde 1.0.209 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "serde_derive 1.0.209 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.86 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 2.0.76 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.86 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-ident 1.0.12 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[metadata]
+"checksum proc-macro2 1.0.86 (registry+https://github.com/rust-lang/crates.io-index)" = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+"checksum quote 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)" = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+"checksum serde 1.0.209 (registry+https://github.com/rust-lang/crates.io-index)" = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
+"checksum serde_derive 1.0.209 (registry+https://github.com/rust-lang/crates.io-index)" = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
+"checksum syn 2.0.76 (registry+https://github.com/rust-lang/crates.io-index)" = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+"checksum unicode-ident 1.0.12 (registry+https://github.com/rust-lang/crates.io-index)" = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
diff --git a/crates/semver/Cargo.toml b/crates/semver/Cargo.toml
new file mode 100644
index 0000000..931c692
--- /dev/null
+++ b/crates/semver/Cargo.toml
@@ -0,0 +1,47 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.31"
+name = "semver"
+version = "1.0.21"
+authors = ["David Tolnay <dtolnay@gmail.com>"]
+description = "Parser and evaluator for Cargo's flavor of Semantic Versioning"
+documentation = "https://docs.rs/semver"
+readme = "README.md"
+keywords = ["cargo"]
+categories = [
+    "data-structures",
+    "no-std",
+]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/dtolnay/semver"
+
+[package.metadata.docs.rs]
+rustdoc-args = [
+    "--cfg",
+    "doc_cfg",
+    "--generate-link-to-definition",
+]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[lib]
+doc-scrape-examples = false
+
+[dependencies.serde]
+version = "1.0.194"
+optional = true
+default-features = false
+
+[features]
+default = ["std"]
+std = []
diff --git a/crates/semver/LICENSE b/crates/semver/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/semver/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/semver/LICENSE-APACHE b/crates/semver/LICENSE-APACHE
new file mode 100644
index 0000000..1b5ec8b
--- /dev/null
+++ b/crates/semver/LICENSE-APACHE
@@ -0,0 +1,176 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/crates/semver/LICENSE-MIT b/crates/semver/LICENSE-MIT
new file mode 100644
index 0000000..31aa793
--- /dev/null
+++ b/crates/semver/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/semver/METADATA b/crates/semver/METADATA
new file mode 100644
index 0000000..c963f72
--- /dev/null
+++ b/crates/semver/METADATA
@@ -0,0 +1,20 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update external/rust/crates/semver
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
+
+name: "semver"
+description: "Parser and evaluator for Cargo\'s flavor of Semantic Versioning"
+third_party {
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2024
+    month: 2
+    day: 7
+  }
+  homepage: "https://crates.io/crates/semver"
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/semver/semver-1.0.21.crate"
+    version: "1.0.21"
+  }
+}
diff --git a/crates/semver/MODULE_LICENSE_APACHE2 b/crates/semver/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/semver/MODULE_LICENSE_APACHE2
diff --git a/crates/semver/README.md b/crates/semver/README.md
new file mode 100644
index 0000000..a9a1cb8
--- /dev/null
+++ b/crates/semver/README.md
@@ -0,0 +1,84 @@
+semver
+======
+
+[<img alt="github" src="https://img.shields.io/badge/github-dtolnay/semver-8da0cb?style=for-the-badge&labelColor=555555&logo=github" height="20">](https://github.com/dtolnay/semver)
+[<img alt="crates.io" src="https://img.shields.io/crates/v/semver.svg?style=for-the-badge&color=fc8d62&logo=rust" height="20">](https://crates.io/crates/semver)
+[<img alt="docs.rs" src="https://img.shields.io/badge/docs.rs-semver-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs" height="20">](https://docs.rs/semver)
+[<img alt="build status" src="https://img.shields.io/github/actions/workflow/status/dtolnay/semver/ci.yml?branch=master&style=for-the-badge" height="20">](https://github.com/dtolnay/semver/actions?query=branch%3Amaster)
+
+A parser and evaluator for Cargo's flavor of Semantic Versioning.
+
+Semantic Versioning (see <https://semver.org>) is a guideline for how version
+numbers are assigned and incremented. It is widely followed within the
+Cargo/crates.io ecosystem for Rust.
+
+```toml
+[dependencies]
+semver = "1.0"
+```
+
+*Compiler support: requires rustc 1.31+*
+
+<br>
+
+## Example
+
+```rust
+use semver::{BuildMetadata, Prerelease, Version, VersionReq};
+
+fn main() {
+    let req = VersionReq::parse(">=1.2.3, <1.8.0").unwrap();
+
+    // Check whether this requirement matches version 1.2.3-alpha.1 (no)
+    let version = Version {
+        major: 1,
+        minor: 2,
+        patch: 3,
+        pre: Prerelease::new("alpha.1").unwrap(),
+        build: BuildMetadata::EMPTY,
+    };
+    assert!(!req.matches(&version));
+
+    // Check whether it matches 1.3.0 (yes it does)
+    let version = Version::parse("1.3.0").unwrap();
+    assert!(req.matches(&version));
+}
+```
+
+<br>
+
+## Scope of this crate
+
+Besides Cargo, several other package ecosystems and package managers for other
+languages also use SemVer:&ensp;RubyGems/Bundler for Ruby, npm for JavaScript,
+Composer for PHP, CocoaPods for Objective-C...
+
+The `semver` crate is specifically intended to implement Cargo's interpretation
+of Semantic Versioning.
+
+Where the various tools differ in their interpretation or implementation of the
+spec, this crate follows the implementation choices made by Cargo. If you are
+operating on version numbers from some other package ecosystem, you will want to
+use a different semver library which is appropriate to that ecosystem.
+
+The extent of Cargo's SemVer support is documented in the *[Specifying
+Dependencies]* chapter of the Cargo reference.
+
+[Specifying Dependencies]: https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html
+
+<br>
+
+#### License
+
+<sup>
+Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
+2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
+</sup>
+
+<br>
+
+<sub>
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
+</sub>
diff --git a/crates/semver/TEST_MAPPING b/crates/semver/TEST_MAPPING
new file mode 100644
index 0000000..5366bfb
--- /dev/null
+++ b/crates/semver/TEST_MAPPING
@@ -0,0 +1,11 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "packages/modules/Virtualization/virtualizationmanager"
+    },
+    {
+      "path": "packages/modules/Virtualization/vm"
+    }
+  ]
+}
diff --git a/crates/semver/benches/parse.rs b/crates/semver/benches/parse.rs
new file mode 100644
index 0000000..d6aded7
--- /dev/null
+++ b/crates/semver/benches/parse.rs
@@ -0,0 +1,24 @@
+#![feature(test)]
+
+extern crate test;
+
+use semver::{Prerelease, Version, VersionReq};
+use test::{black_box, Bencher};
+
+#[bench]
+fn parse_prerelease(b: &mut Bencher) {
+    let text = "x.7.z.92";
+    b.iter(|| black_box(text).parse::<Prerelease>().unwrap());
+}
+
+#[bench]
+fn parse_version(b: &mut Bencher) {
+    let text = "1.0.2021-beta+exp.sha.5114f85";
+    b.iter(|| black_box(text).parse::<Version>().unwrap());
+}
+
+#[bench]
+fn parse_version_req(b: &mut Bencher) {
+    let text = ">=1.2.3, <2.0.0";
+    b.iter(|| black_box(text).parse::<VersionReq>().unwrap());
+}
diff --git a/crates/semver/build.rs b/crates/semver/build.rs
new file mode 100644
index 0000000..81ad970
--- /dev/null
+++ b/crates/semver/build.rs
@@ -0,0 +1,75 @@
+use std::env;
+use std::process::Command;
+use std::str;
+
+fn main() {
+    println!("cargo:rerun-if-changed=build.rs");
+
+    let compiler = match rustc_minor_version() {
+        Some(compiler) => compiler,
+        None => return,
+    };
+
+    if compiler < 33 {
+        // Exhaustive integer patterns. On older compilers, a final `_` arm is
+        // required even if every possible integer value is otherwise covered.
+        // https://github.com/rust-lang/rust/issues/50907
+        println!("cargo:rustc-cfg=no_exhaustive_int_match");
+    }
+
+    if compiler < 36 {
+        // extern crate alloc.
+        // https://blog.rust-lang.org/2019/07/04/Rust-1.36.0.html#the-alloc-crate-is-stable
+        println!("cargo:rustc-cfg=no_alloc_crate");
+    }
+
+    if compiler < 39 {
+        // const Vec::new.
+        // https://doc.rust-lang.org/std/vec/struct.Vec.html#method.new
+        println!("cargo:rustc-cfg=no_const_vec_new");
+    }
+
+    if compiler < 40 {
+        // #[non_exhaustive].
+        // https://blog.rust-lang.org/2019/12/19/Rust-1.40.0.html#non_exhaustive-structs-enums-and-variants
+        println!("cargo:rustc-cfg=no_non_exhaustive");
+    }
+
+    if compiler < 45 {
+        // String::strip_prefix.
+        // https://doc.rust-lang.org/std/primitive.str.html#method.strip_prefix
+        println!("cargo:rustc-cfg=no_str_strip_prefix");
+    }
+
+    if compiler < 46 {
+        // #[track_caller].
+        // https://blog.rust-lang.org/2020/08/27/Rust-1.46.0.html#track_caller
+        println!("cargo:rustc-cfg=no_track_caller");
+    }
+
+    if compiler < 52 {
+        // #![deny(unsafe_op_in_unsafe_fn)].
+        // https://github.com/rust-lang/rust/issues/71668
+        println!("cargo:rustc-cfg=no_unsafe_op_in_unsafe_fn_lint");
+    }
+
+    if compiler < 53 {
+        // Efficient intrinsics for count-leading-zeros and count-trailing-zeros
+        // on NonZero integers stabilized in 1.53.0. On many architectures these
+        // are more efficient than counting zeros on ordinary zeroable integers.
+        // https://doc.rust-lang.org/std/num/struct.NonZeroU64.html#method.leading_zeros
+        // https://doc.rust-lang.org/std/num/struct.NonZeroU64.html#method.trailing_zeros
+        println!("cargo:rustc-cfg=no_nonzero_bitscan");
+    }
+}
+
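+// Extract the minor version N from `rustc --version` output of the form
+// "rustc 1.N.P (...)". Returning None makes main() emit no cfgs at all, in
+// which case the code is compiled as if every feature above were available.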
+fn rustc_minor_version() -> Option<u32> {
+    let rustc = env::var_os("RUSTC")?;
+    let output = Command::new(rustc).arg("--version").output().ok()?;
+    let version = str::from_utf8(&output.stdout).ok()?;
+    let mut pieces = version.split('.');
+    if pieces.next() != Some("rustc 1") {
+        return None;
+    }
+    pieces.next()?.parse().ok()
+}
diff --git a/crates/semver/cargo_embargo.json b/crates/semver/cargo_embargo.json
new file mode 100644
index 0000000..a2ca437
--- /dev/null
+++ b/crates/semver/cargo_embargo.json
@@ -0,0 +1,17 @@
+{
+  "apex_available": [
+    "//apex_available:platform",
+    "com.android.virt"
+  ],
+  "features": [
+    "default",
+    "std",
+    "serde"
+  ],
+  "package": {
+    "semver": {
+      "host_supported": false
+    }
+  },
+  "run_cargo": false
+}
diff --git a/crates/semver/src/backport.rs b/crates/semver/src/backport.rs
new file mode 100644
index 0000000..b5e1d02
--- /dev/null
+++ b/crates/semver/src/backport.rs
@@ -0,0 +1,23 @@
+#[cfg(no_str_strip_prefix)] // rustc <1.45
+pub(crate) trait StripPrefixExt {
+    fn strip_prefix(&self, ch: char) -> Option<&str>;
+}
+
+#[cfg(no_str_strip_prefix)]
+impl StripPrefixExt for str {
+    fn strip_prefix(&self, ch: char) -> Option<&str> {
+        if self.starts_with(ch) {
+            Some(&self[ch.len_utf8()..])
+        } else {
+            None
+        }
+    }
+}
+
+pub(crate) use crate::alloc::vec::Vec;
+
+#[cfg(no_alloc_crate)] // rustc <1.36
+pub(crate) mod alloc {
+    pub use std::alloc;
+    pub use std::vec;
+}
diff --git a/crates/semver/src/display.rs b/crates/semver/src/display.rs
new file mode 100644
index 0000000..3c2871b
--- /dev/null
+++ b/crates/semver/src/display.rs
@@ -0,0 +1,165 @@
+use crate::{BuildMetadata, Comparator, Op, Prerelease, Version, VersionReq};
+use core::fmt::{self, Alignment, Debug, Display, Write};
+
+impl Display for Version {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        let do_display = |formatter: &mut fmt::Formatter| -> fmt::Result {
+            write!(formatter, "{}.{}.{}", self.major, self.minor, self.patch)?;
+            if !self.pre.is_empty() {
+                write!(formatter, "-{}", self.pre)?;
+            }
+            if !self.build.is_empty() {
+                write!(formatter, "+{}", self.build)?;
+            }
+            Ok(())
+        };
+
+        let do_len = || -> usize {
+            digits(self.major)
+                + 1
+                + digits(self.minor)
+                + 1
+                + digits(self.patch)
+                + !self.pre.is_empty() as usize
+                + self.pre.len()
+                + !self.build.is_empty() as usize
+                + self.build.len()
+        };
+
+        pad(formatter, do_display, do_len)
+    }
+}
+
+impl Display for VersionReq {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        if self.comparators.is_empty() {
+            return formatter.write_str("*");
+        }
+        for (i, comparator) in self.comparators.iter().enumerate() {
+            if i > 0 {
+                formatter.write_str(", ")?;
+            }
+            write!(formatter, "{}", comparator)?;
+        }
+        Ok(())
+    }
+}
+
+impl Display for Comparator {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        let op = match self.op {
+            Op::Exact => "=",
+            Op::Greater => ">",
+            Op::GreaterEq => ">=",
+            Op::Less => "<",
+            Op::LessEq => "<=",
+            Op::Tilde => "~",
+            Op::Caret => "^",
+            Op::Wildcard => "",
+            #[cfg(no_non_exhaustive)]
+            Op::__NonExhaustive => unreachable!(),
+        };
+        formatter.write_str(op)?;
+        write!(formatter, "{}", self.major)?;
+        if let Some(minor) = &self.minor {
+            write!(formatter, ".{}", minor)?;
+            if let Some(patch) = &self.patch {
+                write!(formatter, ".{}", patch)?;
+                if !self.pre.is_empty() {
+                    write!(formatter, "-{}", self.pre)?;
+                }
+            } else if self.op == Op::Wildcard {
+                formatter.write_str(".*")?;
+            }
+        } else if self.op == Op::Wildcard {
+            formatter.write_str(".*")?;
+        }
+        Ok(())
+    }
+}
+
+impl Display for Prerelease {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str(self.as_str())
+    }
+}
+
+impl Display for BuildMetadata {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str(self.as_str())
+    }
+}
+
+impl Debug for Version {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        let mut debug = formatter.debug_struct("Version");
+        debug
+            .field("major", &self.major)
+            .field("minor", &self.minor)
+            .field("patch", &self.patch);
+        if !self.pre.is_empty() {
+            debug.field("pre", &self.pre);
+        }
+        if !self.build.is_empty() {
+            debug.field("build", &self.build);
+        }
+        debug.finish()
+    }
+}
+
+impl Debug for Prerelease {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        write!(formatter, "Prerelease(\"{}\")", self)
+    }
+}
+
+impl Debug for BuildMetadata {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        write!(formatter, "BuildMetadata(\"{}\")", self)
+    }
+}
+
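+// Apply the caller's width, fill, and alignment formatting flags around the
+// rendered value; for example `format!("{:<8}", Version::new(1, 0, 0))`
+// produces "1.0.0   " with three trailing fill characters.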
+fn pad(
+    formatter: &mut fmt::Formatter,
+    do_display: impl FnOnce(&mut fmt::Formatter) -> fmt::Result,
+    do_len: impl FnOnce() -> usize,
+) -> fmt::Result {
+    let min_width = match formatter.width() {
+        Some(min_width) => min_width,
+        None => return do_display(formatter),
+    };
+
+    let len = do_len();
+    if len >= min_width {
+        return do_display(formatter);
+    }
+
+    let default_align = Alignment::Left;
+    let align = formatter.align().unwrap_or(default_align);
+    let padding = min_width - len;
+    let (pre_pad, post_pad) = match align {
+        Alignment::Left => (0, padding),
+        Alignment::Right => (padding, 0),
+        Alignment::Center => (padding / 2, (padding + 1) / 2),
+    };
+
+    let fill = formatter.fill();
+    for _ in 0..pre_pad {
+        formatter.write_char(fill)?;
+    }
+
+    do_display(formatter)?;
+
+    for _ in 0..post_pad {
+        formatter.write_char(fill)?;
+    }
+    Ok(())
+}
+
+fn digits(val: u64) -> usize {
+    if val < 10 {
+        1
+    } else {
+        1 + digits(val / 10)
+    }
+}
diff --git a/crates/semver/src/error.rs b/crates/semver/src/error.rs
new file mode 100644
index 0000000..93b05ee
--- /dev/null
+++ b/crates/semver/src/error.rs
@@ -0,0 +1,126 @@
+use crate::parse::Error;
+use core::fmt::{self, Debug, Display};
+
+pub(crate) enum ErrorKind {
+    Empty,
+    UnexpectedEnd(Position),
+    UnexpectedChar(Position, char),
+    UnexpectedCharAfter(Position, char),
+    ExpectedCommaFound(Position, char),
+    LeadingZero(Position),
+    Overflow(Position),
+    EmptySegment(Position),
+    IllegalCharacter(Position),
+    WildcardNotTheOnlyComparator(char),
+    UnexpectedAfterWildcard,
+    ExcessiveComparators,
+}
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub(crate) enum Position {
+    Major,
+    Minor,
+    Patch,
+    Pre,
+    Build,
+}
+
+#[cfg(feature = "std")]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
+impl std::error::Error for Error {}
+
+impl Display for Error {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        match &self.kind {
+            ErrorKind::Empty => formatter.write_str("empty string, expected a semver version"),
+            ErrorKind::UnexpectedEnd(pos) => {
+                write!(formatter, "unexpected end of input while parsing {}", pos)
+            }
+            ErrorKind::UnexpectedChar(pos, ch) => {
+                write!(
+                    formatter,
+                    "unexpected character {} while parsing {}",
+                    QuotedChar(*ch),
+                    pos,
+                )
+            }
+            ErrorKind::UnexpectedCharAfter(pos, ch) => {
+                write!(
+                    formatter,
+                    "unexpected character {} after {}",
+                    QuotedChar(*ch),
+                    pos,
+                )
+            }
+            ErrorKind::ExpectedCommaFound(pos, ch) => {
+                write!(
+                    formatter,
+                    "expected comma after {}, found {}",
+                    pos,
+                    QuotedChar(*ch),
+                )
+            }
+            ErrorKind::LeadingZero(pos) => {
+                write!(formatter, "invalid leading zero in {}", pos)
+            }
+            ErrorKind::Overflow(pos) => {
+                write!(formatter, "value of {} exceeds u64::MAX", pos)
+            }
+            ErrorKind::EmptySegment(pos) => {
+                write!(formatter, "empty identifier segment in {}", pos)
+            }
+            ErrorKind::IllegalCharacter(pos) => {
+                write!(formatter, "unexpected character in {}", pos)
+            }
+            ErrorKind::WildcardNotTheOnlyComparator(ch) => {
+                write!(
+                    formatter,
+                    "wildcard req ({}) must be the only comparator in the version req",
+                    ch,
+                )
+            }
+            ErrorKind::UnexpectedAfterWildcard => {
+                formatter.write_str("unexpected character after wildcard in version req")
+            }
+            ErrorKind::ExcessiveComparators => {
+                formatter.write_str("excessive number of version comparators")
+            }
+        }
+    }
+}
+
+impl Display for Position {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str(match self {
+            Position::Major => "major version number",
+            Position::Minor => "minor version number",
+            Position::Patch => "patch version number",
+            Position::Pre => "pre-release identifier",
+            Position::Build => "build metadata",
+        })
+    }
+}
+
+impl Debug for Error {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str("Error(\"")?;
+        Display::fmt(self, formatter)?;
+        formatter.write_str("\")")?;
+        Ok(())
+    }
+}
+
+struct QuotedChar(char);
+
+impl Display for QuotedChar {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        // Standard library versions prior to https://github.com/rust-lang/rust/pull/95345
+        // print character 0 as '\u{0}'. We prefer '\0' to keep error messages
+        // the same across all supported Rust versions.
+        if self.0 == '\0' {
+            formatter.write_str("'\\0'")
+        } else {
+            write!(formatter, "{:?}", self.0)
+        }
+    }
+}
diff --git a/crates/semver/src/eval.rs b/crates/semver/src/eval.rs
new file mode 100644
index 0000000..e6e3894
--- /dev/null
+++ b/crates/semver/src/eval.rs
@@ -0,0 +1,181 @@
+use crate::{Comparator, Op, Version, VersionReq};
+
+pub(crate) fn matches_req(req: &VersionReq, ver: &Version) -> bool {
+    for cmp in &req.comparators {
+        if !matches_impl(cmp, ver) {
+            return false;
+        }
+    }
+
+    if ver.pre.is_empty() {
+        return true;
+    }
+
+    // If a version has a prerelease tag (for example, 1.2.3-alpha.3) then it
+    // will only be allowed to satisfy req if at least one comparator with the
+    // same major.minor.patch also has a prerelease tag.
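+    // For example, `>=1.2.3-0` can match `1.2.3-alpha.1` because that
+    // comparator names the same 1.2.3 with a pre-release tag, whereas a
+    // plain `>=1.2.0` requirement would not.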
+    for cmp in &req.comparators {
+        if pre_is_compatible(cmp, ver) {
+            return true;
+        }
+    }
+
+    false
+}
+
+pub(crate) fn matches_comparator(cmp: &Comparator, ver: &Version) -> bool {
+    matches_impl(cmp, ver) && (ver.pre.is_empty() || pre_is_compatible(cmp, ver))
+}
+
+fn matches_impl(cmp: &Comparator, ver: &Version) -> bool {
+    match cmp.op {
+        Op::Exact | Op::Wildcard => matches_exact(cmp, ver),
+        Op::Greater => matches_greater(cmp, ver),
+        Op::GreaterEq => matches_exact(cmp, ver) || matches_greater(cmp, ver),
+        Op::Less => matches_less(cmp, ver),
+        Op::LessEq => matches_exact(cmp, ver) || matches_less(cmp, ver),
+        Op::Tilde => matches_tilde(cmp, ver),
+        Op::Caret => matches_caret(cmp, ver),
+        #[cfg(no_non_exhaustive)]
+        Op::__NonExhaustive => unreachable!(),
+    }
+}
+
+fn matches_exact(cmp: &Comparator, ver: &Version) -> bool {
+    if ver.major != cmp.major {
+        return false;
+    }
+
+    if let Some(minor) = cmp.minor {
+        if ver.minor != minor {
+            return false;
+        }
+    }
+
+    if let Some(patch) = cmp.patch {
+        if ver.patch != patch {
+            return false;
+        }
+    }
+
+    ver.pre == cmp.pre
+}
+
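+// An unspecified minor or patch means the comparator covers the whole series:
+// `>I` is equivalent to `>=(I+1).0.0` and `>I.J` to `>=I.(J+1).0`, so once the
+// specified parts are equal there is nothing left to compare and the version
+// is not greater.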
+fn matches_greater(cmp: &Comparator, ver: &Version) -> bool {
+    if ver.major != cmp.major {
+        return ver.major > cmp.major;
+    }
+
+    match cmp.minor {
+        None => return false,
+        Some(minor) => {
+            if ver.minor != minor {
+                return ver.minor > minor;
+            }
+        }
+    }
+
+    match cmp.patch {
+        None => return false,
+        Some(patch) => {
+            if ver.patch != patch {
+                return ver.patch > patch;
+            }
+        }
+    }
+
+    ver.pre > cmp.pre
+}
+
+fn matches_less(cmp: &Comparator, ver: &Version) -> bool {
+    if ver.major != cmp.major {
+        return ver.major < cmp.major;
+    }
+
+    match cmp.minor {
+        None => return false,
+        Some(minor) => {
+            if ver.minor != minor {
+                return ver.minor < minor;
+            }
+        }
+    }
+
+    match cmp.patch {
+        None => return false,
+        Some(patch) => {
+            if ver.patch != patch {
+                return ver.patch < patch;
+            }
+        }
+    }
+
+    ver.pre < cmp.pre
+}
+
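+// Tilde: only the patch (and pre-release) part may exceed the comparator;
+// `~I.J.K` is equivalent to `>=I.J.K, <I.(J+1).0`.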
+fn matches_tilde(cmp: &Comparator, ver: &Version) -> bool {
+    if ver.major != cmp.major {
+        return false;
+    }
+
+    if let Some(minor) = cmp.minor {
+        if ver.minor != minor {
+            return false;
+        }
+    }
+
+    if let Some(patch) = cmp.patch {
+        if ver.patch != patch {
+            return ver.patch > patch;
+        }
+    }
+
+    ver.pre >= cmp.pre
+}
+
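+// Caret: parts to the right of the leftmost nonzero part may increase, so
+// `^1.2.3` admits `1.9.0`, `^0.2.3` admits `0.2.9` but not `0.3.0`, and
+// `^0.0.3` admits only `0.0.3`.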
+fn matches_caret(cmp: &Comparator, ver: &Version) -> bool {
+    if ver.major != cmp.major {
+        return false;
+    }
+
+    let minor = match cmp.minor {
+        None => return true,
+        Some(minor) => minor,
+    };
+
+    let patch = match cmp.patch {
+        None => {
+            if cmp.major > 0 {
+                return ver.minor >= minor;
+            } else {
+                return ver.minor == minor;
+            }
+        }
+        Some(patch) => patch,
+    };
+
+    if cmp.major > 0 {
+        if ver.minor != minor {
+            return ver.minor > minor;
+        } else if ver.patch != patch {
+            return ver.patch > patch;
+        }
+    } else if minor > 0 {
+        if ver.minor != minor {
+            return false;
+        } else if ver.patch != patch {
+            return ver.patch > patch;
+        }
+    } else if ver.minor != minor || ver.patch != patch {
+        return false;
+    }
+
+    ver.pre >= cmp.pre
+}
+
+fn pre_is_compatible(cmp: &Comparator, ver: &Version) -> bool {
+    cmp.major == ver.major
+        && cmp.minor == Some(ver.minor)
+        && cmp.patch == Some(ver.patch)
+        && !cmp.pre.is_empty()
+}
diff --git a/crates/semver/src/identifier.rs b/crates/semver/src/identifier.rs
new file mode 100644
index 0000000..0273ae6
--- /dev/null
+++ b/crates/semver/src/identifier.rs
@@ -0,0 +1,422 @@
+// This module implements Identifier, a short-optimized string allowed to
+// contain only the ASCII characters hyphen, dot, 0-9, A-Z, a-z.
+//
+// As of mid-2021, the distribution of pre-release lengths on crates.io is:
+//
+//     length  count         length  count         length  count
+//        0  355929            11      81            24       2
+//        1     208            12      48            25       6
+//        2     236            13      55            26      10
+//        3    1909            14      25            27       4
+//        4    1284            15      15            28       1
+//        5    1742            16      35            30       1
+//        6    3440            17       9            31       5
+//        7    5624            18       6            32       1
+//        8    1321            19      12            36       2
+//        9     179            20       2            37     379
+//       10      65            23      11
+//
+// and the distribution of build metadata lengths is:
+//
+//     length  count         length  count         length  count
+//        0  364445             8    7725            18       1
+//        1      72             9      16            19       1
+//        2       7            10      85            20       1
+//        3      28            11      17            22       4
+//        4       9            12      10            26       1
+//        5      68            13       9            27       1
+//        6      73            14      10            40       5
+//        7      53            15       6
+//
+// Therefore it really behooves us to be able to use the entire 8 bytes of a
+// pointer for inline storage. For both pre-release and build metadata there are
+// vastly more strings with length exactly 8 bytes than the sum over all lengths
+// longer than 8 bytes.
+//
+// To differentiate the inline representation from the heap allocated long
+// representation, we'll allocate heap pointers with 2-byte alignment so that
+// they are guaranteed to have an unset least significant bit. Then in the repr
+// we store for pointers, we rotate a 1 into the most significant bit of the
+// most significant byte, which is never set for an ASCII byte.
+//
+// Inline repr:
+//
+//     0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx
+//
+// Heap allocated repr:
+//
+//     1ppppppp pppppppp pppppppp pppppppp pppppppp pppppppp pppppppp pppppppp 0
+//     ^ most significant bit   least significant bit of orig ptr, rotated out ^
+//
+// Since the most significant bit doubles as a sign bit for the similarly sized
+// signed integer type, the CPU has an efficient instruction for inspecting it,
+// meaning we can differentiate between an inline repr and a heap allocated repr
+// in one instruction. Effectively an inline repr always looks like a positive
+// i64 while a heap allocated repr always looks like a negative i64.
+//
+// For the inline repr, we store \0 padding on the end of the stored characters,
+// and thus the string length is readily determined efficiently by a cttz (count
+// trailing zeros) or bsf (bit scan forward) instruction.
+//
+// For the heap allocated repr, the length is encoded as a base-128 varint at
+// the head of the allocation.
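+//
+// For example, a 200-byte identifier encodes its length as the two varint
+// bytes 0xc8 0x81: seven low bits per byte, least significant group first,
+// with the high bit set on every varint byte.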
+//
+// Empty strings are stored as an all-1 bit pattern, corresponding to -1i64.
+// Consequently the all-0 bit pattern is never a legal representation in any
+// repr, leaving it available as a niche for downstream code. For example this
+// allows size_of::<Version>() == size_of::<Option<Version>>().
+
+use crate::alloc::alloc::{alloc, dealloc, handle_alloc_error, Layout};
+use core::isize;
+use core::mem;
+use core::num::{NonZeroU64, NonZeroUsize};
+use core::ptr::{self, NonNull};
+use core::slice;
+use core::str;
+use core::usize;
+
+const PTR_BYTES: usize = mem::size_of::<NonNull<u8>>();
+
+// If pointers are already 8 bytes or bigger, then 0. If pointers are smaller
+// than 8 bytes, then Identifier will contain a byte array to raise its size up
+// to 8 bytes total.
+const TAIL_BYTES: usize = 8 * (PTR_BYTES < 8) as usize - PTR_BYTES * (PTR_BYTES < 8) as usize;
+
+#[repr(C, align(8))]
+pub(crate) struct Identifier {
+    head: NonNull<u8>,
+    tail: [u8; TAIL_BYTES],
+}
+
+impl Identifier {
+    pub(crate) const fn empty() -> Self {
+        // This is a separate constant because unsafe function calls are not
+        // allowed in a const fn body, only in a const, until later rustc than
+        // what we support.
+        const HEAD: NonNull<u8> = unsafe { NonNull::new_unchecked(!0 as *mut u8) };
+
+        // `mov rax, -1`
+        Identifier {
+            head: HEAD,
+            tail: [!0; TAIL_BYTES],
+        }
+    }
+
+    // SAFETY: string must be ASCII and not contain \0 bytes.
+    pub(crate) unsafe fn new_unchecked(string: &str) -> Self {
+        let len = string.len();
+        debug_assert!(len <= isize::MAX as usize);
+        match len as u64 {
+            0 => Self::empty(),
+            1..=8 => {
+                let mut bytes = [0u8; mem::size_of::<Identifier>()];
+                // SAFETY: string is big enough to read len bytes, bytes is big
+                // enough to write len bytes, and they do not overlap.
+                unsafe { ptr::copy_nonoverlapping(string.as_ptr(), bytes.as_mut_ptr(), len) };
+                // SAFETY: the head field is nonzero because the input string
+                // was at least 1 byte of ASCII and did not contain \0.
+                unsafe { mem::transmute::<[u8; mem::size_of::<Identifier>()], Identifier>(bytes) }
+            }
+            9..=0xff_ffff_ffff_ffff => {
+                // SAFETY: len is in a range that does not contain 0.
+                let size = bytes_for_varint(unsafe { NonZeroUsize::new_unchecked(len) }) + len;
+                let align = 2;
+                // On 32-bit and 16-bit architecture, check for size overflowing
+                // isize::MAX. Making an allocation request bigger than this to
+                // the allocator is considered UB. All allocations (including
+                // static ones) are limited to isize::MAX so we're guaranteed
+                // len <= isize::MAX, and we know bytes_for_varint(len) <= 5
+                // because 128**5 > isize::MAX, which means the only problem
+                // that can arise is when isize::MAX - 5 <= len <= isize::MAX.
+                // This is pretty much guaranteed to be malicious input so we
+                // don't need to care about returning a good error message.
+                if mem::size_of::<usize>() < 8 {
+                    let max_alloc = usize::MAX / 2 - align;
+                    assert!(size <= max_alloc);
+                }
+                // SAFETY: align is not zero, align is a power of two, and
+                // rounding size up to align does not overflow isize::MAX.
+                let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
+                // SAFETY: layout's size is nonzero.
+                let ptr = unsafe { alloc(layout) };
+                if ptr.is_null() {
+                    handle_alloc_error(layout);
+                }
+                let mut write = ptr;
+                let mut varint_remaining = len;
+                while varint_remaining > 0 {
+                    // SAFETY: size is bytes_for_varint(len) bytes + len bytes.
+                    // This is writing the first bytes_for_varint(len) bytes.
+                    unsafe { ptr::write(write, varint_remaining as u8 | 0x80) };
+                    varint_remaining >>= 7;
+                    // SAFETY: still in bounds of the same allocation.
+                    write = unsafe { write.add(1) };
+                }
+                // SAFETY: size is bytes_for_varint(len) bytes + len bytes. This
+                // is writing to the last len bytes.
+                unsafe { ptr::copy_nonoverlapping(string.as_ptr(), write, len) };
+                Identifier {
+                    head: ptr_to_repr(ptr),
+                    tail: [0; TAIL_BYTES],
+                }
+            }
+            0x100_0000_0000_0000..=0xffff_ffff_ffff_ffff => {
+                unreachable!("please refrain from storing >64 petabytes of text in semver version");
+            }
+            #[cfg(no_exhaustive_int_match)] // rustc <1.33
+            _ => unreachable!(),
+        }
+    }
+
+    pub(crate) fn is_empty(&self) -> bool {
+        // `cmp rdi, -1` -- basically: `repr as i64 == -1`
+        let empty = Self::empty();
+        let is_empty = self.head == empty.head && self.tail == empty.tail;
+        // The empty representation does nothing on Drop. We can't let this one
+        // drop normally because `impl Drop for Identifier` calls is_empty; that
+        // would be an infinite recursion.
+        mem::forget(empty);
+        is_empty
+    }
+
+    fn is_inline(&self) -> bool {
+        // `test rdi, rdi` -- basically: `repr as i64 >= 0`
+        self.head.as_ptr() as usize >> (PTR_BYTES * 8 - 1) == 0
+    }
+
+    fn is_empty_or_inline(&self) -> bool {
+        // `cmp rdi, -2` -- basically: `repr as i64 > -2`
+        self.is_empty() || self.is_inline()
+    }
+
+    pub(crate) fn as_str(&self) -> &str {
+        if self.is_empty() {
+            ""
+        } else if self.is_inline() {
+            // SAFETY: repr is in the inline representation.
+            unsafe { inline_as_str(self) }
+        } else {
+            // SAFETY: repr is in the heap allocated representation.
+            unsafe { ptr_as_str(&self.head) }
+        }
+    }
+}
+
+impl Clone for Identifier {
+    fn clone(&self) -> Self {
+        if self.is_empty_or_inline() {
+            Identifier {
+                head: self.head,
+                tail: self.tail,
+            }
+        } else {
+            let ptr = repr_to_ptr(self.head);
+            // SAFETY: ptr is one of our own heap allocations.
+            let len = unsafe { decode_len(ptr) };
+            let size = bytes_for_varint(len) + len.get();
+            let align = 2;
+            // SAFETY: align is not zero, align is a power of two, and rounding
+            // size up to align does not overflow isize::MAX. This is just
+            // duplicating a previous allocation where all of these guarantees
+            // were already made.
+            let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
+            // SAFETY: layout's size is nonzero.
+            let clone = unsafe { alloc(layout) };
+            if clone.is_null() {
+                handle_alloc_error(layout);
+            }
+            // SAFETY: new allocation cannot overlap the previous one (this was
+            // not a realloc). The argument ptrs are readable/writeable
+            // respectively for size bytes.
+            unsafe { ptr::copy_nonoverlapping(ptr, clone, size) }
+            Identifier {
+                head: ptr_to_repr(clone),
+                tail: [0; TAIL_BYTES],
+            }
+        }
+    }
+}
+
+impl Drop for Identifier {
+    fn drop(&mut self) {
+        if self.is_empty_or_inline() {
+            return;
+        }
+        let ptr = repr_to_ptr_mut(self.head);
+        // SAFETY: ptr is one of our own heap allocations.
+        let len = unsafe { decode_len(ptr) };
+        let size = bytes_for_varint(len) + len.get();
+        let align = 2;
+        // SAFETY: align is not zero, align is a power of two, and rounding
+        // size up to align does not overflow usize::MAX. These guarantees were
+        // made when originally allocating this memory.
+        let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
+        // SAFETY: ptr was previously allocated by the same allocator with the
+        // same layout.
+        unsafe { dealloc(ptr, layout) }
+    }
+}
+
+impl PartialEq for Identifier {
+    fn eq(&self, rhs: &Self) -> bool {
+        if self.is_empty_or_inline() {
+            // Fast path (most common)
+            self.head == rhs.head && self.tail == rhs.tail
+        } else if rhs.is_empty_or_inline() {
+            false
+        } else {
+            // SAFETY: both reprs are in the heap allocated representation.
+            unsafe { ptr_as_str(&self.head) == ptr_as_str(&rhs.head) }
+        }
+    }
+}
+
+unsafe impl Send for Identifier {}
+unsafe impl Sync for Identifier {}
+
+// We use heap pointers that are 2-byte aligned, meaning they have an
+// insignificant 0 in the least significant bit. We take advantage of that
+// unneeded bit to rotate a 1 into the most significant bit to make the repr
+// distinguishable from ASCII bytes.
+fn ptr_to_repr(original: *mut u8) -> NonNull<u8> {
+    // `mov eax, 1`
+    // `shld rax, rdi, 63`
+    let modified = (original as usize | 1).rotate_right(1);
+
+    // `original + (modified - original)`, but being mindful of provenance.
+    let diff = modified.wrapping_sub(original as usize);
+    let modified = original.wrapping_add(diff);
+
+    // SAFETY: the most significant bit of repr is known to be set, so the value
+    // is not zero.
+    unsafe { NonNull::new_unchecked(modified) }
+}
+
+// Shift out the 1 previously placed into the most significant bit of the least
+// significant byte. Shift in a low 0 bit to reconstruct the original 2-byte
+// aligned pointer.
+fn repr_to_ptr(modified: NonNull<u8>) -> *const u8 {
+    // `lea rax, [rdi + rdi]`
+    let modified = modified.as_ptr();
+    let original = (modified as usize) << 1;
+
+    // `modified + (original - modified)`, but being mindful of provenance.
+    let diff = original.wrapping_sub(modified as usize);
+    modified.wrapping_add(diff)
+}
+
+fn repr_to_ptr_mut(repr: NonNull<u8>) -> *mut u8 {
+    repr_to_ptr(repr) as *mut u8
+}
+
+// Compute the length of the inline string, assuming the argument is in short
+// string representation. Short strings are stored as 1 to 8 nonzero ASCII
+// bytes, followed by \0 padding for the remaining bytes.
+//
+// SAFETY: the identifier must indeed be in the inline representation.
+unsafe fn inline_len(repr: &Identifier) -> NonZeroUsize {
+    // SAFETY: Identifier's layout is align(8) and at least size 8. We're doing
+    // an aligned read of the first 8 bytes from it. The bytes are not all zero
+    // because inline strings are at least 1 byte long and cannot contain \0.
+    let repr = unsafe { ptr::read(repr as *const Identifier as *const NonZeroU64) };
+
+    // Rustc >=1.53 has intrinsics for counting zeros on a non-zeroable integer.
+    // On many architectures these are more efficient than counting on ordinary
+    // zeroable integers (bsf vs cttz). On rustc <1.53 without those intrinsics,
+    // we count zeros in the u64 rather than the NonZeroU64.
+    #[cfg(no_nonzero_bitscan)]
+    let repr = repr.get();
+
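+    // On little-endian targets the \0 padding occupies the most significant
+    // bytes of the u64 and is counted by leading_zeros; on big-endian targets
+    // it occupies the least significant bytes and is counted by trailing_zeros.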
+    #[cfg(target_endian = "little")]
+    let zero_bits_on_string_end = repr.leading_zeros();
+    #[cfg(target_endian = "big")]
+    let zero_bits_on_string_end = repr.trailing_zeros();
+
+    let nonzero_bytes = 8 - zero_bits_on_string_end as usize / 8;
+
+    // SAFETY: repr is nonzero, so it has at most 63 zero bits on either end,
+    // thus at least one nonzero byte.
+    unsafe { NonZeroUsize::new_unchecked(nonzero_bytes) }
+}
+
+// SAFETY: repr must be in the inline representation, i.e. at least 1 and at
+// most 8 nonzero ASCII bytes padded on the end with \0 bytes.
+unsafe fn inline_as_str(repr: &Identifier) -> &str {
+    let ptr = repr as *const Identifier as *const u8;
+    let len = unsafe { inline_len(repr) }.get();
+    // SAFETY: we are viewing the nonzero ASCII prefix of the inline repr's
+    // contents as a slice of bytes. Input/output lifetimes are correctly
+    // associated.
+    let slice = unsafe { slice::from_raw_parts(ptr, len) };
+    // SAFETY: the string contents are known to be only ASCII bytes, which are
+    // always valid UTF-8.
+    unsafe { str::from_utf8_unchecked(slice) }
+}
+
+// Decode varint. Varints consist of between one and eight base-128 digits, each
+// of which is stored in a byte with most significant bit set. Adjacent to the
+// varint in memory there is guaranteed to be at least 9 ASCII bytes, each of
+// which has an unset most significant bit.
+//
+// SAFETY: ptr must be one of our own heap allocations, with the varint header
+// already written.
+unsafe fn decode_len(ptr: *const u8) -> NonZeroUsize {
+    // SAFETY: There is at least one byte of varint followed by at least 9 bytes
+    // of string content, which is at least 10 bytes total for the allocation,
+    // so reading the first two is no problem.
+    let [first, second] = unsafe { ptr::read(ptr as *const [u8; 2]) };
+    if second < 0x80 {
+        // SAFETY: the length of this heap allocated string has been encoded as
+        // one base-128 digit, so the length is at least 9 and at most 127. It
+        // cannot be zero.
+        unsafe { NonZeroUsize::new_unchecked((first & 0x7f) as usize) }
+    } else {
+        return unsafe { decode_len_cold(ptr) };
+
+        // Identifiers 128 bytes or longer. This is not exercised by any crate
+        // version currently published to crates.io.
+        #[cold]
+        #[inline(never)]
+        unsafe fn decode_len_cold(mut ptr: *const u8) -> NonZeroUsize {
+            let mut len = 0;
+            let mut shift = 0;
+            loop {
+                // SAFETY: varint continues while there are bytes having the
+                // most significant bit set, i.e. until we start hitting the
+                // ASCII string content with msb unset.
+                let byte = unsafe { *ptr };
+                if byte < 0x80 {
+                    // SAFETY: the string length is known to be 128 bytes or
+                    // longer.
+                    return unsafe { NonZeroUsize::new_unchecked(len) };
+                }
+                // SAFETY: still in bounds of the same allocation.
+                ptr = unsafe { ptr.add(1) };
+                len += ((byte & 0x7f) as usize) << shift;
+                shift += 7;
+            }
+        }
+    }
+}
+
+// SAFETY: repr must be in the heap allocated representation, with varint header
+// and string contents already written.
+unsafe fn ptr_as_str(repr: &NonNull<u8>) -> &str {
+    let ptr = repr_to_ptr(*repr);
+    let len = unsafe { decode_len(ptr) };
+    let header = bytes_for_varint(len);
+    let slice = unsafe { slice::from_raw_parts(ptr.add(header), len.get()) };
+    // SAFETY: all identifier contents are ASCII bytes, which are always valid
+    // UTF-8.
+    unsafe { str::from_utf8_unchecked(slice) }
+}
+
+// Number of base-128 digits required for the varint representation of a length.
+fn bytes_for_varint(len: NonZeroUsize) -> usize {
+    #[cfg(no_nonzero_bitscan)] // rustc <1.53
+    let len = len.get();
+
+    let usize_bits = mem::size_of::<usize>() * 8;
+    let len_bits = usize_bits - len.leading_zeros() as usize;
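+    // Ceiling of len_bits / 7: one base-128 digit per 7 bits of length.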
+    (len_bits + 6) / 7
+}
diff --git a/crates/semver/src/impls.rs b/crates/semver/src/impls.rs
new file mode 100644
index 0000000..cc4fd41
--- /dev/null
+++ b/crates/semver/src/impls.rs
@@ -0,0 +1,156 @@
+use crate::backport::*;
+use crate::identifier::Identifier;
+use crate::{BuildMetadata, Comparator, Prerelease, VersionReq};
+use core::cmp::Ordering;
+use core::hash::{Hash, Hasher};
+use core::iter::FromIterator;
+use core::ops::Deref;
+
+impl Default for Identifier {
+    fn default() -> Self {
+        Identifier::empty()
+    }
+}
+
+impl Eq for Identifier {}
+
+impl Hash for Identifier {
+    fn hash<H: Hasher>(&self, hasher: &mut H) {
+        self.as_str().hash(hasher);
+    }
+}
+
+impl Deref for Prerelease {
+    type Target = str;
+
+    fn deref(&self) -> &Self::Target {
+        self.identifier.as_str()
+    }
+}
+
+impl Deref for BuildMetadata {
+    type Target = str;
+
+    fn deref(&self) -> &Self::Target {
+        self.identifier.as_str()
+    }
+}
+
+impl PartialOrd for Prerelease {
+    fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
+        Some(self.cmp(rhs))
+    }
+}
+
+impl PartialOrd for BuildMetadata {
+    fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
+        Some(self.cmp(rhs))
+    }
+}
+
+impl Ord for Prerelease {
+    fn cmp(&self, rhs: &Self) -> Ordering {
+        match self.is_empty() {
+            true if rhs.is_empty() => return Ordering::Equal,
+            // A real release compares greater than prerelease.
+            true => return Ordering::Greater,
+            // Prerelease compares less than the real release.
+            false if rhs.is_empty() => return Ordering::Less,
+            false => {}
+        }
+
+        let lhs = self.as_str().split('.');
+        let mut rhs = rhs.as_str().split('.');
+
+        for lhs in lhs {
+            let rhs = match rhs.next() {
+                // Spec: "A larger set of pre-release fields has a higher
+                // precedence than a smaller set, if all of the preceding
+                // identifiers are equal."
+                None => return Ordering::Greater,
+                Some(rhs) => rhs,
+            };
+
+            let string_cmp = || Ord::cmp(lhs, rhs);
+            let is_ascii_digit = |b: u8| b.is_ascii_digit();
+            let ordering = match (
+                lhs.bytes().all(is_ascii_digit),
+                rhs.bytes().all(is_ascii_digit),
+            ) {
+                // Respect numeric ordering, for example 99 < 100. Spec says:
+                // "Identifiers consisting of only digits are compared
+                // numerically."
+                (true, true) => Ord::cmp(&lhs.len(), &rhs.len()).then_with(string_cmp),
+                // Spec: "Numeric identifiers always have lower precedence than
+                // non-numeric identifiers."
+                (true, false) => return Ordering::Less,
+                (false, true) => return Ordering::Greater,
+                // Spec: "Identifiers with letters or hyphens are compared
+                // lexically in ASCII sort order."
+                (false, false) => string_cmp(),
+            };
+
+            if ordering != Ordering::Equal {
+                return ordering;
+            }
+        }
+
+        if rhs.next().is_none() {
+            Ordering::Equal
+        } else {
+            Ordering::Less
+        }
+    }
+}
+
+impl Ord for BuildMetadata {
+    fn cmp(&self, rhs: &Self) -> Ordering {
+        let lhs = self.as_str().split('.');
+        let mut rhs = rhs.as_str().split('.');
+
+        for lhs in lhs {
+            let rhs = match rhs.next() {
+                None => return Ordering::Greater,
+                Some(rhs) => rhs,
+            };
+
+            let is_ascii_digit = |b: u8| b.is_ascii_digit();
+            let ordering = match (
+                lhs.bytes().all(is_ascii_digit),
+                rhs.bytes().all(is_ascii_digit),
+            ) {
+                (true, true) => {
+                    // 0 < 00 < 1 < 01 < 001 < 2 < 02 < 002 < 10
+                    let lhval = lhs.trim_start_matches('0');
+                    let rhval = rhs.trim_start_matches('0');
+                    Ord::cmp(&lhval.len(), &rhval.len())
+                        .then_with(|| Ord::cmp(lhval, rhval))
+                        .then_with(|| Ord::cmp(&lhs.len(), &rhs.len()))
+                }
+                (true, false) => return Ordering::Less,
+                (false, true) => return Ordering::Greater,
+                (false, false) => Ord::cmp(lhs, rhs),
+            };
+
+            if ordering != Ordering::Equal {
+                return ordering;
+            }
+        }
+
+        if rhs.next().is_none() {
+            Ordering::Equal
+        } else {
+            Ordering::Less
+        }
+    }
+}
+
+impl FromIterator<Comparator> for VersionReq {
+    fn from_iter<I>(iter: I) -> Self
+    where
+        I: IntoIterator<Item = Comparator>,
+    {
+        let comparators = Vec::from_iter(iter);
+        VersionReq { comparators }
+    }
+}
diff --git a/crates/semver/src/lib.rs b/crates/semver/src/lib.rs
new file mode 100644
index 0000000..952cc2b
--- /dev/null
+++ b/crates/semver/src/lib.rs
@@ -0,0 +1,580 @@
+//! [![github]](https://github.com/dtolnay/semver)&ensp;[![crates-io]](https://crates.io/crates/semver)&ensp;[![docs-rs]](https://docs.rs/semver)
+//!
+//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
+//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
+//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs
+//!
+//! <br>
+//!
+//! A parser and evaluator for Cargo's flavor of Semantic Versioning.
+//!
+//! Semantic Versioning (see <https://semver.org>) is a guideline for how
+//! version numbers are assigned and incremented. It is widely followed within
+//! the Cargo/crates.io ecosystem for Rust.
+//!
+//! <br>
+//!
+//! # Example
+//!
+//! ```
+//! use semver::{BuildMetadata, Prerelease, Version, VersionReq};
+//!
+//! fn main() {
+//!     let req = VersionReq::parse(">=1.2.3, <1.8.0").unwrap();
+//!
+//!     // Check whether this requirement matches version 1.2.3-alpha.1 (no)
+//!     let version = Version {
+//!         major: 1,
+//!         minor: 2,
+//!         patch: 3,
+//!         pre: Prerelease::new("alpha.1").unwrap(),
+//!         build: BuildMetadata::EMPTY,
+//!     };
+//!     assert!(!req.matches(&version));
+//!
+//!     // Check whether it matches 1.3.0 (yes it does)
+//!     let version = Version::parse("1.3.0").unwrap();
+//!     assert!(req.matches(&version));
+//! }
+//! ```
+//!
+//! <br><br>
+//!
+//! # Scope of this crate
+//!
+//! Besides Cargo, several other package ecosystems and package managers for
+//! other languages also use SemVer:&ensp;RubyGems/Bundler for Ruby, npm for
+//! JavaScript, Composer for PHP, CocoaPods for Objective-C...
+//!
+//! The `semver` crate is specifically intended to implement Cargo's
+//! interpretation of Semantic Versioning.
+//!
+//! Where the various tools differ in their interpretation or implementation of
+//! the spec, this crate follows the implementation choices made by Cargo. If
+//! you are operating on version numbers from some other package ecosystem, you
+//! will want to use a different semver library which is appropriate to that
+//! ecosystem.
+//!
+//! The extent of Cargo's SemVer support is documented in the *[Specifying
+//! Dependencies]* chapter of the Cargo reference.
+//!
+//! [Specifying Dependencies]: https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html
+
+#![doc(html_root_url = "https://docs.rs/semver/1.0.21")]
+#![cfg_attr(doc_cfg, feature(doc_cfg))]
+#![cfg_attr(all(not(feature = "std"), not(no_alloc_crate)), no_std)]
+#![cfg_attr(not(no_unsafe_op_in_unsafe_fn_lint), deny(unsafe_op_in_unsafe_fn))]
+#![cfg_attr(no_unsafe_op_in_unsafe_fn_lint, allow(unused_unsafe))]
+#![cfg_attr(no_str_strip_prefix, allow(unstable_name_collisions))]
+#![allow(
+    clippy::cast_lossless,
+    clippy::cast_possible_truncation,
+    clippy::doc_markdown,
+    clippy::items_after_statements,
+    clippy::manual_map,
+    clippy::match_bool,
+    clippy::missing_errors_doc,
+    clippy::must_use_candidate,
+    clippy::needless_doctest_main,
+    clippy::ptr_as_ptr,
+    clippy::redundant_else,
+    clippy::semicolon_if_nothing_returned, // https://github.com/rust-lang/rust-clippy/issues/7324
+    clippy::similar_names,
+    clippy::unnested_or_patterns,
+    clippy::unseparated_literal_suffix,
+    clippy::wildcard_imports
+)]
+
+#[cfg(not(no_alloc_crate))]
+extern crate alloc;
+
+mod backport;
+mod display;
+mod error;
+mod eval;
+mod identifier;
+mod impls;
+mod parse;
+
+#[cfg(feature = "serde")]
+mod serde;
+
+use crate::alloc::vec::Vec;
+use crate::identifier::Identifier;
+use core::cmp::Ordering;
+use core::str::FromStr;
+
+#[allow(unused_imports)]
+use crate::backport::*;
+
+pub use crate::parse::Error;
+
+/// **SemVer version** as defined by <https://semver.org>.
+///
+/// # Syntax
+///
+/// - The major, minor, and patch numbers may be any integer 0 through u64::MAX.
+///   When representing a SemVer version as a string, each number is written as
+///   a base 10 integer. For example, `1.0.119`.
+///
+/// - Leading zeros are forbidden in those positions. For example `1.01.00` is
+///   invalid as a SemVer version.
+///
+/// - The pre-release identifier, if present, must conform to the syntax
+///   documented for [`Prerelease`].
+///
+/// - The build metadata, if present, must conform to the syntax documented for
+///   [`BuildMetadata`].
+///
+/// - Whitespace is not allowed anywhere in the version.
+///
+/// # Total ordering
+///
+/// Given any two SemVer versions, one is less than, greater than, or equal to
+/// the other. Versions may be compared against one another using Rust's usual
+/// comparison operators.
+///
+/// - The major, minor, and patch number are compared numerically from left to
+///   right, lexicographically ordered as a 3-tuple of integers. So for example
+///   version `1.5.0` is less than version `1.19.0`, despite the fact that
+///   "1.19.0" &lt; "1.5.0" as ASCIIbetically compared strings and 1.19 &lt; 1.5
+///   as real numbers.
+///
+/// - When major, minor, and patch are equal, a pre-release version is
+///   considered less than the ordinary release:&ensp;version `1.0.0-alpha.1` is
+///   less than version `1.0.0`.
+///
+/// - Two pre-releases of the same major, minor, patch are compared by
+///   lexicographic ordering of dot-separated components of the pre-release
+///   string.
+///
+///   - Identifiers consisting of only digits are compared
+///     numerically:&ensp;`1.0.0-pre.8` is less than `1.0.0-pre.12`.
+///
+///   - Identifiers that contain a letter or hyphen are compared in ASCII sort
+///     order:&ensp;`1.0.0-pre12` is less than `1.0.0-pre8`.
+///
+///   - Any numeric identifier is always less than any non-numeric
+///     identifier:&ensp;`1.0.0-pre.1` is less than `1.0.0-pre.x`.
+///
+/// Example:&ensp;`1.0.0-alpha`&ensp;&lt;&ensp;`1.0.0-alpha.1`&ensp;&lt;&ensp;`1.0.0-alpha.beta`&ensp;&lt;&ensp;`1.0.0-beta`&ensp;&lt;&ensp;`1.0.0-beta.2`&ensp;&lt;&ensp;`1.0.0-beta.11`&ensp;&lt;&ensp;`1.0.0-rc.1`&ensp;&lt;&ensp;`1.0.0`
+#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
+pub struct Version {
+    pub major: u64,
+    pub minor: u64,
+    pub patch: u64,
+    pub pre: Prerelease,
+    pub build: BuildMetadata,
+}
+
+/// **SemVer version requirement** describing the intersection of some version
+/// comparators, such as `>=1.2.3, <1.8`.
+///
+/// # Syntax
+///
+/// - Either `*` (meaning "any"), or one or more comma-separated comparators.
+///
+/// - A [`Comparator`] is an operator ([`Op`]) and a partial version, separated
+///   by optional whitespace. For example `>=1.0.0` or `>=1.0`.
+///
+/// - Build metadata is syntactically permitted on the partial versions, but is
+///   completely ignored, as it's never relevant to whether any comparator
+///   matches a particular version.
+///
+/// - Whitespace is permitted around commas and around operators. Whitespace is
+///   not permitted within a partial version, i.e. anywhere between the major
+///   version number and its minor, patch, pre-release, or build metadata.
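+///
+/// For example, `VersionReq::parse("^1.2")` produces a single [`Comparator`]
+/// with `op: Op::Caret`, `major: 1`, `minor: Some(2)`, and `patch: None`.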
+#[derive(Clone, Eq, PartialEq, Hash, Debug)]
+#[cfg_attr(no_const_vec_new, derive(Default))]
+pub struct VersionReq {
+    pub comparators: Vec<Comparator>,
+}
+
+/// A pair of comparison operator and partial version, such as `>=1.2`. Forms
+/// one piece of a VersionReq.
+#[derive(Clone, Eq, PartialEq, Hash, Debug)]
+pub struct Comparator {
+    pub op: Op,
+    pub major: u64,
+    pub minor: Option<u64>,
+    /// Patch is only allowed if minor is Some.
+    pub patch: Option<u64>,
+    /// Non-empty pre-release is only allowed if patch is Some.
+    pub pre: Prerelease,
+}
+
+/// SemVer comparison operator: `=`, `>`, `>=`, `<`, `<=`, `~`, `^`, `*`.
+///
+/// # Op::Exact
+/// - &ensp;**`=I.J.K`**&emsp;&mdash;&emsp;exactly the version I.J.K
+/// - &ensp;**`=I.J`**&emsp;&mdash;&emsp;equivalent to `>=I.J.0, <I.(J+1).0`
+/// - &ensp;**`=I`**&emsp;&mdash;&emsp;equivalent to `>=I.0.0, <(I+1).0.0`
+///
+/// # Op::Greater
+/// - &ensp;**`>I.J.K`**
+/// - &ensp;**`>I.J`**&emsp;&mdash;&emsp;equivalent to `>=I.(J+1).0`
+/// - &ensp;**`>I`**&emsp;&mdash;&emsp;equivalent to `>=(I+1).0.0`
+///
+/// # Op::GreaterEq
+/// - &ensp;**`>=I.J.K`**
+/// - &ensp;**`>=I.J`**&emsp;&mdash;&emsp;equivalent to `>=I.J.0`
+/// - &ensp;**`>=I`**&emsp;&mdash;&emsp;equivalent to `>=I.0.0`
+///
+/// # Op::Less
+/// - &ensp;**`<I.J.K`**
+/// - &ensp;**`<I.J`**&emsp;&mdash;&emsp;equivalent to `<I.J.0`
+/// - &ensp;**`<I`**&emsp;&mdash;&emsp;equivalent to `<I.0.0`
+///
+/// # Op::LessEq
+/// - &ensp;**`<=I.J.K`**
+/// - &ensp;**`<=I.J`**&emsp;&mdash;&emsp;equivalent to `<I.(J+1).0`
+/// - &ensp;**`<=I`**&emsp;&mdash;&emsp;equivalent to `<(I+1).0.0`
+///
+/// # Op::Tilde&emsp;("patch" updates)
+/// *Tilde requirements allow the **patch** part of the semver version (the third number) to increase.*
+/// - &ensp;**`~I.J.K`**&emsp;&mdash;&emsp;equivalent to `>=I.J.K, <I.(J+1).0`
+/// - &ensp;**`~I.J`**&emsp;&mdash;&emsp;equivalent to `=I.J`
+/// - &ensp;**`~I`**&emsp;&mdash;&emsp;equivalent to `=I`
+///
+/// # Op::Caret&emsp;("compatible" updates)
+/// *Caret requirements allow parts that are **right of the first nonzero** part of the semver version to increase.*
+/// - &ensp;**`^I.J.K`**&ensp;(for I\>0)&emsp;&mdash;&emsp;equivalent to `>=I.J.K, <(I+1).0.0`
+/// - &ensp;**`^0.J.K`**&ensp;(for J\>0)&emsp;&mdash;&emsp;equivalent to `>=0.J.K, <0.(J+1).0`
+/// - &ensp;**`^0.0.K`**&emsp;&mdash;&emsp;equivalent to `=0.0.K`
+/// - &ensp;**`^I.J`**&ensp;(for I\>0 or J\>0)&emsp;&mdash;&emsp;equivalent to `^I.J.0`
+/// - &ensp;**`^0.0`**&emsp;&mdash;&emsp;equivalent to `=0.0`
+/// - &ensp;**`^I`**&emsp;&mdash;&emsp;equivalent to `=I`
+///
+/// # Op::Wildcard
+/// - &ensp;**`I.J.*`**&emsp;&mdash;&emsp;equivalent to `=I.J`
+/// - &ensp;**`I.*`**&ensp;or&ensp;**`I.*.*`**&emsp;&mdash;&emsp;equivalent to `=I`
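+///
+/// A requirement with no explicit operator, such as `1.2.3`, is parsed with
+/// the default operator `Op::Caret`, matching Cargo's behavior.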
+#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
+#[cfg_attr(not(no_non_exhaustive), non_exhaustive)]
+pub enum Op {
+    Exact,
+    Greater,
+    GreaterEq,
+    Less,
+    LessEq,
+    Tilde,
+    Caret,
+    Wildcard,
+
+    #[cfg(no_non_exhaustive)] // rustc <1.40
+    #[doc(hidden)]
+    __NonExhaustive,
+}
+
+/// Optional pre-release identifier on a version string. This comes after `-` in
+/// a SemVer version, like `1.0.0-alpha.1`
+///
+/// # Examples
+///
+/// Some real world pre-release idioms drawn from crates.io:
+///
+/// - **[mio]** <code>0.7.0-<b>alpha.1</b></code> &mdash; the most common style
+///   for numbering pre-releases.
+///
+/// - **[pest]** <code>1.0.0-<b>beta.8</b></code>,&ensp;<code>1.0.0-<b>rc.0</b></code>
+///   &mdash; this crate makes a distinction between betas and release
+///   candidates.
+///
+/// - **[sassers]** <code>0.11.0-<b>shitshow</b></code> &mdash; ???.
+///
+/// - **[atomic-utils]** <code>0.0.0-<b>reserved</b></code> &mdash; a squatted
+///   crate name.
+///
+/// [mio]: https://crates.io/crates/mio
+/// [pest]: https://crates.io/crates/pest
+/// [atomic-utils]: https://crates.io/crates/atomic-utils
+/// [sassers]: https://crates.io/crates/sassers
+///
+/// *Tip:* Be aware that if you are planning to number your own pre-releases,
+/// you should prefer to separate the numeric part from any non-numeric
+/// identifiers by using a dot in between. That is, prefer pre-releases
+/// `alpha.1`, `alpha.2`, etc rather than `alpha1`, `alpha2` etc. The SemVer
+/// spec's rule for pre-release precedence has special treatment of numeric
+/// components in the pre-release string, but only if there are no non-digit
+/// characters in the same dot-separated component. So you'd have `alpha.2` &lt;
+/// `alpha.11` as intended, but `alpha11` &lt; `alpha2`.
+///
+/// # Syntax
+///
+/// Pre-release strings are a series of dot separated identifiers immediately
+/// following the patch version. Identifiers must comprise only ASCII
+/// alphanumerics and hyphens: `0-9`, `A-Z`, `a-z`, `-`. Identifiers must not be
+/// empty. Numeric identifiers must not include leading zeros.
+///
+/// # Total ordering
+///
+/// Pre-releases have a total order defined by the SemVer spec. It uses
+/// lexicographic ordering of dot-separated components. Identifiers consisting
+/// of only digits are compared numerically. Otherwise, identifiers are compared
+/// in ASCII sort order. Any numeric identifier is always less than any
+/// non-numeric identifier.
+///
+/// Example:&ensp;`alpha`&ensp;&lt;&ensp;`alpha.85`&ensp;&lt;&ensp;`alpha.90`&ensp;&lt;&ensp;`alpha.200`&ensp;&lt;&ensp;`alpha.0a`&ensp;&lt;&ensp;`alpha.1a0`&ensp;&lt;&ensp;`alpha.a`&ensp;&lt;&ensp;`beta`
+#[derive(Default, Clone, Eq, PartialEq, Hash)]
+pub struct Prerelease {
+    identifier: Identifier,
+}
+
+/// Optional build metadata identifier. This comes after `+` in a SemVer
+/// version, as in `0.8.1+zstd.1.5.0`.
+///
+/// # Examples
+///
+/// Some real world build metadata idioms drawn from crates.io:
+///
+/// - **[libgit2-sys]** <code>0.12.20+<b>1.1.0</b></code> &mdash; for this
+///   crate, the build metadata indicates the version of the C libgit2 library
+///   that the Rust crate is built against.
+///
+/// - **[mashup]** <code>0.1.13+<b>deprecated</b></code> &mdash; just the word
+///   "deprecated" for a crate that has been superseded by another. Eventually
+///   people will take notice of this in Cargo's build output where it lists the
+///   crates being compiled.
+///
+/// - **[google-bigquery2]** <code>2.0.4+<b>20210327</b></code> &mdash; this
+///   library is automatically generated from an official API schema, and the
+///   build metadata indicates the date on which that schema was last captured.
+///
+/// - **[fbthrift-git]** <code>0.0.6+<b>c7fcc0e</b></code> &mdash; this crate is
+///   published from snapshots of a big company monorepo. In monorepo
+///   development, there is no concept of versions, and all downstream code is
+///   just updated atomically in the same commit that breaking changes to a
+///   library are landed. Therefore for crates.io purposes, every published
+///   version must be assumed to be incompatible with the previous. The build
+///   metadata provides the source control hash of the snapshotted code.
+///
+/// [libgit2-sys]: https://crates.io/crates/libgit2-sys
+/// [mashup]: https://crates.io/crates/mashup
+/// [google-bigquery2]: https://crates.io/crates/google-bigquery2
+/// [fbthrift-git]: https://crates.io/crates/fbthrift-git
+///
+/// # Syntax
+///
+/// Build metadata is a series of dot separated identifiers immediately
+/// following the patch or pre-release version. Identifiers must comprise only
+/// ASCII alphanumerics and hyphens: `0-9`, `A-Z`, `a-z`, `-`. Identifiers must
+/// not be empty. Leading zeros *are* allowed, unlike any other place in the
+/// SemVer grammar.
+///
+/// # Total ordering
+///
+/// Build metadata is ignored in evaluating `VersionReq`; it plays no role in
+/// whether a `Version` matches any one of the comparison operators.
+///
+/// However, when comparing build metadata values among one another, they do
+/// have a total order, determined by lexicographic ordering of dot-separated
+/// components. Identifiers consisting of only digits are compared numerically.
+/// Otherwise, identifiers are compared in ASCII sort order. Any numeric
+/// identifier is always less than any non-numeric identifier.
+///
+/// Example:&ensp;`demo`&ensp;&lt;&ensp;`demo.85`&ensp;&lt;&ensp;`demo.90`&ensp;&lt;&ensp;`demo.090`&ensp;&lt;&ensp;`demo.200`&ensp;&lt;&ensp;`demo.1a0`&ensp;&lt;&ensp;`demo.a`&ensp;&lt;&ensp;`memo`
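+///
+/// The "ignored in matching and precedence" behavior above, sketched as an
+/// illustrative doctest (not part of the upstream documentation):
+///
+/// ```
+/// use semver::{Version, VersionReq};
+/// use std::cmp::Ordering;
+///
+/// let with_build = Version::parse("0.8.1+zstd.1.5.0").unwrap();
+/// let without_build = Version::parse("0.8.1").unwrap();
+///
+/// // Build metadata plays no role in precedence or requirement matching.
+/// assert_eq!(with_build.cmp_precedence(&without_build), Ordering::Equal);
+/// assert!(VersionReq::parse("=0.8.1").unwrap().matches(&with_build));
+/// ```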
+#[derive(Default, Clone, Eq, PartialEq, Hash)]
+pub struct BuildMetadata {
+    identifier: Identifier,
+}
+
+impl Version {
+    /// Create `Version` with an empty pre-release and build metadata.
+    ///
+    /// Equivalent to:
+    ///
+    /// ```
+    /// # use semver::{BuildMetadata, Prerelease, Version};
+    /// #
+    /// # const fn new(major: u64, minor: u64, patch: u64) -> Version {
+    /// Version {
+    ///     major,
+    ///     minor,
+    ///     patch,
+    ///     pre: Prerelease::EMPTY,
+    ///     build: BuildMetadata::EMPTY,
+    /// }
+    /// # }
+    /// ```
+    pub const fn new(major: u64, minor: u64, patch: u64) -> Self {
+        Version {
+            major,
+            minor,
+            patch,
+            pre: Prerelease::EMPTY,
+            build: BuildMetadata::EMPTY,
+        }
+    }
+
+    /// Create `Version` by parsing from string representation.
+    ///
+    /// # Errors
+    ///
+    /// Possible reasons for the parse to fail include:
+    ///
+    /// - `1.0` &mdash; too few numeric components. A SemVer version must have
+    ///   exactly three. If you are looking at something that has fewer than
+    ///   three numbers in it, it's possible it is a `VersionReq` instead (with
+    ///   an implicit default `^` comparison operator).
+    ///
+    /// - `1.0.01` &mdash; a numeric component has a leading zero.
+    ///
+    /// - `1.0.unknown` &mdash; unexpected character in one of the components.
+    ///
+    /// - `1.0.0-` or `1.0.0+` &mdash; the pre-release or build metadata are
+    ///   indicated present but empty.
+    ///
+    /// - `1.0.0-alpha_123` &mdash; pre-release or build metadata have something
+    ///   outside the allowed characters, which are `0-9`, `A-Z`, `a-z`, `-`,
+    ///   and `.` (dot).
+    ///
+    /// - `23456789999999999999.0.0` &mdash; overflow of a u64.
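+    ///
+    /// A couple of these failure modes, sketched as an illustrative doctest
+    /// (not part of the upstream documentation):
+    ///
+    /// ```
+    /// use semver::Version;
+    ///
+    /// assert!(Version::parse("1.0").is_err());    // too few numeric components
+    /// assert!(Version::parse("1.0.01").is_err()); // leading zero
+    /// assert!(Version::parse("1.0.0-alpha.1+build.5").is_ok());
+    /// ```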
+    pub fn parse(text: &str) -> Result<Self, Error> {
+        Version::from_str(text)
+    }
+
+    /// Compare the major, minor, patch, and pre-release value of two versions,
+    /// disregarding build metadata. Versions that differ only in build metadata
+    /// are considered equal. This comparison is what the SemVer spec refers to
+    /// as "precedence".
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use semver::Version;
+    ///
+    /// let mut versions = [
+    ///     "1.20.0+c144a98".parse::<Version>().unwrap(),
+    ///     "1.20.0".parse().unwrap(),
+    ///     "1.0.0".parse().unwrap(),
+    ///     "1.0.0-alpha".parse().unwrap(),
+    ///     "1.20.0+bc17664".parse().unwrap(),
+    /// ];
+    ///
+    /// // This is a stable sort, so it preserves the relative order of equal
+    /// // elements. The three 1.20.0 versions differ only in build metadata so
+    /// // they are not reordered relative to one another.
+    /// versions.sort_by(Version::cmp_precedence);
+    /// assert_eq!(versions, [
+    ///     "1.0.0-alpha".parse().unwrap(),
+    ///     "1.0.0".parse().unwrap(),
+    ///     "1.20.0+c144a98".parse().unwrap(),
+    ///     "1.20.0".parse().unwrap(),
+    ///     "1.20.0+bc17664".parse().unwrap(),
+    /// ]);
+    ///
+    /// // Totally order the versions, including comparing the build metadata.
+    /// versions.sort();
+    /// assert_eq!(versions, [
+    ///     "1.0.0-alpha".parse().unwrap(),
+    ///     "1.0.0".parse().unwrap(),
+    ///     "1.20.0".parse().unwrap(),
+    ///     "1.20.0+bc17664".parse().unwrap(),
+    ///     "1.20.0+c144a98".parse().unwrap(),
+    /// ]);
+    /// ```
+    pub fn cmp_precedence(&self, other: &Self) -> Ordering {
+        Ord::cmp(
+            &(self.major, self.minor, self.patch, &self.pre),
+            &(other.major, other.minor, other.patch, &other.pre),
+        )
+    }
+}
+
+impl VersionReq {
+    /// A `VersionReq` with no constraint on the version numbers it matches.
+    /// Equivalent to `VersionReq::parse("*").unwrap()`.
+    ///
+    /// In terms of comparators this is equivalent to `>=0.0.0`.
+    ///
+    /// Counterintuitively a `*` VersionReq does not match every possible
+    /// version number. In particular, in order for *any* `VersionReq` to match
+    /// a pre-release version, the `VersionReq` must contain at least one
+    /// `Comparator` that has an explicit major, minor, and patch version
+    /// identical to the pre-release being matched, and that has a nonempty
+    /// pre-release component. Since `*` is not written with an explicit major,
+    /// minor, and patch version, and does not contain a nonempty pre-release
+    /// component, it does not match any pre-release versions.
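+    ///
+    /// A minimal sketch of that behavior, added as an illustrative doctest
+    /// (not part of the upstream documentation):
+    ///
+    /// ```
+    /// use semver::{Version, VersionReq};
+    ///
+    /// assert!(VersionReq::STAR.matches(&Version::new(1, 2, 3)));
+    /// // Pre-release versions are never matched by `*`.
+    /// assert!(!VersionReq::STAR.matches(&Version::parse("1.2.3-alpha.1").unwrap()));
+    /// ```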
+    #[cfg(not(no_const_vec_new))] // rustc <1.39
+    pub const STAR: Self = VersionReq {
+        comparators: Vec::new(),
+    };
+
+    /// Create `VersionReq` by parsing from string representation.
+    ///
+    /// # Errors
+    ///
+    /// Possible reasons for the parse to fail include:
+    ///
+    /// - `>a.b` &mdash; unexpected characters in the partial version.
+    ///
+    /// - `@1.0.0` &mdash; unrecognized comparison operator.
+    ///
+    /// - `^1.0.0, ` &mdash; unexpected end of input.
+    ///
+    /// - `>=1.0 <2.0` &mdash; missing comma between comparators.
+    ///
+    /// - `*.*` &mdash; unsupported wildcard syntax.
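+    ///
+    /// A couple of these cases, sketched as an illustrative doctest (not part
+    /// of the upstream documentation):
+    ///
+    /// ```
+    /// use semver::VersionReq;
+    ///
+    /// assert!(VersionReq::parse(">=1.0, <2").is_ok());
+    /// assert!(VersionReq::parse(">=1.0 <2.0").is_err()); // missing comma
+    /// assert!(VersionReq::parse("*.*").is_err());        // unsupported wildcard
+    /// ```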
+    pub fn parse(text: &str) -> Result<Self, Error> {
+        VersionReq::from_str(text)
+    }
+
+    /// Evaluate whether the given `Version` satisfies the version requirement
+    /// described by `self`.
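+    ///
+    /// A minimal usage sketch, added as an illustrative doctest (not part of
+    /// the upstream documentation):
+    ///
+    /// ```
+    /// use semver::{Version, VersionReq};
+    ///
+    /// let req = VersionReq::parse(">=1.2.3, <1.8.0").unwrap();
+    /// assert!(req.matches(&Version::new(1, 5, 0)));
+    /// assert!(!req.matches(&Version::new(1, 8, 0)));
+    /// ```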
+    pub fn matches(&self, version: &Version) -> bool {
+        eval::matches_req(self, version)
+    }
+}
+
+/// The default VersionReq is the same as [`VersionReq::STAR`].
+#[cfg(not(no_const_vec_new))]
+impl Default for VersionReq {
+    fn default() -> Self {
+        VersionReq::STAR
+    }
+}
+
+impl Comparator {
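+    /// Parse a single comparator such as `>=1.2.3`. A minimal usage sketch,
+    /// added as an illustrative doctest (not part of the upstream
+    /// documentation):
+    ///
+    /// ```
+    /// use semver::{Comparator, Version};
+    ///
+    /// let cmp = Comparator::parse(">=1.2.3").unwrap();
+    /// assert!(cmp.matches(&Version::new(1, 5, 0)));
+    /// assert!(!cmp.matches(&Version::new(1, 2, 2)));
+    /// ```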
+    pub fn parse(text: &str) -> Result<Self, Error> {
+        Comparator::from_str(text)
+    }
+
+    pub fn matches(&self, version: &Version) -> bool {
+        eval::matches_comparator(self, version)
+    }
+}
+
+impl Prerelease {
+    pub const EMPTY: Self = Prerelease {
+        identifier: Identifier::empty(),
+    };
+
+    pub fn new(text: &str) -> Result<Self, Error> {
+        Prerelease::from_str(text)
+    }
+
+    pub fn as_str(&self) -> &str {
+        self.identifier.as_str()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.identifier.is_empty()
+    }
+}
+
+impl BuildMetadata {
+    pub const EMPTY: Self = BuildMetadata {
+        identifier: Identifier::empty(),
+    };
+
+    pub fn new(text: &str) -> Result<Self, Error> {
+        BuildMetadata::from_str(text)
+    }
+
+    pub fn as_str(&self) -> &str {
+        self.identifier.as_str()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.identifier.is_empty()
+    }
+}
diff --git a/crates/semver/src/parse.rs b/crates/semver/src/parse.rs
new file mode 100644
index 0000000..e92d87a
--- /dev/null
+++ b/crates/semver/src/parse.rs
@@ -0,0 +1,409 @@
+use crate::backport::*;
+use crate::error::{ErrorKind, Position};
+use crate::identifier::Identifier;
+use crate::{BuildMetadata, Comparator, Op, Prerelease, Version, VersionReq};
+use core::str::FromStr;
+
+/// Error parsing a SemVer version or version requirement.
+///
+/// # Example
+///
+/// ```
+/// use semver::Version;
+///
+/// fn main() {
+///     let err = Version::parse("1.q.r").unwrap_err();
+///
+///     // "unexpected character 'q' while parsing minor version number"
+///     eprintln!("{}", err);
+/// }
+/// ```
+pub struct Error {
+    pub(crate) kind: ErrorKind,
+}
+
+impl FromStr for Version {
+    type Err = Error;
+
+    fn from_str(text: &str) -> Result<Self, Self::Err> {
+        if text.is_empty() {
+            return Err(Error::new(ErrorKind::Empty));
+        }
+
+        let mut pos = Position::Major;
+        let (major, text) = numeric_identifier(text, pos)?;
+        let text = dot(text, pos)?;
+
+        pos = Position::Minor;
+        let (minor, text) = numeric_identifier(text, pos)?;
+        let text = dot(text, pos)?;
+
+        pos = Position::Patch;
+        let (patch, text) = numeric_identifier(text, pos)?;
+
+        if text.is_empty() {
+            return Ok(Version::new(major, minor, patch));
+        }
+
+        let (pre, text) = if let Some(text) = text.strip_prefix('-') {
+            pos = Position::Pre;
+            let (pre, text) = prerelease_identifier(text)?;
+            if pre.is_empty() {
+                return Err(Error::new(ErrorKind::EmptySegment(pos)));
+            }
+            (pre, text)
+        } else {
+            (Prerelease::EMPTY, text)
+        };
+
+        let (build, text) = if let Some(text) = text.strip_prefix('+') {
+            pos = Position::Build;
+            let (build, text) = build_identifier(text)?;
+            if build.is_empty() {
+                return Err(Error::new(ErrorKind::EmptySegment(pos)));
+            }
+            (build, text)
+        } else {
+            (BuildMetadata::EMPTY, text)
+        };
+
+        if let Some(unexpected) = text.chars().next() {
+            return Err(Error::new(ErrorKind::UnexpectedCharAfter(pos, unexpected)));
+        }
+
+        Ok(Version {
+            major,
+            minor,
+            patch,
+            pre,
+            build,
+        })
+    }
+}
+
+impl FromStr for VersionReq {
+    type Err = Error;
+
+    fn from_str(text: &str) -> Result<Self, Self::Err> {
+        let text = text.trim_start_matches(' ');
+        if let Some((ch, text)) = wildcard(text) {
+            let rest = text.trim_start_matches(' ');
+            if rest.is_empty() {
+                #[cfg(not(no_const_vec_new))]
+                return Ok(VersionReq::STAR);
+                #[cfg(no_const_vec_new)] // rustc <1.39
+                return Ok(VersionReq {
+                    comparators: Vec::new(),
+                });
+            } else if rest.starts_with(',') {
+                return Err(Error::new(ErrorKind::WildcardNotTheOnlyComparator(ch)));
+            } else {
+                return Err(Error::new(ErrorKind::UnexpectedAfterWildcard));
+            }
+        }
+
+        let depth = 0;
+        let mut comparators = Vec::new();
+        let len = version_req(text, &mut comparators, depth)?;
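+        // SAFETY: version_req reserves capacity for exactly `len` comparators
+        // and writes every slot in 0..len via raw pointer writes before
+        // returning, so exposing `len` initialized elements here is sound.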
+        unsafe { comparators.set_len(len) }
+        Ok(VersionReq { comparators })
+    }
+}
+
+impl FromStr for Comparator {
+    type Err = Error;
+
+    fn from_str(text: &str) -> Result<Self, Self::Err> {
+        let text = text.trim_start_matches(' ');
+        let (comparator, pos, rest) = comparator(text)?;
+        if !rest.is_empty() {
+            let unexpected = rest.chars().next().unwrap();
+            return Err(Error::new(ErrorKind::UnexpectedCharAfter(pos, unexpected)));
+        }
+        Ok(comparator)
+    }
+}
+
+impl FromStr for Prerelease {
+    type Err = Error;
+
+    fn from_str(text: &str) -> Result<Self, Self::Err> {
+        let (pre, rest) = prerelease_identifier(text)?;
+        if !rest.is_empty() {
+            return Err(Error::new(ErrorKind::IllegalCharacter(Position::Pre)));
+        }
+        Ok(pre)
+    }
+}
+
+impl FromStr for BuildMetadata {
+    type Err = Error;
+
+    fn from_str(text: &str) -> Result<Self, Self::Err> {
+        let (build, rest) = build_identifier(text)?;
+        if !rest.is_empty() {
+            return Err(Error::new(ErrorKind::IllegalCharacter(Position::Build)));
+        }
+        Ok(build)
+    }
+}
+
+impl Error {
+    fn new(kind: ErrorKind) -> Self {
+        Error { kind }
+    }
+}
+
+impl Op {
+    const DEFAULT: Self = Op::Caret;
+}
+
+fn numeric_identifier(input: &str, pos: Position) -> Result<(u64, &str), Error> {
+    let mut len = 0;
+    let mut value = 0u64;
+
+    while let Some(&digit) = input.as_bytes().get(len) {
+        if digit < b'0' || digit > b'9' {
+            break;
+        }
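+        // Seeing another digit while the accumulated value is still zero means
+        // the first digit was '0', i.e. a disallowed leading zero.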
+        if value == 0 && len > 0 {
+            return Err(Error::new(ErrorKind::LeadingZero(pos)));
+        }
+        match value
+            .checked_mul(10)
+            .and_then(|value| value.checked_add((digit - b'0') as u64))
+        {
+            Some(sum) => value = sum,
+            None => return Err(Error::new(ErrorKind::Overflow(pos))),
+        }
+        len += 1;
+    }
+
+    if len > 0 {
+        Ok((value, &input[len..]))
+    } else if let Some(unexpected) = input[len..].chars().next() {
+        Err(Error::new(ErrorKind::UnexpectedChar(pos, unexpected)))
+    } else {
+        Err(Error::new(ErrorKind::UnexpectedEnd(pos)))
+    }
+}
+
+fn wildcard(input: &str) -> Option<(char, &str)> {
+    if let Some(rest) = input.strip_prefix('*') {
+        Some(('*', rest))
+    } else if let Some(rest) = input.strip_prefix('x') {
+        Some(('x', rest))
+    } else if let Some(rest) = input.strip_prefix('X') {
+        Some(('X', rest))
+    } else {
+        None
+    }
+}
+
+fn dot(input: &str, pos: Position) -> Result<&str, Error> {
+    if let Some(rest) = input.strip_prefix('.') {
+        Ok(rest)
+    } else if let Some(unexpected) = input.chars().next() {
+        Err(Error::new(ErrorKind::UnexpectedCharAfter(pos, unexpected)))
+    } else {
+        Err(Error::new(ErrorKind::UnexpectedEnd(pos)))
+    }
+}
+
+fn prerelease_identifier(input: &str) -> Result<(Prerelease, &str), Error> {
+    let (string, rest) = identifier(input, Position::Pre)?;
+    let identifier = unsafe { Identifier::new_unchecked(string) };
+    Ok((Prerelease { identifier }, rest))
+}
+
+fn build_identifier(input: &str) -> Result<(BuildMetadata, &str), Error> {
+    let (string, rest) = identifier(input, Position::Build)?;
+    let identifier = unsafe { Identifier::new_unchecked(string) };
+    Ok((BuildMetadata { identifier }, rest))
+}
+
+fn identifier(input: &str, pos: Position) -> Result<(&str, &str), Error> {
+    let mut accumulated_len = 0;
+    let mut segment_len = 0;
+    let mut segment_has_nondigit = false;
+
+    loop {
+        match input.as_bytes().get(accumulated_len + segment_len) {
+            Some(b'A'..=b'Z') | Some(b'a'..=b'z') | Some(b'-') => {
+                segment_len += 1;
+                segment_has_nondigit = true;
+            }
+            Some(b'0'..=b'9') => {
+                segment_len += 1;
+            }
+            boundary => {
+                if segment_len == 0 {
+                    if accumulated_len == 0 && boundary != Some(&b'.') {
+                        return Ok(("", input));
+                    } else {
+                        return Err(Error::new(ErrorKind::EmptySegment(pos)));
+                    }
+                }
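+                // A purely numeric pre-release segment must not start with a
+                // leading zero; build metadata segments are exempt.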
+                if pos == Position::Pre
+                    && segment_len > 1
+                    && !segment_has_nondigit
+                    && input[accumulated_len..].starts_with('0')
+                {
+                    return Err(Error::new(ErrorKind::LeadingZero(pos)));
+                }
+                accumulated_len += segment_len;
+                if boundary == Some(&b'.') {
+                    accumulated_len += 1;
+                    segment_len = 0;
+                    segment_has_nondigit = false;
+                } else {
+                    return Ok(input.split_at(accumulated_len));
+                }
+            }
+        }
+    }
+}
+
+fn op(input: &str) -> (Op, &str) {
+    let bytes = input.as_bytes();
+    if bytes.first() == Some(&b'=') {
+        (Op::Exact, &input[1..])
+    } else if bytes.first() == Some(&b'>') {
+        if bytes.get(1) == Some(&b'=') {
+            (Op::GreaterEq, &input[2..])
+        } else {
+            (Op::Greater, &input[1..])
+        }
+    } else if bytes.first() == Some(&b'<') {
+        if bytes.get(1) == Some(&b'=') {
+            (Op::LessEq, &input[2..])
+        } else {
+            (Op::Less, &input[1..])
+        }
+    } else if bytes.first() == Some(&b'~') {
+        (Op::Tilde, &input[1..])
+    } else if bytes.first() == Some(&b'^') {
+        (Op::Caret, &input[1..])
+    } else {
+        (Op::DEFAULT, input)
+    }
+}
+
+fn comparator(input: &str) -> Result<(Comparator, Position, &str), Error> {
+    let (mut op, text) = op(input);
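+    // No operator characters consumed means the default (caret) op applies,
+    // unless a wildcard component later switches it to Op::Wildcard.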
+    let default_op = input.len() == text.len();
+    let text = text.trim_start_matches(' ');
+
+    let mut pos = Position::Major;
+    let (major, text) = numeric_identifier(text, pos)?;
+    let mut has_wildcard = false;
+
+    let (minor, text) = if let Some(text) = text.strip_prefix('.') {
+        pos = Position::Minor;
+        if let Some((_, text)) = wildcard(text) {
+            has_wildcard = true;
+            if default_op {
+                op = Op::Wildcard;
+            }
+            (None, text)
+        } else {
+            let (minor, text) = numeric_identifier(text, pos)?;
+            (Some(minor), text)
+        }
+    } else {
+        (None, text)
+    };
+
+    let (patch, text) = if let Some(text) = text.strip_prefix('.') {
+        pos = Position::Patch;
+        if let Some((_, text)) = wildcard(text) {
+            if default_op {
+                op = Op::Wildcard;
+            }
+            (None, text)
+        } else if has_wildcard {
+            return Err(Error::new(ErrorKind::UnexpectedAfterWildcard));
+        } else {
+            let (patch, text) = numeric_identifier(text, pos)?;
+            (Some(patch), text)
+        }
+    } else {
+        (None, text)
+    };
+
+    let (pre, text) = if patch.is_some() && text.starts_with('-') {
+        pos = Position::Pre;
+        let text = &text[1..];
+        let (pre, text) = prerelease_identifier(text)?;
+        if pre.is_empty() {
+            return Err(Error::new(ErrorKind::EmptySegment(pos)));
+        }
+        (pre, text)
+    } else {
+        (Prerelease::EMPTY, text)
+    };
+
+    let text = if patch.is_some() && text.starts_with('+') {
+        pos = Position::Build;
+        let text = &text[1..];
+        let (build, text) = build_identifier(text)?;
+        if build.is_empty() {
+            return Err(Error::new(ErrorKind::EmptySegment(pos)));
+        }
+        text
+    } else {
+        text
+    };
+
+    let text = text.trim_start_matches(' ');
+
+    let comparator = Comparator {
+        op,
+        major,
+        minor,
+        patch,
+        pre,
+    };
+
+    Ok((comparator, pos, text))
+}
+
+fn version_req(input: &str, out: &mut Vec<Comparator>, depth: usize) -> Result<usize, Error> {
+    let (comparator, pos, text) = match comparator(input) {
+        Ok(success) => success,
+        Err(mut error) => {
+            if let Some((ch, mut rest)) = wildcard(input) {
+                rest = rest.trim_start_matches(' ');
+                if rest.is_empty() || rest.starts_with(',') {
+                    error.kind = ErrorKind::WildcardNotTheOnlyComparator(ch);
+                }
+            }
+            return Err(error);
+        }
+    };
+
+    if text.is_empty() {
+        out.reserve_exact(depth + 1);
+        unsafe { out.as_mut_ptr().add(depth).write(comparator) }
+        return Ok(depth + 1);
+    }
+
+    let text = if let Some(text) = text.strip_prefix(',') {
+        text.trim_start_matches(' ')
+    } else {
+        let unexpected = text.chars().next().unwrap();
+        return Err(Error::new(ErrorKind::ExpectedCommaFound(pos, unexpected)));
+    };
+
+    const MAX_COMPARATORS: usize = 32;
+    if depth + 1 == MAX_COMPARATORS {
+        return Err(Error::new(ErrorKind::ExcessiveComparators));
+    }
+
+    // Recurse to collect parsed Comparator objects on the stack. We perform a
+    // single allocation to allocate exactly the right sized Vec only once the
+    // total number of comparators is known.
+    let len = version_req(text, out, depth + 1)?;
+    unsafe { out.as_mut_ptr().add(depth).write(comparator) }
+    Ok(len)
+}
diff --git a/crates/semver/src/serde.rs b/crates/semver/src/serde.rs
new file mode 100644
index 0000000..1fcc7d8
--- /dev/null
+++ b/crates/semver/src/serde.rs
@@ -0,0 +1,109 @@
+use crate::{Comparator, Version, VersionReq};
+use core::fmt;
+use serde::de::{Deserialize, Deserializer, Error, Visitor};
+use serde::ser::{Serialize, Serializer};
+
+impl Serialize for Version {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.collect_str(self)
+    }
+}
+
+impl Serialize for VersionReq {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.collect_str(self)
+    }
+}
+
+impl Serialize for Comparator {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.collect_str(self)
+    }
+}
+
+impl<'de> Deserialize<'de> for Version {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        struct VersionVisitor;
+
+        impl<'de> Visitor<'de> for VersionVisitor {
+            type Value = Version;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("semver version")
+            }
+
+            fn visit_str<E>(self, string: &str) -> Result<Self::Value, E>
+            where
+                E: Error,
+            {
+                string.parse().map_err(Error::custom)
+            }
+        }
+
+        deserializer.deserialize_str(VersionVisitor)
+    }
+}
+
+impl<'de> Deserialize<'de> for VersionReq {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        struct VersionReqVisitor;
+
+        impl<'de> Visitor<'de> for VersionReqVisitor {
+            type Value = VersionReq;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("semver version")
+            }
+
+            fn visit_str<E>(self, string: &str) -> Result<Self::Value, E>
+            where
+                E: Error,
+            {
+                string.parse().map_err(Error::custom)
+            }
+        }
+
+        deserializer.deserialize_str(VersionReqVisitor)
+    }
+}
+
+impl<'de> Deserialize<'de> for Comparator {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        struct ComparatorVisitor;
+
+        impl<'de> Visitor<'de> for ComparatorVisitor {
+            type Value = Comparator;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("semver comparator")
+            }
+
+            fn visit_str<E>(self, string: &str) -> Result<Self::Value, E>
+            where
+                E: Error,
+            {
+                string.parse().map_err(Error::custom)
+            }
+        }
+
+        deserializer.deserialize_str(ComparatorVisitor)
+    }
+}
diff --git a/crates/semver/tests/node/mod.rs b/crates/semver/tests/node/mod.rs
new file mode 100644
index 0000000..eb50673
--- /dev/null
+++ b/crates/semver/tests/node/mod.rs
@@ -0,0 +1,43 @@
+#![cfg(test_node_semver)]
+
+use semver::Version;
+use std::fmt::{self, Display};
+use std::process::Command;
+
+#[derive(Default, Eq, PartialEq, Hash, Debug)]
+pub(super) struct VersionReq(semver::VersionReq);
+
+impl VersionReq {
+    pub(super) const STAR: Self = VersionReq(semver::VersionReq::STAR);
+
+    pub(super) fn matches(&self, version: &Version) -> bool {
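+        // Shell out to node's `semver` package; its range syntax separates
+        // comparators with spaces, so the commas produced by this crate's
+        // Display impl are stripped before calling `satisfies`.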
+        let out = Command::new("node")
+            .arg("-e")
+            .arg(format!(
+                "console.log(require('semver').satisfies('{}', '{}'))",
+                version,
+                self.to_string().replace(',', ""),
+            ))
+            .output()
+            .unwrap();
+        if out.stdout == b"true\n" {
+            true
+        } else if out.stdout == b"false\n" {
+            false
+        } else {
+            let s = String::from_utf8_lossy(&out.stdout) + String::from_utf8_lossy(&out.stderr);
+            panic!("unexpected output: {}", s);
+        }
+    }
+}
+
+impl Display for VersionReq {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        Display::fmt(&self.0, formatter)
+    }
+}
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+pub(super) fn req(text: &str) -> VersionReq {
+    VersionReq(crate::util::req(text))
+}
diff --git a/crates/semver/tests/test_autotrait.rs b/crates/semver/tests/test_autotrait.rs
new file mode 100644
index 0000000..5d16689
--- /dev/null
+++ b/crates/semver/tests/test_autotrait.rs
@@ -0,0 +1,14 @@
+#![allow(clippy::extra_unused_type_parameters)]
+
+fn assert_send_sync<T: Send + Sync>() {}
+
+#[test]
+fn test() {
+    assert_send_sync::<semver::BuildMetadata>();
+    assert_send_sync::<semver::Comparator>();
+    assert_send_sync::<semver::Error>();
+    assert_send_sync::<semver::Prerelease>();
+    assert_send_sync::<semver::Version>();
+    assert_send_sync::<semver::VersionReq>();
+    assert_send_sync::<semver::Op>();
+}
diff --git a/crates/semver/tests/test_identifier.rs b/crates/semver/tests/test_identifier.rs
new file mode 100644
index 0000000..40d8596
--- /dev/null
+++ b/crates/semver/tests/test_identifier.rs
@@ -0,0 +1,51 @@
+#![allow(
+    clippy::eq_op,
+    clippy::needless_pass_by_value,
+    clippy::toplevel_ref_arg,
+    clippy::wildcard_imports
+)]
+
+mod util;
+
+use crate::util::*;
+use semver::Prerelease;
+
+#[test]
+fn test_new() {
+    fn test(identifier: Prerelease, expected: &str) {
+        assert_eq!(identifier.is_empty(), expected.is_empty());
+        assert_eq!(identifier.len(), expected.len());
+        assert_eq!(identifier.as_str(), expected);
+        assert_eq!(identifier, identifier);
+        assert_eq!(identifier, identifier.clone());
+    }
+
+    let ref mut string = String::new();
+    let limit = if cfg!(miri) { 40 } else { 280 }; // miri is slow
+    for _ in 0..limit {
+        test(prerelease(string), string);
+        string.push('1');
+    }
+
+    if !cfg!(miri) {
+        let ref string = string.repeat(20000);
+        test(prerelease(string), string);
+    }
+}
+
+#[test]
+fn test_eq() {
+    assert_eq!(prerelease("-"), prerelease("-"));
+    assert_ne!(prerelease("a"), prerelease("aa"));
+    assert_ne!(prerelease("aa"), prerelease("a"));
+    assert_ne!(prerelease("aaaaaaaaa"), prerelease("a"));
+    assert_ne!(prerelease("a"), prerelease("aaaaaaaaa"));
+    assert_ne!(prerelease("aaaaaaaaa"), prerelease("bbbbbbbbb"));
+    assert_ne!(build_metadata("1"), build_metadata("001"));
+}
+
+#[test]
+fn test_prerelease() {
+    let err = prerelease_err("1.b\0");
+    assert_to_string(err, "unexpected character in pre-release identifier");
+}
diff --git a/crates/semver/tests/test_version.rs b/crates/semver/tests/test_version.rs
new file mode 100644
index 0000000..991087f
--- /dev/null
+++ b/crates/semver/tests/test_version.rs
@@ -0,0 +1,250 @@
+#![allow(
+    clippy::nonminimal_bool,
+    clippy::too_many_lines,
+    clippy::wildcard_imports
+)]
+
+mod util;
+
+use crate::util::*;
+use semver::{BuildMetadata, Prerelease, Version};
+
+#[test]
+fn test_parse() {
+    let err = version_err("");
+    assert_to_string(err, "empty string, expected a semver version");
+
+    let err = version_err("  ");
+    assert_to_string(
+        err,
+        "unexpected character ' ' while parsing major version number",
+    );
+
+    let err = version_err("1");
+    assert_to_string(
+        err,
+        "unexpected end of input while parsing major version number",
+    );
+
+    let err = version_err("1.2");
+    assert_to_string(
+        err,
+        "unexpected end of input while parsing minor version number",
+    );
+
+    let err = version_err("1.2.3-");
+    assert_to_string(err, "empty identifier segment in pre-release identifier");
+
+    let err = version_err("a.b.c");
+    assert_to_string(
+        err,
+        "unexpected character 'a' while parsing major version number",
+    );
+
+    let err = version_err("1.2.3 abc");
+    assert_to_string(err, "unexpected character ' ' after patch version number");
+
+    let err = version_err("1.2.3-01");
+    assert_to_string(err, "invalid leading zero in pre-release identifier");
+
+    let err = version_err("1.2.3++");
+    assert_to_string(err, "empty identifier segment in build metadata");
+
+    let err = version_err("07");
+    assert_to_string(err, "invalid leading zero in major version number");
+
+    let err = version_err("111111111111111111111.0.0");
+    assert_to_string(err, "value of major version number exceeds u64::MAX");
+
+    let err = version_err("8\0");
+    assert_to_string(err, "unexpected character '\\0' after major version number");
+
+    let parsed = version("1.2.3");
+    let expected = Version::new(1, 2, 3);
+    assert_eq!(parsed, expected);
+    let expected = Version {
+        major: 1,
+        minor: 2,
+        patch: 3,
+        pre: Prerelease::EMPTY,
+        build: BuildMetadata::EMPTY,
+    };
+    assert_eq!(parsed, expected);
+
+    let parsed = version("1.2.3-alpha1");
+    let expected = Version {
+        major: 1,
+        minor: 2,
+        patch: 3,
+        pre: prerelease("alpha1"),
+        build: BuildMetadata::EMPTY,
+    };
+    assert_eq!(parsed, expected);
+
+    let parsed = version("1.2.3+build5");
+    let expected = Version {
+        major: 1,
+        minor: 2,
+        patch: 3,
+        pre: Prerelease::EMPTY,
+        build: build_metadata("build5"),
+    };
+    assert_eq!(parsed, expected);
+
+    let parsed = version("1.2.3+5build");
+    let expected = Version {
+        major: 1,
+        minor: 2,
+        patch: 3,
+        pre: Prerelease::EMPTY,
+        build: build_metadata("5build"),
+    };
+    assert_eq!(parsed, expected);
+
+    let parsed = version("1.2.3-alpha1+build5");
+    let expected = Version {
+        major: 1,
+        minor: 2,
+        patch: 3,
+        pre: prerelease("alpha1"),
+        build: build_metadata("build5"),
+    };
+    assert_eq!(parsed, expected);
+
+    let parsed = version("1.2.3-1.alpha1.9+build5.7.3aedf");
+    let expected = Version {
+        major: 1,
+        minor: 2,
+        patch: 3,
+        pre: prerelease("1.alpha1.9"),
+        build: build_metadata("build5.7.3aedf"),
+    };
+    assert_eq!(parsed, expected);
+
+    let parsed = version("1.2.3-0a.alpha1.9+05build.7.3aedf");
+    let expected = Version {
+        major: 1,
+        minor: 2,
+        patch: 3,
+        pre: prerelease("0a.alpha1.9"),
+        build: build_metadata("05build.7.3aedf"),
+    };
+    assert_eq!(parsed, expected);
+
+    let parsed = version("0.4.0-beta.1+0851523");
+    let expected = Version {
+        major: 0,
+        minor: 4,
+        patch: 0,
+        pre: prerelease("beta.1"),
+        build: build_metadata("0851523"),
+    };
+    assert_eq!(parsed, expected);
+
+    // for https://nodejs.org/dist/index.json, where some older npm versions are "1.1.0-beta-10"
+    let parsed = version("1.1.0-beta-10");
+    let expected = Version {
+        major: 1,
+        minor: 1,
+        patch: 0,
+        pre: prerelease("beta-10"),
+        build: BuildMetadata::EMPTY,
+    };
+    assert_eq!(parsed, expected);
+}
+
+#[test]
+fn test_eq() {
+    assert_eq!(version("1.2.3"), version("1.2.3"));
+    assert_eq!(version("1.2.3-alpha1"), version("1.2.3-alpha1"));
+    assert_eq!(version("1.2.3+build.42"), version("1.2.3+build.42"));
+    assert_eq!(version("1.2.3-alpha1+42"), version("1.2.3-alpha1+42"));
+}
+
+#[test]
+fn test_ne() {
+    assert_ne!(version("0.0.0"), version("0.0.1"));
+    assert_ne!(version("0.0.0"), version("0.1.0"));
+    assert_ne!(version("0.0.0"), version("1.0.0"));
+    assert_ne!(version("1.2.3-alpha"), version("1.2.3-beta"));
+    assert_ne!(version("1.2.3+23"), version("1.2.3+42"));
+}
+
+#[test]
+fn test_display() {
+    assert_to_string(version("1.2.3"), "1.2.3");
+    assert_to_string(version("1.2.3-alpha1"), "1.2.3-alpha1");
+    assert_to_string(version("1.2.3+build.42"), "1.2.3+build.42");
+    assert_to_string(version("1.2.3-alpha1+42"), "1.2.3-alpha1+42");
+}
+
+#[test]
+fn test_lt() {
+    assert!(version("0.0.0") < version("1.2.3-alpha2"));
+    assert!(version("1.0.0") < version("1.2.3-alpha2"));
+    assert!(version("1.2.0") < version("1.2.3-alpha2"));
+    assert!(version("1.2.3-alpha1") < version("1.2.3"));
+    assert!(version("1.2.3-alpha1") < version("1.2.3-alpha2"));
+    assert!(!(version("1.2.3-alpha2") < version("1.2.3-alpha2")));
+    assert!(version("1.2.3+23") < version("1.2.3+42"));
+}
+
+#[test]
+fn test_le() {
+    assert!(version("0.0.0") <= version("1.2.3-alpha2"));
+    assert!(version("1.0.0") <= version("1.2.3-alpha2"));
+    assert!(version("1.2.0") <= version("1.2.3-alpha2"));
+    assert!(version("1.2.3-alpha1") <= version("1.2.3-alpha2"));
+    assert!(version("1.2.3-alpha2") <= version("1.2.3-alpha2"));
+    assert!(version("1.2.3+23") <= version("1.2.3+42"));
+}
+
+#[test]
+fn test_gt() {
+    assert!(version("1.2.3-alpha2") > version("0.0.0"));
+    assert!(version("1.2.3-alpha2") > version("1.0.0"));
+    assert!(version("1.2.3-alpha2") > version("1.2.0"));
+    assert!(version("1.2.3-alpha2") > version("1.2.3-alpha1"));
+    assert!(version("1.2.3") > version("1.2.3-alpha2"));
+    assert!(!(version("1.2.3-alpha2") > version("1.2.3-alpha2")));
+    assert!(!(version("1.2.3+23") > version("1.2.3+42")));
+}
+
+#[test]
+fn test_ge() {
+    assert!(version("1.2.3-alpha2") >= version("0.0.0"));
+    assert!(version("1.2.3-alpha2") >= version("1.0.0"));
+    assert!(version("1.2.3-alpha2") >= version("1.2.0"));
+    assert!(version("1.2.3-alpha2") >= version("1.2.3-alpha1"));
+    assert!(version("1.2.3-alpha2") >= version("1.2.3-alpha2"));
+    assert!(!(version("1.2.3+23") >= version("1.2.3+42")));
+}
+
+#[test]
+fn test_spec_order() {
+    let vs = [
+        "1.0.0-alpha",
+        "1.0.0-alpha.1",
+        "1.0.0-alpha.beta",
+        "1.0.0-beta",
+        "1.0.0-beta.2",
+        "1.0.0-beta.11",
+        "1.0.0-rc.1",
+        "1.0.0",
+    ];
+    let mut i = 1;
+    while i < vs.len() {
+        let a = version(vs[i - 1]);
+        let b = version(vs[i]);
+        assert!(a < b, "nope {:?} < {:?}", a, b);
+        i += 1;
+    }
+}
+
+#[test]
+fn test_align() {
+    let version = version("1.2.3-rc1");
+    assert_eq!("1.2.3-rc1           ", format!("{:20}", version));
+    assert_eq!("*****1.2.3-rc1******", format!("{:*^20}", version));
+    assert_eq!("           1.2.3-rc1", format!("{:>20}", version));
+}
diff --git a/crates/semver/tests/test_version_req.rs b/crates/semver/tests/test_version_req.rs
new file mode 100644
index 0000000..1ed2358
--- /dev/null
+++ b/crates/semver/tests/test_version_req.rs
@@ -0,0 +1,485 @@
+#![allow(
+    clippy::missing_panics_doc,
+    clippy::shadow_unrelated,
+    clippy::toplevel_ref_arg,
+    clippy::wildcard_imports
+)]
+
+mod node;
+mod util;
+
+use crate::util::*;
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+
+#[cfg(test_node_semver)]
+use node::{req, VersionReq};
+#[cfg(not(test_node_semver))]
+use semver::VersionReq;
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+fn assert_match_all(req: &VersionReq, versions: &[&str]) {
+    for string in versions {
+        let parsed = version(string);
+        assert!(req.matches(&parsed), "did not match {}", string);
+    }
+}
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+fn assert_match_none(req: &VersionReq, versions: &[&str]) {
+    for string in versions {
+        let parsed = version(string);
+        assert!(!req.matches(&parsed), "matched {}", string);
+    }
+}
+
+#[test]
+fn test_basic() {
+    let ref r = req("1.0.0");
+    assert_to_string(r, "^1.0.0");
+    assert_match_all(r, &["1.0.0", "1.1.0", "1.0.1"]);
+    assert_match_none(r, &["0.9.9", "0.10.0", "0.1.0", "1.0.0-pre", "1.0.1-pre"]);
+}
+
+#[test]
+#[cfg(not(no_const_vec_new))]
+fn test_default() {
+    let ref r = VersionReq::default();
+    assert_eq!(r, &VersionReq::STAR);
+}
+
+#[test]
+fn test_exact() {
+    let ref r = req("=1.0.0");
+    assert_to_string(r, "=1.0.0");
+    assert_match_all(r, &["1.0.0"]);
+    assert_match_none(r, &["1.0.1", "0.9.9", "0.10.0", "0.1.0", "1.0.0-pre"]);
+
+    let ref r = req("=0.9.0");
+    assert_to_string(r, "=0.9.0");
+    assert_match_all(r, &["0.9.0"]);
+    assert_match_none(r, &["0.9.1", "1.9.0", "0.0.9", "0.9.0-pre"]);
+
+    let ref r = req("=0.0.2");
+    assert_to_string(r, "=0.0.2");
+    assert_match_all(r, &["0.0.2"]);
+    assert_match_none(r, &["0.0.1", "0.0.3", "0.0.2-pre"]);
+
+    let ref r = req("=0.1.0-beta2.a");
+    assert_to_string(r, "=0.1.0-beta2.a");
+    assert_match_all(r, &["0.1.0-beta2.a"]);
+    assert_match_none(r, &["0.9.1", "0.1.0", "0.1.1-beta2.a", "0.1.0-beta2"]);
+
+    let ref r = req("=0.1.0+meta");
+    assert_to_string(r, "=0.1.0");
+    assert_match_all(r, &["0.1.0", "0.1.0+meta", "0.1.0+any"]);
+}
+
+#[test]
+pub fn test_greater_than() {
+    let ref r = req(">= 1.0.0");
+    assert_to_string(r, ">=1.0.0");
+    assert_match_all(r, &["1.0.0", "2.0.0"]);
+    assert_match_none(r, &["0.1.0", "0.0.1", "1.0.0-pre", "2.0.0-pre"]);
+
+    let ref r = req(">= 2.1.0-alpha2");
+    assert_to_string(r, ">=2.1.0-alpha2");
+    assert_match_all(r, &["2.1.0-alpha2", "2.1.0-alpha3", "2.1.0", "3.0.0"]);
+    assert_match_none(
+        r,
+        &["2.0.0", "2.1.0-alpha1", "2.0.0-alpha2", "3.0.0-alpha2"],
+    );
+}
+
+#[test]
+pub fn test_less_than() {
+    let ref r = req("< 1.0.0");
+    assert_to_string(r, "<1.0.0");
+    assert_match_all(r, &["0.1.0", "0.0.1"]);
+    assert_match_none(r, &["1.0.0", "1.0.0-beta", "1.0.1", "0.9.9-alpha"]);
+
+    let ref r = req("<= 2.1.0-alpha2");
+    assert_match_all(r, &["2.1.0-alpha2", "2.1.0-alpha1", "2.0.0", "1.0.0"]);
+    assert_match_none(
+        r,
+        &["2.1.0", "2.2.0-alpha1", "2.0.0-alpha2", "1.0.0-alpha2"],
+    );
+
+    let ref r = req(">1.0.0-alpha, <1.0.0");
+    assert_match_all(r, &["1.0.0-beta"]);
+
+    let ref r = req(">1.0.0-alpha, <1.0");
+    assert_match_none(r, &["1.0.0-beta"]);
+
+    let ref r = req(">1.0.0-alpha, <1");
+    assert_match_none(r, &["1.0.0-beta"]);
+}
+
+#[test]
+pub fn test_multiple() {
+    let ref r = req("> 0.0.9, <= 2.5.3");
+    assert_to_string(r, ">0.0.9, <=2.5.3");
+    assert_match_all(r, &["0.0.10", "1.0.0", "2.5.3"]);
+    assert_match_none(r, &["0.0.8", "2.5.4"]);
+
+    let ref r = req("0.3.0, 0.4.0");
+    assert_to_string(r, "^0.3.0, ^0.4.0");
+    assert_match_none(r, &["0.0.8", "0.3.0", "0.4.0"]);
+
+    let ref r = req("<= 0.2.0, >= 0.5.0");
+    assert_to_string(r, "<=0.2.0, >=0.5.0");
+    assert_match_none(r, &["0.0.8", "0.3.0", "0.5.1"]);
+
+    let ref r = req("0.1.0, 0.1.4, 0.1.6");
+    assert_to_string(r, "^0.1.0, ^0.1.4, ^0.1.6");
+    assert_match_all(r, &["0.1.6", "0.1.9"]);
+    assert_match_none(r, &["0.1.0", "0.1.4", "0.2.0"]);
+
+    let err = req_err("> 0.1.0,");
+    assert_to_string(
+        err,
+        "unexpected end of input while parsing major version number",
+    );
+
+    let err = req_err("> 0.3.0, ,");
+    assert_to_string(
+        err,
+        "unexpected character ',' while parsing major version number",
+    );
+
+    let ref r = req(">=0.5.1-alpha3, <0.6");
+    assert_to_string(r, ">=0.5.1-alpha3, <0.6");
+    assert_match_all(
+        r,
+        &[
+            "0.5.1-alpha3",
+            "0.5.1-alpha4",
+            "0.5.1-beta",
+            "0.5.1",
+            "0.5.5",
+        ],
+    );
+    assert_match_none(
+        r,
+        &["0.5.1-alpha1", "0.5.2-alpha3", "0.5.5-pre", "0.5.0-pre"],
+    );
+    assert_match_none(r, &["0.6.0", "0.6.0-pre"]);
+
+    // https://github.com/steveklabnik/semver/issues/56
+    let err = req_err("1.2.3 - 2.3.4");
+    assert_to_string(err, "expected comma after patch version number, found '-'");
+
+    let err = req_err(">1, >2, >3, >4, >5, >6, >7, >8, >9, >10, >11, >12, >13, >14, >15, >16, >17, >18, >19, >20, >21, >22, >23, >24, >25, >26, >27, >28, >29, >30, >31, >32, >33");
+    assert_to_string(err, "excessive number of version comparators");
+}
+
+#[test]
+pub fn test_whitespace_delimited_comparator_sets() {
+    // https://github.com/steveklabnik/semver/issues/55
+    let err = req_err("> 0.0.9 <= 2.5.3");
+    assert_to_string(err, "expected comma after patch version number, found '<'");
+}
+
+#[test]
+pub fn test_tilde() {
+    let ref r = req("~1");
+    assert_match_all(r, &["1.0.0", "1.0.1", "1.1.1"]);
+    assert_match_none(r, &["0.9.1", "2.9.0", "0.0.9"]);
+
+    let ref r = req("~1.2");
+    assert_match_all(r, &["1.2.0", "1.2.1"]);
+    assert_match_none(r, &["1.1.1", "1.3.0", "0.0.9"]);
+
+    let ref r = req("~1.2.2");
+    assert_match_all(r, &["1.2.2", "1.2.4"]);
+    assert_match_none(r, &["1.2.1", "1.9.0", "1.0.9", "2.0.1", "0.1.3"]);
+
+    let ref r = req("~1.2.3-beta.2");
+    assert_match_all(r, &["1.2.3", "1.2.4", "1.2.3-beta.2", "1.2.3-beta.4"]);
+    assert_match_none(r, &["1.3.3", "1.1.4", "1.2.3-beta.1", "1.2.4-beta.2"]);
+}
+
+#[test]
+pub fn test_caret() {
+    let ref r = req("^1");
+    assert_match_all(r, &["1.1.2", "1.1.0", "1.2.1", "1.0.1"]);
+    assert_match_none(r, &["0.9.1", "2.9.0", "0.1.4"]);
+    assert_match_none(r, &["1.0.0-beta1", "0.1.0-alpha", "1.0.1-pre"]);
+
+    let ref r = req("^1.1");
+    assert_match_all(r, &["1.1.2", "1.1.0", "1.2.1"]);
+    assert_match_none(r, &["0.9.1", "2.9.0", "1.0.1", "0.1.4"]);
+
+    let ref r = req("^1.1.2");
+    assert_match_all(r, &["1.1.2", "1.1.4", "1.2.1"]);
+    assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]);
+    assert_match_none(r, &["1.1.2-alpha1", "1.1.3-alpha1", "2.9.0-alpha1"]);
+
+    let ref r = req("^0.1.2");
+    assert_match_all(r, &["0.1.2", "0.1.4"]);
+    assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]);
+    assert_match_none(r, &["0.1.2-beta", "0.1.3-alpha", "0.2.0-pre"]);
+
+    let ref r = req("^0.5.1-alpha3");
+    assert_match_all(
+        r,
+        &[
+            "0.5.1-alpha3",
+            "0.5.1-alpha4",
+            "0.5.1-beta",
+            "0.5.1",
+            "0.5.5",
+        ],
+    );
+    assert_match_none(
+        r,
+        &[
+            "0.5.1-alpha1",
+            "0.5.2-alpha3",
+            "0.5.5-pre",
+            "0.5.0-pre",
+            "0.6.0",
+        ],
+    );
+
+    let ref r = req("^0.0.2");
+    assert_match_all(r, &["0.0.2"]);
+    assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1", "0.1.4"]);
+
+    let ref r = req("^0.0");
+    assert_match_all(r, &["0.0.2", "0.0.0"]);
+    assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.1.4"]);
+
+    let ref r = req("^0");
+    assert_match_all(r, &["0.9.1", "0.0.2", "0.0.0"]);
+    assert_match_none(r, &["2.9.0", "1.1.1"]);
+
+    let ref r = req("^1.4.2-beta.5");
+    assert_match_all(
+        r,
+        &["1.4.2", "1.4.3", "1.4.2-beta.5", "1.4.2-beta.6", "1.4.2-c"],
+    );
+    assert_match_none(
+        r,
+        &[
+            "0.9.9",
+            "2.0.0",
+            "1.4.2-alpha",
+            "1.4.2-beta.4",
+            "1.4.3-beta.5",
+        ],
+    );
+}
+
+#[test]
+pub fn test_wildcard() {
+    let err = req_err("");
+    assert_to_string(
+        err,
+        "unexpected end of input while parsing major version number",
+    );
+
+    let ref r = req("*");
+    assert_match_all(r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]);
+    assert_match_none(r, &["1.0.0-pre"]);
+
+    for s in &["x", "X"] {
+        assert_eq!(*r, req(s));
+    }
+
+    let ref r = req("1.*");
+    assert_match_all(r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]);
+    assert_match_none(r, &["0.0.9", "1.2.0-pre"]);
+
+    for s in &["1.x", "1.X", "1.*.*"] {
+        assert_eq!(*r, req(s));
+    }
+
+    let ref r = req("1.2.*");
+    assert_match_all(r, &["1.2.0", "1.2.2", "1.2.4"]);
+    assert_match_none(r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3", "1.2.2-pre"]);
+
+    for s in &["1.2.x", "1.2.X"] {
+        assert_eq!(*r, req(s));
+    }
+}
+
+#[test]
+pub fn test_logical_or() {
+    // https://github.com/steveklabnik/semver/issues/57
+    let err = req_err("=1.2.3 || =2.3.4");
+    assert_to_string(err, "expected comma after patch version number, found '|'");
+
+    let err = req_err("1.1 || =1.2.3");
+    assert_to_string(err, "expected comma after minor version number, found '|'");
+
+    let err = req_err("6.* || 8.* || >= 10.*");
+    assert_to_string(err, "expected comma after minor version number, found '|'");
+}
+
+#[test]
+pub fn test_any() {
+    #[cfg(not(no_const_vec_new))]
+    let ref r = VersionReq::STAR;
+    #[cfg(no_const_vec_new)]
+    let ref r = VersionReq {
+        comparators: Vec::new(),
+    };
+    assert_match_all(r, &["0.0.1", "0.1.0", "1.0.0"]);
+}
+
+#[test]
+pub fn test_pre() {
+    let ref r = req("=2.1.1-really.0");
+    assert_match_all(r, &["2.1.1-really.0"]);
+}
+
+#[test]
+pub fn test_parse() {
+    let err = req_err("\0");
+    assert_to_string(
+        err,
+        "unexpected character '\\0' while parsing major version number",
+    );
+
+    let err = req_err(">= >= 0.0.2");
+    assert_to_string(
+        err,
+        "unexpected character '>' while parsing major version number",
+    );
+
+    let err = req_err(">== 0.0.2");
+    assert_to_string(
+        err,
+        "unexpected character '=' while parsing major version number",
+    );
+
+    let err = req_err("a.0.0");
+    assert_to_string(
+        err,
+        "unexpected character 'a' while parsing major version number",
+    );
+
+    let err = req_err("1.0.0-");
+    assert_to_string(err, "empty identifier segment in pre-release identifier");
+
+    let err = req_err(">=");
+    assert_to_string(
+        err,
+        "unexpected end of input while parsing major version number",
+    );
+}
+
+#[test]
+fn test_comparator_parse() {
+    let parsed = comparator("1.2.3-alpha");
+    assert_to_string(parsed, "^1.2.3-alpha");
+
+    let parsed = comparator("2.X");
+    assert_to_string(parsed, "2.*");
+
+    let parsed = comparator("2");
+    assert_to_string(parsed, "^2");
+
+    let parsed = comparator("2.x.x");
+    assert_to_string(parsed, "2.*");
+
+    let err = comparator_err("1.2.3-01");
+    assert_to_string(err, "invalid leading zero in pre-release identifier");
+
+    let err = comparator_err("1.2.3+4.");
+    assert_to_string(err, "empty identifier segment in build metadata");
+
+    let err = comparator_err(">");
+    assert_to_string(
+        err,
+        "unexpected end of input while parsing major version number",
+    );
+
+    let err = comparator_err("1.");
+    assert_to_string(
+        err,
+        "unexpected end of input while parsing minor version number",
+    );
+
+    let err = comparator_err("1.*.");
+    assert_to_string(err, "unexpected character after wildcard in version req");
+
+    let err = comparator_err("1.2.3+4ÿ");
+    assert_to_string(err, "unexpected character 'ÿ' after build metadata");
+}
+
+#[test]
+fn test_cargo3202() {
+    let ref r = req("0.*.*");
+    assert_to_string(r, "0.*");
+    assert_match_all(r, &["0.5.0"]);
+
+    let ref r = req("0.0.*");
+    assert_to_string(r, "0.0.*");
+}
+
+#[test]
+fn test_digit_after_wildcard() {
+    let err = req_err("*.1");
+    assert_to_string(err, "unexpected character after wildcard in version req");
+
+    let err = req_err("1.*.1");
+    assert_to_string(err, "unexpected character after wildcard in version req");
+
+    let err = req_err(">=1.*.1");
+    assert_to_string(err, "unexpected character after wildcard in version req");
+}
+
+#[test]
+fn test_eq_hash() {
+    fn calculate_hash(value: impl Hash) -> u64 {
+        let mut hasher = DefaultHasher::new();
+        value.hash(&mut hasher);
+        hasher.finish()
+    }
+
+    assert!(req("^1") == req("^1"));
+    assert!(calculate_hash(req("^1")) == calculate_hash(req("^1")));
+    assert!(req("^1") != req("^2"));
+}
+
+#[test]
+fn test_leading_digit_in_pre_and_build() {
+    for op in &["=", ">", ">=", "<", "<=", "~", "^"] {
+        // digit then alpha
+        req(&format!("{} 1.2.3-1a", op));
+        req(&format!("{} 1.2.3+1a", op));
+
+        // digit then alpha (leading zero)
+        req(&format!("{} 1.2.3-01a", op));
+        req(&format!("{} 1.2.3+01", op));
+
+        // multiple
+        req(&format!("{} 1.2.3-1+1", op));
+        req(&format!("{} 1.2.3-1-1+1-1-1", op));
+        req(&format!("{} 1.2.3-1a+1a", op));
+        req(&format!("{} 1.2.3-1a-1a+1a-1a-1a", op));
+    }
+}
+
+#[test]
+fn test_wildcard_and_another() {
+    let err = req_err("*, 0.20.0-any");
+    assert_to_string(
+        err,
+        "wildcard req (*) must be the only comparator in the version req",
+    );
+
+    let err = req_err("0.20.0-any, *");
+    assert_to_string(
+        err,
+        "wildcard req (*) must be the only comparator in the version req",
+    );
+
+    let err = req_err("0.20.0-any, *, 1.0");
+    assert_to_string(
+        err,
+        "wildcard req (*) must be the only comparator in the version req",
+    );
+}
diff --git a/crates/semver/tests/util/mod.rs b/crates/semver/tests/util/mod.rs
new file mode 100644
index 0000000..07d691f
--- /dev/null
+++ b/crates/semver/tests/util/mod.rs
@@ -0,0 +1,54 @@
+#![allow(dead_code)]
+
+use semver::{BuildMetadata, Comparator, Error, Prerelease, Version, VersionReq};
+use std::fmt::Display;
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+pub(super) fn version(text: &str) -> Version {
+    Version::parse(text).unwrap()
+}
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+pub(super) fn version_err(text: &str) -> Error {
+    Version::parse(text).unwrap_err()
+}
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+pub(super) fn req(text: &str) -> VersionReq {
+    VersionReq::parse(text).unwrap()
+}
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+pub(super) fn req_err(text: &str) -> Error {
+    VersionReq::parse(text).unwrap_err()
+}
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+pub(super) fn comparator(text: &str) -> Comparator {
+    Comparator::parse(text).unwrap()
+}
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+pub(super) fn comparator_err(text: &str) -> Error {
+    Comparator::parse(text).unwrap_err()
+}
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+pub(super) fn prerelease(text: &str) -> Prerelease {
+    Prerelease::new(text).unwrap()
+}
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+pub(super) fn prerelease_err(text: &str) -> Error {
+    Prerelease::new(text).unwrap_err()
+}
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+pub(super) fn build_metadata(text: &str) -> BuildMetadata {
+    BuildMetadata::new(text).unwrap()
+}
+
+#[cfg_attr(not(no_track_caller), track_caller)]
+pub(super) fn assert_to_string(value: impl Display, expected: &str) {
+    assert_eq!(value.to_string(), expected);
+}
diff --git a/crates/serde_cbor/.cargo-checksum.json b/crates/serde_cbor/.cargo-checksum.json
new file mode 100644
index 0000000..03e08b7
--- /dev/null
+++ b/crates/serde_cbor/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CONTRIBUTING.md":"bcbbc1fd8aefd2af15d432b551ddc32b0b832c1ad669eeedfaffb2092448c080","Cargo.lock":"b900f78562d5ae2ffffc0e8f739328df268f0fb80696018eb5df8e5e633b733e","Cargo.toml":"522e55ca99d851f9a3e7361f090451fc87c6097320c77bd574a80df27c183078","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e03e58ea9205f51989b7a50f450051b24e6516cc1f0b920222dcda992072be99","README.md":"abb494d9608a40bac62da0bbd4680c0f03d960922d92261fb6492a1ee448d5c6","examples/readme.rs":"2e356830c62e84605d6b7efa07a0266ad96ed960fde09d4ae241c642ff662ab2","examples/tags.rs":"a1e1e4de8a5b09c54f96a56b8d7dc749fff9c73facb1dcad05540bc83f34dbd2","examples/tux.cbor":"3251bceb0a182543de2129cd920b2af597c481513a0624887a7daeb468c530f8","src/de.rs":"433d2e10bf1be80f881dfd5355a0df1a1c6b1e09a2d36c4d6f39894b13b42e0b","src/error.rs":"9247283d47617626c90a0d32bba40b54a18c6bb0ecce37057d7423ff9158d223","src/lib.rs":"efe97da47a332789d29eb4480a65ae00b4ccd9e6ba22d2b2c945cbeb97edf2b5","src/read.rs":"001754714cf10e9691e3284bf8170348346858be8e5d72ee845853b928588c7c","src/ser.rs":"0b7cb6162e104bfea55f6bffa591d19876236a798134fec8dbabb1c2d4a22363","src/tags.rs":"8d83efd96daca49ac51f24d41b986f4247fcd2a011855ffc99e9388834bd31a3","src/value/de.rs":"986784dcf015464b01f669949f823ba0e29b7414f6d56cad3311cee036d1a954","src/value/mod.rs":"0afa696a6ed01f97380f84e4e219090393bc8cc52cd9c11f4db3eb5b1fb85e7f","src/value/ser.rs":"f5d505125ae496c99d611c24cb0905199b3b66b949ae18bb48863317b686f301","src/write.rs":"6bcc413fe531799cd979d954615e73dd87e185082858a63c5aef73f0fbe08806","tests/bennofs.rs":"2211b234f442e909659f0610fcb20e7cf746bc6c648fe258e5300ebcd6a4251f","tests/canonical.rs":"37fcbe3a1956c72e4aef808acb2e194f09ae8aaa752c6a94cf6d06731c19948d","tests/crash.cbor":"8aeb60947fc5d43e80f3c884fc6cb37c9021cc691492d055a14e55c69b366d1f","tests/de.rs":"545cf5f5bd1ab0b3b4c05c4985baf40ad419dc1568f28549d17eacf5efc7beb7","tests/enum.rs":"699886e517546396979c9843b6d1c513b4370af81e2b0d3bcf0727435c5c0a8a","tests/kietaub.cbor":"98146fa75d5970eea896baa19b5aeda31695d6ceeed4b23aa67fc13142123a9a","tests/ser.rs":"7eadf27fb58b8bd498f0be8e1670d76de36e3f21b565daeb8ec9cf30140c068e","tests/std_types.rs":"549e357ad5062eb8ae298df3bbbd0e8ee87ee0434f8c67dc53642ff2b2171e40","tests/tags.rs":"e847b9ab49f47a81a09e34d4af3bd25bb546e408915f033dc06b03b275f68065","tests/value.rs":"540469dccb6c6f3542b75934707203dff93132a7e160de798a21f16caf16075e"},"package":"2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"}
\ No newline at end of file
diff --git a/crates/serde_cbor/Android.bp b/crates/serde_cbor/Android.bp
new file mode 100644
index 0000000..3050356
--- /dev/null
+++ b/crates/serde_cbor/Android.bp
@@ -0,0 +1,247 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_serde_cbor_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_serde_cbor_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libserde_cbor",
+    host_supported: true,
+    crate_name: "serde_cbor",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.2",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+        "tags",
+    ],
+    rustlibs: [
+        "libhalf",
+        "libserde",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
+
+rust_test {
+    name: "serde_cbor_test_tests_bennofs",
+    host_supported: true,
+    crate_name: "bennofs",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.2",
+    crate_root: "tests/bennofs.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+        "tags",
+    ],
+    rustlibs: [
+        "libhalf",
+        "libserde",
+        "libserde_cbor",
+    ],
+    proc_macros: ["libserde_derive"],
+}
+
+rust_test {
+    name: "serde_cbor_test_tests_canonical",
+    host_supported: true,
+    crate_name: "canonical",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.2",
+    crate_root: "tests/canonical.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+        "tags",
+    ],
+    rustlibs: [
+        "libhalf",
+        "libserde",
+        "libserde_cbor",
+    ],
+    proc_macros: ["libserde_derive"],
+}
+
+rust_test {
+    name: "serde_cbor_test_tests_de",
+    host_supported: true,
+    crate_name: "de",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.2",
+    crate_root: "tests/de.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+        "tags",
+    ],
+    rustlibs: [
+        "libhalf",
+        "libserde",
+        "libserde_cbor",
+    ],
+    proc_macros: ["libserde_derive"],
+}
+
+rust_test {
+    name: "serde_cbor_test_tests_enum",
+    host_supported: true,
+    crate_name: "enum",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.2",
+    crate_root: "tests/enum.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+        "tags",
+    ],
+    rustlibs: [
+        "libhalf",
+        "libserde",
+        "libserde_cbor",
+    ],
+    proc_macros: ["libserde_derive"],
+}
+
+rust_test {
+    name: "serde_cbor_test_tests_ser",
+    host_supported: true,
+    crate_name: "ser",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.2",
+    crate_root: "tests/ser.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+        "tags",
+    ],
+    rustlibs: [
+        "libhalf",
+        "libserde",
+        "libserde_cbor",
+    ],
+    proc_macros: ["libserde_derive"],
+}
+
+rust_test {
+    name: "serde_cbor_test_tests_std_types",
+    host_supported: true,
+    crate_name: "std_types",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.2",
+    crate_root: "tests/std_types.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+        "tags",
+    ],
+    rustlibs: [
+        "libhalf",
+        "libserde",
+        "libserde_cbor",
+    ],
+    proc_macros: ["libserde_derive"],
+}
+
+rust_test {
+    name: "serde_cbor_test_tests_tags",
+    host_supported: true,
+    crate_name: "tags",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.2",
+    crate_root: "tests/tags.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+        "tags",
+    ],
+    rustlibs: [
+        "libhalf",
+        "libserde",
+        "libserde_cbor",
+    ],
+    proc_macros: ["libserde_derive"],
+}
+
+rust_test {
+    name: "serde_cbor_test_tests_value",
+    host_supported: true,
+    crate_name: "value",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.2",
+    crate_root: "tests/value.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+        "tags",
+    ],
+    rustlibs: [
+        "libhalf",
+        "libserde",
+        "libserde_cbor",
+    ],
+    proc_macros: ["libserde_derive"],
+}
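The rules above build `libserde_cbor` with the `default`, `std`, and `tags` features and link it against `libhalf` and `libserde`. As a minimal sketch of why the `half` dependency is needed (the byte sequence is a hand-written illustration, not taken from the patch):

```rust
// CBOR half-precision floats (initial byte 0xf9) are decoded through the
// `half` crate and widened to f32, which is why `libhalf` is in `rustlibs`.
fn main() {
    let bytes = [0xf9u8, 0x3c, 0x00]; // 0x3c00 is 1.0 as an IEEE 754 half float
    let x: f32 = serde_cbor::from_slice(&bytes).unwrap();
    assert_eq!(x, 1.0);
}
```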
diff --git a/crates/serde_cbor/CONTRIBUTING.md b/crates/serde_cbor/CONTRIBUTING.md
new file mode 100644
index 0000000..1489ade
--- /dev/null
+++ b/crates/serde_cbor/CONTRIBUTING.md
@@ -0,0 +1,29 @@
+# Contributing to Serde CBOR
+Thanks for your interest!
+There are many ways to help:
+
+* write an issue about a problem you encountered
+* submit a pull request
+* add documentation and examples
+
+## Pull Requests
+
+Code should be easy to understand and documented.
+For new features and fixed bugs, please add a test to one of the files in `tests/`.
+The tests are run on Travis CI to catch regressions early.
+Format your code with `cargo fmt` before committing.
+Currently Serde CBOR does not contain `unsafe` code and I would like to keep it this way.
+
+## Making a Release
+
+* [ ] Make sure the crate compiles and all tests pass.
+* [ ] (Optional) Test that the fuzzer works and fuzz the crate for some time.
+* [ ] Write a list of all changes made since the last release.
+* [ ] Increment the version number in `Cargo.toml` and the `README.md`. Bugfixes increase the patch version, while new features or an increased minimum Rust version require a new minor version.
+* [ ] Check that the file `examples/readme.rs` and the example from the `README.md` match.
+* [ ] Commit the changes.
+* [ ] Add a git tag with the new version number:
+    `git tag "v42.0.2"`
+* [ ] Push the changes: `git push --tags`
+* [ ] Run `cargo publish`
+* [ ] Add a new release to GitHub with a list of changes.
\ No newline at end of file
diff --git a/crates/serde_cbor/Cargo.lock b/crates/serde_cbor/Cargo.lock
new file mode 100644
index 0000000..b91ed7c
--- /dev/null
+++ b/crates/serde_cbor/Cargo.lock
@@ -0,0 +1,68 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "half"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ff54597ea139063f4225f1ec47011b03c9de4a486957ff3fc506881dac951d0"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0319972dcae462681daf4da1adeeaa066e3ebd29c69be96c6abb1259d2ee2bcc"
+dependencies = [
+ "unicode-xid",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.104"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449"
+
+[[package]]
+name = "serde_cbor"
+version = "0.11.2"
+dependencies = [
+ "half",
+ "serde",
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.104"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e4ff033220a41d1a57d8125eab57bf5263783dfdcc18688b1dacc6ce9651ef8"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
diff --git a/crates/serde_cbor/Cargo.toml b/crates/serde_cbor/Cargo.toml
new file mode 100644
index 0000000..2960dbe
--- /dev/null
+++ b/crates/serde_cbor/Cargo.toml
@@ -0,0 +1,44 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "serde_cbor"
+version = "0.11.2"
+authors = ["Pyfisch <pyfisch@posteo.org>", "Steven Fackler <sfackler@gmail.com>"]
+description = "CBOR support for serde."
+readme = "README.md"
+keywords = ["serde", "cbor", "serialization", "no_std"]
+categories = ["encoding"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/pyfisch/cbor"
+[dependencies.half]
+version = "1.2.0"
+
+[dependencies.serde]
+version = "1.0.14"
+default-features = false
+[dev-dependencies.serde_derive]
+version = "1.0.14"
+default-features = false
+
+[features]
+alloc = ["serde/alloc"]
+default = ["std"]
+std = ["serde/std"]
+tags = []
+unsealed_read_write = []
+[badges.maintenance]
+status = "as-is"
+
+[badges.travis-ci]
+repository = "pyfisch/cbor"
diff --git a/crates/serde_cbor/LICENSE b/crates/serde_cbor/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/serde_cbor/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/serde_cbor/LICENSE-APACHE b/crates/serde_cbor/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/crates/serde_cbor/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/crates/serde_cbor/LICENSE-MIT b/crates/serde_cbor/LICENSE-MIT
new file mode 100644
index 0000000..b1b75fa
--- /dev/null
+++ b/crates/serde_cbor/LICENSE-MIT
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Pyfisch
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/crates/serde_cbor/METADATA b/crates/serde_cbor/METADATA
new file mode 100644
index 0000000..6997441
--- /dev/null
+++ b/crates/serde_cbor/METADATA
@@ -0,0 +1,19 @@
+name: "serde_cbor"
+description: "CBOR support for serde."
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/serde_cbor"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/serde_cbor/serde_cbor-0.11.2.crate"
+  }
+  version: "0.11.2"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2021
+    month: 9
+    day: 22
+  }
+}
diff --git a/crates/serde_cbor/MODULE_LICENSE_APACHE2 b/crates/serde_cbor/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/serde_cbor/MODULE_LICENSE_APACHE2
diff --git a/crates/serde_cbor/README.md b/crates/serde_cbor/README.md
new file mode 100644
index 0000000..b49e8ad
--- /dev/null
+++ b/crates/serde_cbor/README.md
@@ -0,0 +1,97 @@
+# Serde CBOR
+[![Build Status](https://travis-ci.org/pyfisch/cbor.svg?branch=master)](https://travis-ci.org/pyfisch/cbor)
+[![Crates.io](https://img.shields.io/crates/v/serde_cbor.svg)](https://crates.io/crates/serde_cbor)
+[![Documentation](https://docs.rs/serde_cbor/badge.svg)](https://docs.rs/serde_cbor)
+
+## PROJECT IS ARCHIVED
+
+After almost 6 years it is time to retire this crate.
+This implementation of CBOR for serde is used in hundreds of projects with widely differing needs.
+Besides the standard features it contains code for no-std environments, a packed encoding and CBOR tags.
+However, while these features are useful to many people, they sometimes interact poorly with each other and with optional features of serde itself.
+Because I don't use the crate myself, and because of the potential for new errors, I have been reluctant to accept any changes or additional features for the crate.
+Since this situation is unlikely to change anytime soon and no one else has stepped up to maintain this crate, I am archiving the repository today.
+If the crate works for you, there is no need to switch to another implementation.
+However, if you encounter problems, or for new projects, I recommend you take a look at these crates:
+
+* [ciborium](https://crates.io/crates/ciborium)
+* [minicbor](https://crates.io/crates/minicbor)
+
+~~ Pyfisch, August 2021
+
+
+
+This crate implements the Concise Binary Object Representation from [RFC 7049].
+It builds on [Serde], the generic serialization framework for Rust.
+CBOR provides a binary encoding for a superset
+of the JSON data model that is small and very fast to parse.
+
+[RFC 7049]: https://tools.ietf.org/html/rfc7049
+[Serde]: https://github.com/serde-rs/serde
+
+## Usage
+
+Serde CBOR supports Rust 1.40 and up. Add this to your `Cargo.toml`:
+```toml
+[dependencies]
+serde_cbor = "0.11.2"
+```
+
+Storing and loading Rust types is easy and requires only
+minimal modifications to the program code.
+
+```rust
+use serde_derive::{Deserialize, Serialize};
+use std::error::Error;
+use std::fs::File;
+
+// Types annotated with `Serialize` can be stored as CBOR.
+// To be able to load them again add `Deserialize`.
+#[derive(Debug, Serialize, Deserialize)]
+struct Mascot {
+    name: String,
+    species: String,
+    year_of_birth: u32,
+}
+
+fn main() -> Result<(), Box<dyn Error>> {
+    let ferris = Mascot {
+        name: "Ferris".to_owned(),
+        species: "crab".to_owned(),
+        year_of_birth: 2015,
+    };
+
+    let ferris_file = File::create("examples/ferris.cbor")?;
+    // Write Ferris to the given file.
+    // Instead of a file you can use any type that implements `io::Write`
+    // like an HTTP body, a database connection, etc.
+    serde_cbor::to_writer(ferris_file, &ferris)?;
+
+    let tux_file = File::open("examples/tux.cbor")?;
+    // Load Tux from a file.
+    // Serde CBOR performs roundtrip serialization meaning that
+    // the data will not change in any way.
+    let tux: Mascot = serde_cbor::from_reader(tux_file)?;
+
+    println!("{:?}", tux);
+    // prints: Mascot { name: "Tux", species: "penguin", year_of_birth: 1996 }
+
+    Ok(())
+}
+```
+
+There are a lot of options available to customize the format.
+To operate on untyped CBOR values have a look at the `Value` type.
+
+## License
+Licensed under either of
+
+ * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
+additional terms or conditions.
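For the `Value` type the README points at, a minimal sketch of working with untyped CBOR (the map contents are illustrative):

```rust
use std::collections::BTreeMap;

use serde_cbor::Value;

fn main() -> Result<(), serde_cbor::Error> {
    // Encode a small map without defining a struct, then reopen it as untyped CBOR.
    let mut input = BTreeMap::new();
    input.insert("name", "Tux");
    let bytes = serde_cbor::to_vec(&input)?;

    match serde_cbor::from_slice::<Value>(&bytes)? {
        Value::Map(map) => println!("map with {} entries", map.len()),
        other => println!("unexpected value: {:?}", other),
    }
    Ok(())
}
```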
diff --git a/crates/serde_cbor/TEST_MAPPING b/crates/serde_cbor/TEST_MAPPING
new file mode 100644
index 0000000..a757518
--- /dev/null
+++ b/crates/serde_cbor/TEST_MAPPING
@@ -0,0 +1,84 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/base64"
+    },
+    {
+      "path": "external/rust/crates/tinytemplate"
+    },
+    {
+      "path": "external/rust/crates/tinyvec"
+    },
+    {
+      "path": "external/rust/crates/unicode-xid"
+    },
+    {
+      "path": "packages/modules/Virtualization/avmd"
+    },
+    {
+      "path": "packages/modules/Virtualization/microdroid_manager"
+    },
+    {
+      "path": "system/security/diced"
+    },
+    {
+      "path": "system/security/keystore2"
+    },
+    {
+      "path": "system/security/keystore2/legacykeystore"
+    }
+  ],
+  "presubmit": [
+    {
+      "name": "serde_cbor_test_tests_bennofs"
+    },
+    {
+      "name": "serde_cbor_test_tests_canonical"
+    },
+    {
+      "name": "serde_cbor_test_tests_de"
+    },
+    {
+      "name": "serde_cbor_test_tests_enum"
+    },
+    {
+      "name": "serde_cbor_test_tests_ser"
+    },
+    {
+      "name": "serde_cbor_test_tests_std_types"
+    },
+    {
+      "name": "serde_cbor_test_tests_tags"
+    },
+    {
+      "name": "serde_cbor_test_tests_value"
+    }
+  ],
+  "presubmit-rust": [
+    {
+      "name": "serde_cbor_test_tests_bennofs"
+    },
+    {
+      "name": "serde_cbor_test_tests_canonical"
+    },
+    {
+      "name": "serde_cbor_test_tests_de"
+    },
+    {
+      "name": "serde_cbor_test_tests_enum"
+    },
+    {
+      "name": "serde_cbor_test_tests_ser"
+    },
+    {
+      "name": "serde_cbor_test_tests_std_types"
+    },
+    {
+      "name": "serde_cbor_test_tests_tags"
+    },
+    {
+      "name": "serde_cbor_test_tests_value"
+    }
+  ]
+}
diff --git a/crates/serde_cbor/cargo_embargo.json b/crates/serde_cbor/cargo_embargo.json
new file mode 100644
index 0000000..abdd3d0
--- /dev/null
+++ b/crates/serde_cbor/cargo_embargo.json
@@ -0,0 +1,7 @@
+{
+  "features": [
+    "default",
+    "tags"
+  ],
+  "tests": true
+}
diff --git a/crates/serde_cbor/examples/readme.rs b/crates/serde_cbor/examples/readme.rs
new file mode 100644
index 0000000..7689394
--- /dev/null
+++ b/crates/serde_cbor/examples/readme.rs
@@ -0,0 +1,39 @@
+// NOTE: This file should be kept in sync with README.md
+
+use serde_derive::{Deserialize, Serialize};
+use std::error::Error;
+use std::fs::File;
+
+// Types annotated with `Serialize` can be stored as CBOR.
+// To be able to load them again add `Deserialize`.
+#[derive(Debug, Serialize, Deserialize)]
+struct Mascot {
+    name: String,
+    species: String,
+    year_of_birth: u32,
+}
+
+fn main() -> Result<(), Box<dyn Error>> {
+    let ferris = Mascot {
+        name: "Ferris".to_owned(),
+        species: "crab".to_owned(),
+        year_of_birth: 2015,
+    };
+
+    let ferris_file = File::create("examples/ferris.cbor")?;
+    // Write Ferris to the given file.
+    // Instead of a file you can use any type that implements `io::Write`
+    // like an HTTP body, a database connection, etc.
+    serde_cbor::to_writer(ferris_file, &ferris)?;
+
+    let tux_file = File::open("examples/tux.cbor")?;
+    // Load Tux from a file.
+    // Serde CBOR performs roundtrip serialization meaning that
+    // the data will not change in any way.
+    let tux: Mascot = serde_cbor::from_reader(tux_file)?;
+
+    println!("{:?}", tux);
+    // prints: Mascot { name: "Tux", species: "penguin", year_of_birth: 1996 }
+
+    Ok(())
+}
diff --git a/crates/serde_cbor/examples/tags.rs b/crates/serde_cbor/examples/tags.rs
new file mode 100644
index 0000000..9281b9b
--- /dev/null
+++ b/crates/serde_cbor/examples/tags.rs
@@ -0,0 +1,84 @@
+use serde::de::{Deserialize, Deserializer};
+use serde::ser::{Serialize, Serializer};
+use serde_cbor::tags::Tagged;
+use serde_cbor::Value;
+use serde_derive::{Deserialize, Serialize};
+use std::error::Error;
+
+/// https://tools.ietf.org/html/rfc7049#section-2.4.1
+#[derive(Debug, PartialEq)]
+struct Date(String);
+
+impl Serialize for Date {
+    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+        Tagged::new(Some(0), &self.0).serialize(s)
+    }
+}
+
+impl<'de> Deserialize<'de> for Date {
+    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+        let tagged = Tagged::<String>::deserialize(deserializer)?;
+        match tagged.tag {
+            Some(0) | None => Ok(Date(tagged.value)),
+            Some(_) => Err(serde::de::Error::custom("unexpected tag")),
+        }
+    }
+}
+
+/// https://tools.ietf.org/html/rfc7049#section-2.4.4.3
+#[derive(Debug, PartialEq)]
+struct Uri(String);
+
+impl Serialize for Uri {
+    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+        Tagged::new(Some(32), &self.0).serialize(s)
+    }
+}
+impl<'de> Deserialize<'de> for Uri {
+    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+        let tagged = Tagged::<String>::deserialize(deserializer)?;
+        match tagged.tag {
+            // allow deserialization even if there is no tag. Allows roundtrip via other formats such as json
+            Some(32) | None => Ok(Uri(tagged.value)),
+            Some(_) => Err(serde::de::Error::custom("unexpected tag")),
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq)]
+struct Bookmark {
+    title: String,
+    link: Uri,
+    created: Date,
+}
+
+fn main() -> Result<(), Box<dyn Error>> {
+    let bookmark = Bookmark {
+        title: "The Example Domain".into(),
+        link: Uri("http://example.org/".into()),
+        created: Date("2003-12-13T18:30:02Z".into()),
+    };
+
+    // serialize the struct to bytes
+    let bytes1 = serde_cbor::to_vec(&bookmark)?;
+    // deserialize to a serde_cbor::Value
+    let value1: Value = serde_cbor::from_slice(&bytes1)?;
+    println!("{:?}", value1);
+    // serialize the value to bytes
+    let bytes2 = serde_cbor::to_vec(&value1)?;
+    // deserialize to a serde_cbor::Value
+    let value2: Value = serde_cbor::from_slice(&bytes2)?;
+    println!("{:?}", value2);
+    // deserialize to a Bookmark
+    let result: Bookmark = serde_cbor::from_slice(&bytes2)?;
+
+    // check that the roundtrip was successful
+    assert_eq!(value1, value2);
+    assert_eq!(bookmark, result);
+
+    // check that going via a format that does not support tags does work
+    // let json = serde_json::to_vec(&bookmark)?;
+    // let result: Bookmark = serde_json::from_slice(&json)?;
+    // assert_eq!(bookmark, result);
+    Ok(())
+}
diff --git a/crates/serde_cbor/examples/tux.cbor b/crates/serde_cbor/examples/tux.cbor
new file mode 100644
index 0000000..c3331aa
--- /dev/null
+++ b/crates/serde_cbor/examples/tux.cbor
@@ -0,0 +1 @@
+£dnamecTuxgspeciesgpenguinmyear_of_birthÌ
\ No newline at end of file
diff --git a/crates/serde_cbor/src/de.rs b/crates/serde_cbor/src/de.rs
new file mode 100644
index 0000000..170e059
--- /dev/null
+++ b/crates/serde_cbor/src/de.rs
@@ -0,0 +1,1360 @@
+//! Deserialization.
+
+use core::f32;
+use core::marker::PhantomData;
+use core::result;
+use core::str;
+use half::f16;
+use serde::de;
+#[cfg(feature = "std")]
+use std::io;
+
+use crate::error::{Error, ErrorCode, Result};
+#[cfg(not(feature = "unsealed_read_write"))]
+use crate::read::EitherLifetime;
+#[cfg(feature = "unsealed_read_write")]
+pub use crate::read::EitherLifetime;
+#[cfg(feature = "std")]
+pub use crate::read::IoRead;
+use crate::read::Offset;
+#[cfg(any(feature = "std", feature = "alloc"))]
+pub use crate::read::SliceRead;
+pub use crate::read::{MutSliceRead, Read, SliceReadFixed};
+#[cfg(feature = "tags")]
+use crate::tags::set_tag;
+/// Decodes a value from CBOR data in a slice.
+///
+/// # Examples
+///
+/// Deserialize a `String`
+///
+/// ```
+/// # use serde_cbor::de;
+/// let v: Vec<u8> = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72];
+/// let value: String = de::from_slice(&v[..]).unwrap();
+/// assert_eq!(value, "foobar");
+/// ```
+///
+/// Deserialize a borrowed string with zero copies.
+///
+/// ```
+/// # use serde_cbor::de;
+/// let v: Vec<u8> = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72];
+/// let value: &str = de::from_slice(&v[..]).unwrap();
+/// assert_eq!(value, "foobar");
+/// ```
+#[cfg(any(feature = "std", feature = "alloc"))]
+pub fn from_slice<'a, T>(slice: &'a [u8]) -> Result<T>
+where
+    T: de::Deserialize<'a>,
+{
+    let mut deserializer = Deserializer::from_slice(slice);
+    let value = de::Deserialize::deserialize(&mut deserializer)?;
+    deserializer.end()?;
+    Ok(value)
+}
+
+// When the "std" feature is enabled there should be little to no need to ever use this function,
+// as `from_slice` covers all use cases (at the expense of being less efficient).
+/// Decode a value from CBOR data in a mutable slice.
+///
+/// This can be used in analogy to `from_slice`. Unlike `from_slice`, this will use the slice's
+/// mutability to rearrange data in it in order to resolve indefinite byte or text strings without
+/// resorting to allocations.
+pub fn from_mut_slice<'a, T>(slice: &'a mut [u8]) -> Result<T>
+where
+    T: de::Deserialize<'a>,
+{
+    let mut deserializer = Deserializer::from_mut_slice(slice);
+    let value = de::Deserialize::deserialize(&mut deserializer)?;
+    deserializer.end()?;
+    Ok(value)
+}
+
+// When the "std" feature is enabled there should be little to no need to ever use this function,
+// as `from_slice` covers all use cases and is much more reliable (at the expense of being less
+// efficient).
+/// Decode a value from CBOR data using a scratch buffer.
+///
+/// Users should generally prefer to use `from_slice` or `from_mut_slice` over this function,
+/// as decoding may fail when the scratch buffer turns out to be too small.
+///
+/// A realistic use case for this method would be decoding in a `no_std` environment from an
+/// immutable slice that is too large to copy.
+pub fn from_slice_with_scratch<'a, 'b, T>(slice: &'a [u8], scratch: &'b mut [u8]) -> Result<T>
+where
+    T: de::Deserialize<'a>,
+{
+    let mut deserializer = Deserializer::from_slice_with_scratch(slice, scratch);
+    let value = de::Deserialize::deserialize(&mut deserializer)?;
+    deserializer.end()?;
+    Ok(value)
+}
+
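As the comments above note, the scratch-buffer entry point targets `no_std` decoding from an immutable slice; a minimal sketch, assuming only definite-length items (which are decoded in place and never touch the scratch space):

```rust
fn main() {
    let cbor = [0x18u8, 0x2a]; // the unsigned integer 42 with a one-byte argument
    // An empty scratch buffer is enough here; indefinite-length byte or text
    // strings would need room in it to be reassembled.
    let n: u8 = serde_cbor::de::from_slice_with_scratch(&cbor, &mut []).unwrap();
    assert_eq!(n, 42);
}
```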
+/// Decodes a value from CBOR data in a reader.
+///
+/// # Examples
+///
+/// Deserialize a `String`
+///
+/// ```
+/// # use serde_cbor::de;
+/// let v: Vec<u8> = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72];
+/// let value: String = de::from_reader(&v[..]).unwrap();
+/// assert_eq!(value, "foobar");
+/// ```
+///
+/// Note that `from_reader` cannot borrow data:
+///
+/// ```compile_fail
+/// # use serde_cbor::de;
+/// let v: Vec<u8> = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72];
+/// let value: &str = de::from_reader(&v[..]).unwrap();
+/// assert_eq!(value, "foobar");
+/// ```
+#[cfg(feature = "std")]
+pub fn from_reader<T, R>(reader: R) -> Result<T>
+where
+    T: de::DeserializeOwned,
+    R: io::Read,
+{
+    let mut deserializer = Deserializer::from_reader(reader);
+    let value = de::Deserialize::deserialize(&mut deserializer)?;
+    deserializer.end()?;
+    Ok(value)
+}
+
+/// A Serde `Deserialize`r of CBOR data.
+#[derive(Debug)]
+pub struct Deserializer<R> {
+    read: R,
+    remaining_depth: u8,
+    accept_named: bool,
+    accept_packed: bool,
+    accept_standard_enums: bool,
+    accept_legacy_enums: bool,
+}
+
+#[cfg(feature = "std")]
+impl<R> Deserializer<IoRead<R>>
+where
+    R: io::Read,
+{
+    /// Constructs a `Deserializer` which reads from a `Read`er.
+    pub fn from_reader(reader: R) -> Deserializer<IoRead<R>> {
+        Deserializer::new(IoRead::new(reader))
+    }
+}
+
+#[cfg(any(feature = "std", feature = "alloc"))]
+impl<'a> Deserializer<SliceRead<'a>> {
+    /// Constructs a `Deserializer` which reads from a slice.
+    ///
+    /// Borrowed strings and byte slices will be provided when possible.
+    pub fn from_slice(bytes: &'a [u8]) -> Deserializer<SliceRead<'a>> {
+        Deserializer::new(SliceRead::new(bytes))
+    }
+}
+
+impl<'a> Deserializer<MutSliceRead<'a>> {
+    /// Constructs a `Deserializer` which reads from a mutable slice that doubles as its own
+    /// scratch buffer.
+    ///
+    /// Borrowed strings and byte slices will be provided even for indefinite strings.
+    pub fn from_mut_slice(bytes: &'a mut [u8]) -> Deserializer<MutSliceRead<'a>> {
+        Deserializer::new(MutSliceRead::new(bytes))
+    }
+}
+
+impl<'a, 'b> Deserializer<SliceReadFixed<'a, 'b>> {
+    #[doc(hidden)]
+    pub fn from_slice_with_scratch(
+        bytes: &'a [u8],
+        scratch: &'b mut [u8],
+    ) -> Deserializer<SliceReadFixed<'a, 'b>> {
+        Deserializer::new(SliceReadFixed::new(bytes, scratch))
+    }
+}
+
+impl<'de, R> Deserializer<R>
+where
+    R: Read<'de>,
+{
+    /// Constructs a `Deserializer` from one of the possible serde_cbor input sources.
+    ///
+    /// `from_slice` and `from_reader` should normally be used instead of this method.
+    pub fn new(read: R) -> Self {
+        Deserializer {
+            read,
+            remaining_depth: 128,
+            accept_named: true,
+            accept_packed: true,
+            accept_standard_enums: true,
+            accept_legacy_enums: true,
+        }
+    }
+
+    /// Don't accept named variants and fields.
+    pub fn disable_named_format(mut self) -> Self {
+        self.accept_named = false;
+        self
+    }
+
+    /// Don't accept numbered variants and fields.
+    pub fn disable_packed_format(mut self) -> Self {
+        self.accept_packed = false;
+        self
+    }
+
+    /// Don't accept the new enum format used by `serde_cbor` versions >= v0.10.
+    pub fn disable_standard_enums(mut self) -> Self {
+        self.accept_standard_enums = false;
+        self
+    }
+
+    /// Don't accept the old enum format used by `serde_cbor` versions <= v0.9.
+    pub fn disable_legacy_enums(mut self) -> Self {
+        self.accept_legacy_enums = false;
+        self
+    }
+
+    /// This method should be called after a value has been deserialized to ensure there is no
+    /// trailing data in the input source.
+    pub fn end(&mut self) -> Result<()> {
+        match self.next()? {
+            Some(_) => Err(self.error(ErrorCode::TrailingData)),
+            None => Ok(()),
+        }
+    }
+
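A minimal sketch of driving the `Deserializer` by hand with one of the `disable_*` options and a final `end()` check (the struct and the byte sequence are illustrative):

```rust
use serde::de::Deserialize;
use serde_derive::Deserialize;

#[derive(Deserialize, Debug)]
struct Named {
    name: String,
}

fn main() -> Result<(), serde_cbor::Error> {
    // {"name": "Tux"} as CBOR: a one-entry map with a text key and a text value.
    let cbor = [0xa1, 0x64, b'n', b'a', b'm', b'e', 0x63, b'T', b'u', b'x'];

    // Accept only named (string) keys; the packed, integer-keyed encoding is rejected.
    let mut de = serde_cbor::Deserializer::from_slice(&cbor).disable_packed_format();
    let value = Named::deserialize(&mut de)?;
    de.end()?;
    assert_eq!(value.name, "Tux");
    Ok(())
}
```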
+    /// Turn a CBOR deserializer into an iterator over values of type T.
+    #[allow(clippy::should_implement_trait)] // Trait doesn't allow unconstrained T.
+    pub fn into_iter<T>(self) -> StreamDeserializer<'de, R, T>
+    where
+        T: de::Deserialize<'de>,
+    {
+        StreamDeserializer {
+            de: self,
+            output: PhantomData,
+            lifetime: PhantomData,
+        }
+    }
+
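A minimal sketch of the iterator form, assuming `StreamDeserializer` yields `Result<T>` items for concatenated CBOR values (in the style of serde_json's stream deserializer):

```rust
fn main() {
    // Three CBOR unsigned integers written back to back.
    let data = [0x01u8, 0x02, 0x03];
    let de = serde_cbor::Deserializer::from_slice(&data);
    let values: Result<Vec<u8>, _> = de.into_iter::<u8>().collect();
    assert_eq!(values.unwrap(), vec![1, 2, 3]);
}
```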
+    fn next(&mut self) -> Result<Option<u8>> {
+        self.read.next()
+    }
+
+    fn peek(&mut self) -> Result<Option<u8>> {
+        self.read.peek()
+    }
+
+    fn consume(&mut self) {
+        self.read.discard();
+    }
+
+    fn error(&self, reason: ErrorCode) -> Error {
+        let offset = self.read.offset();
+        Error::syntax(reason, offset)
+    }
+
+    fn parse_u8(&mut self) -> Result<u8> {
+        match self.next()? {
+            Some(byte) => Ok(byte),
+            None => Err(self.error(ErrorCode::EofWhileParsingValue)),
+        }
+    }
+
+    fn parse_u16(&mut self) -> Result<u16> {
+        let mut buf = [0; 2];
+        self.read
+            .read_into(&mut buf)
+            .map(|()| u16::from_be_bytes(buf))
+    }
+
+    fn parse_u32(&mut self) -> Result<u32> {
+        let mut buf = [0; 4];
+        self.read
+            .read_into(&mut buf)
+            .map(|()| u32::from_be_bytes(buf))
+    }
+
+    fn parse_u64(&mut self) -> Result<u64> {
+        let mut buf = [0; 8];
+        self.read
+            .read_into(&mut buf)
+            .map(|()| u64::from_be_bytes(buf))
+    }
+
+    fn parse_bytes<V>(&mut self, len: usize, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        match self.read.read(len)? {
+            EitherLifetime::Long(buf) => visitor.visit_borrowed_bytes(buf),
+            EitherLifetime::Short(buf) => visitor.visit_bytes(buf),
+        }
+    }
+
+    fn parse_indefinite_bytes<V>(&mut self, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        self.read.clear_buffer();
+        loop {
+            let byte = self.parse_u8()?;
+            let len = match byte {
+                0x40..=0x57 => byte as usize - 0x40,
+                0x58 => self.parse_u8()? as usize,
+                0x59 => self.parse_u16()? as usize,
+                0x5a => self.parse_u32()? as usize,
+                0x5b => {
+                    let len = self.parse_u64()?;
+                    if len > usize::max_value() as u64 {
+                        return Err(self.error(ErrorCode::LengthOutOfRange));
+                    }
+                    len as usize
+                }
+                0xff => break,
+                _ => return Err(self.error(ErrorCode::UnexpectedCode)),
+            };
+
+            self.read.read_to_buffer(len)?;
+        }
+
+        match self.read.take_buffer() {
+            EitherLifetime::Long(buf) => visitor.visit_borrowed_bytes(buf),
+            EitherLifetime::Short(buf) => visitor.visit_bytes(buf),
+        }
+    }
+
+    fn convert_str<'a>(buf: &'a [u8], buf_end_offset: u64) -> Result<&'a str> {
+        match str::from_utf8(buf) {
+            Ok(s) => Ok(s),
+            Err(e) => {
+                let shift = buf.len() - e.valid_up_to();
+                let offset = buf_end_offset - shift as u64;
+                Err(Error::syntax(ErrorCode::InvalidUtf8, offset))
+            }
+        }
+    }
+
+    fn parse_str<V>(&mut self, len: usize, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        if let Some(offset) = self.read.offset().checked_add(len as u64) {
+            match self.read.read(len)? {
+                EitherLifetime::Long(buf) => {
+                    let s = Self::convert_str(buf, offset)?;
+                    visitor.visit_borrowed_str(s)
+                }
+                EitherLifetime::Short(buf) => {
+                    let s = Self::convert_str(buf, offset)?;
+                    visitor.visit_str(s)
+                }
+            }
+        } else {
+            // An overflow would have occurred.
+            Err(Error::syntax(
+                ErrorCode::LengthOutOfRange,
+                self.read.offset(),
+            ))
+        }
+    }
+
+    fn parse_indefinite_str<V>(&mut self, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        self.read.clear_buffer();
+        loop {
+            let byte = self.parse_u8()?;
+            let len = match byte {
+                0x60..=0x77 => byte as usize - 0x60,
+                0x78 => self.parse_u8()? as usize,
+                0x79 => self.parse_u16()? as usize,
+                0x7a => self.parse_u32()? as usize,
+                0x7b => {
+                    let len = self.parse_u64()?;
+                    if len > usize::max_value() as u64 {
+                        return Err(self.error(ErrorCode::LengthOutOfRange));
+                    }
+                    len as usize
+                }
+                0xff => break,
+                _ => return Err(self.error(ErrorCode::UnexpectedCode)),
+            };
+
+            self.read.read_to_buffer(len)?;
+        }
+
+        let offset = self.read.offset();
+        match self.read.take_buffer() {
+            EitherLifetime::Long(buf) => {
+                let s = Self::convert_str(buf, offset)?;
+                visitor.visit_borrowed_str(s)
+            }
+            EitherLifetime::Short(buf) => {
+                let s = Self::convert_str(buf, offset)?;
+                visitor.visit_str(s)
+            }
+        }
+    }
+
+    #[cfg(feature = "tags")]
+    fn handle_tagged_value<V>(&mut self, tag: u64, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        self.recursion_checked(|d| {
+            set_tag(Some(tag));
+            let r = visitor.visit_newtype_struct(d);
+            set_tag(None);
+            r
+        })
+    }
+
+    #[cfg(not(feature = "tags"))]
+    fn handle_tagged_value<V>(&mut self, _tag: u64, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        self.recursion_checked(|de| de.parse_value(visitor))
+    }
+
+    fn recursion_checked<F, T>(&mut self, f: F) -> Result<T>
+    where
+        F: FnOnce(&mut Deserializer<R>) -> Result<T>,
+    {
+        self.remaining_depth -= 1;
+        if self.remaining_depth == 0 {
+            return Err(self.error(ErrorCode::RecursionLimitExceeded));
+        }
+        let r = f(self);
+        self.remaining_depth += 1;
+        r
+    }
+
+    fn parse_array<V>(&mut self, mut len: usize, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        self.recursion_checked(|de| {
+            let value = visitor.visit_seq(SeqAccess { de, len: &mut len })?;
+
+            if len != 0 {
+                Err(de.error(ErrorCode::TrailingData))
+            } else {
+                Ok(value)
+            }
+        })
+    }
+
+    fn parse_indefinite_array<V>(&mut self, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        self.recursion_checked(|de| {
+            let value = visitor.visit_seq(IndefiniteSeqAccess { de })?;
+            match de.next()? {
+                Some(0xff) => Ok(value),
+                Some(_) => Err(de.error(ErrorCode::TrailingData)),
+                None => Err(de.error(ErrorCode::EofWhileParsingArray)),
+            }
+        })
+    }
+
+    fn parse_map<V>(&mut self, mut len: usize, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        let accept_packed = self.accept_packed;
+        let accept_named = self.accept_named;
+        self.recursion_checked(|de| {
+            let value = visitor.visit_map(MapAccess {
+                de,
+                len: &mut len,
+                accept_named,
+                accept_packed,
+            })?;
+
+            if len != 0 {
+                Err(de.error(ErrorCode::TrailingData))
+            } else {
+                Ok(value)
+            }
+        })
+    }
+
+    fn parse_indefinite_map<V>(&mut self, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        let accept_named = self.accept_named;
+        let accept_packed = self.accept_packed;
+        self.recursion_checked(|de| {
+            let value = visitor.visit_map(IndefiniteMapAccess {
+                de,
+                accept_packed,
+                accept_named,
+            })?;
+            match de.next()? {
+                Some(0xff) => Ok(value),
+                Some(_) => Err(de.error(ErrorCode::TrailingData)),
+                None => Err(de.error(ErrorCode::EofWhileParsingMap)),
+            }
+        })
+    }
+
+    fn parse_enum<V>(&mut self, mut len: usize, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        self.recursion_checked(|de| {
+            let value = visitor.visit_enum(VariantAccess {
+                seq: SeqAccess { de, len: &mut len },
+            })?;
+
+            if len != 0 {
+                Err(de.error(ErrorCode::TrailingData))
+            } else {
+                Ok(value)
+            }
+        })
+    }
+
+    fn parse_enum_map<V>(&mut self, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        let accept_named = self.accept_named;
+        let accept_packed = self.accept_packed;
+        self.recursion_checked(|de| {
+            let mut len = 1;
+            let value = visitor.visit_enum(VariantAccessMap {
+                map: MapAccess {
+                    de,
+                    len: &mut len,
+                    accept_packed,
+                    accept_named,
+                },
+            })?;
+
+            if len != 0 {
+                Err(de.error(ErrorCode::TrailingData))
+            } else {
+                Ok(value)
+            }
+        })
+    }
+
+    fn parse_indefinite_enum<V>(&mut self, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        self.recursion_checked(|de| {
+            let value = visitor.visit_enum(VariantAccess {
+                seq: IndefiniteSeqAccess { de },
+            })?;
+            match de.next()? {
+                Some(0xff) => Ok(value),
+                Some(_) => Err(de.error(ErrorCode::TrailingData)),
+                None => Err(de.error(ErrorCode::EofWhileParsingArray)),
+            }
+        })
+    }
+
+    fn parse_f16(&mut self) -> Result<f32> {
+        Ok(f32::from(f16::from_bits(self.parse_u16()?)))
+    }
+
+    fn parse_f32(&mut self) -> Result<f32> {
+        self.parse_u32().map(|i| f32::from_bits(i))
+    }
+
+    fn parse_f64(&mut self) -> Result<f64> {
+        self.parse_u64().map(|i| f64::from_bits(i))
+    }
+
+    // Don't warn about the `unreachable!` in case
+    // exhaustive integer pattern matching is enabled.
+    #[allow(unreachable_patterns)]
+    fn parse_value<V>(&mut self, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        let byte = self.parse_u8()?;
+        match byte {
+            // Major type 0: an unsigned integer
+            0x00..=0x17 => visitor.visit_u8(byte),
+            0x18 => {
+                let value = self.parse_u8()?;
+                visitor.visit_u8(value)
+            }
+            0x19 => {
+                let value = self.parse_u16()?;
+                visitor.visit_u16(value)
+            }
+            0x1a => {
+                let value = self.parse_u32()?;
+                visitor.visit_u32(value)
+            }
+            0x1b => {
+                let value = self.parse_u64()?;
+                visitor.visit_u64(value)
+            }
+            0x1c..=0x1f => Err(self.error(ErrorCode::UnassignedCode)),
+
+            // Major type 1: a negative integer
+            0x20..=0x37 => visitor.visit_i8(-1 - (byte - 0x20) as i8),
+            0x38 => {
+                let value = self.parse_u8()?;
+                visitor.visit_i16(-1 - i16::from(value))
+            }
+            0x39 => {
+                let value = self.parse_u16()?;
+                visitor.visit_i32(-1 - i32::from(value))
+            }
+            0x3a => {
+                let value = self.parse_u32()?;
+                visitor.visit_i64(-1 - i64::from(value))
+            }
+            0x3b => {
+                let value = self.parse_u64()?;
+                if value > i64::max_value() as u64 {
+                    return visitor.visit_i128(-1 - i128::from(value));
+                }
+                visitor.visit_i64(-1 - value as i64)
+            }
+            0x3c..=0x3f => Err(self.error(ErrorCode::UnassignedCode)),
+
+            // Major type 2: a byte string
+            0x40..=0x57 => self.parse_bytes(byte as usize - 0x40, visitor),
+            0x58 => {
+                let len = self.parse_u8()?;
+                self.parse_bytes(len as usize, visitor)
+            }
+            0x59 => {
+                let len = self.parse_u16()?;
+                self.parse_bytes(len as usize, visitor)
+            }
+            0x5a => {
+                let len = self.parse_u32()?;
+                self.parse_bytes(len as usize, visitor)
+            }
+            0x5b => {
+                let len = self.parse_u64()?;
+                if len > usize::max_value() as u64 {
+                    return Err(self.error(ErrorCode::LengthOutOfRange));
+                }
+                self.parse_bytes(len as usize, visitor)
+            }
+            0x5c..=0x5e => Err(self.error(ErrorCode::UnassignedCode)),
+            0x5f => self.parse_indefinite_bytes(visitor),
+
+            // Major type 3: a text string
+            0x60..=0x77 => self.parse_str(byte as usize - 0x60, visitor),
+            0x78 => {
+                let len = self.parse_u8()?;
+                self.parse_str(len as usize, visitor)
+            }
+            0x79 => {
+                let len = self.parse_u16()?;
+                self.parse_str(len as usize, visitor)
+            }
+            0x7a => {
+                let len = self.parse_u32()?;
+                self.parse_str(len as usize, visitor)
+            }
+            0x7b => {
+                let len = self.parse_u64()?;
+                if len > usize::max_value() as u64 {
+                    return Err(self.error(ErrorCode::LengthOutOfRange));
+                }
+                self.parse_str(len as usize, visitor)
+            }
+            0x7c..=0x7e => Err(self.error(ErrorCode::UnassignedCode)),
+            0x7f => self.parse_indefinite_str(visitor),
+
+            // Major type 4: an array of data items
+            0x80..=0x97 => self.parse_array(byte as usize - 0x80, visitor),
+            0x98 => {
+                let len = self.parse_u8()?;
+                self.parse_array(len as usize, visitor)
+            }
+            0x99 => {
+                let len = self.parse_u16()?;
+                self.parse_array(len as usize, visitor)
+            }
+            0x9a => {
+                let len = self.parse_u32()?;
+                self.parse_array(len as usize, visitor)
+            }
+            0x9b => {
+                let len = self.parse_u64()?;
+                if len > usize::max_value() as u64 {
+                    return Err(self.error(ErrorCode::LengthOutOfRange));
+                }
+                self.parse_array(len as usize, visitor)
+            }
+            0x9c..=0x9e => Err(self.error(ErrorCode::UnassignedCode)),
+            0x9f => self.parse_indefinite_array(visitor),
+
+            // Major type 5: a map of pairs of data items
+            0xa0..=0xb7 => self.parse_map(byte as usize - 0xa0, visitor),
+            0xb8 => {
+                let len = self.parse_u8()?;
+                self.parse_map(len as usize, visitor)
+            }
+            0xb9 => {
+                let len = self.parse_u16()?;
+                self.parse_map(len as usize, visitor)
+            }
+            0xba => {
+                let len = self.parse_u32()?;
+                self.parse_map(len as usize, visitor)
+            }
+            0xbb => {
+                let len = self.parse_u64()?;
+                if len > usize::max_value() as u64 {
+                    return Err(self.error(ErrorCode::LengthOutOfRange));
+                }
+                self.parse_map(len as usize, visitor)
+            }
+            0xbc..=0xbe => Err(self.error(ErrorCode::UnassignedCode)),
+            0xbf => self.parse_indefinite_map(visitor),
+
+            // Major type 6: optional semantic tagging of other major types
+            0xc0..=0xd7 => {
+                let tag = u64::from(byte) - 0xc0;
+                self.handle_tagged_value(tag, visitor)
+            }
+            0xd8 => {
+                let tag = self.parse_u8()?;
+                self.handle_tagged_value(tag.into(), visitor)
+            }
+            0xd9 => {
+                let tag = self.parse_u16()?;
+                self.handle_tagged_value(tag.into(), visitor)
+            }
+            0xda => {
+                let tag = self.parse_u32()?;
+                self.handle_tagged_value(tag.into(), visitor)
+            }
+            0xdb => {
+                let tag = self.parse_u64()?;
+                self.handle_tagged_value(tag, visitor)
+            }
+            0xdc..=0xdf => Err(self.error(ErrorCode::UnassignedCode)),
+
+            // Major type 7: floating-point numbers and other simple data types that need no content
+            0xe0..=0xf3 => Err(self.error(ErrorCode::UnassignedCode)),
+            0xf4 => visitor.visit_bool(false),
+            0xf5 => visitor.visit_bool(true),
+            0xf6 => visitor.visit_unit(),
+            0xf7 => visitor.visit_unit(),
+            0xf8 => Err(self.error(ErrorCode::UnassignedCode)),
+            0xf9 => {
+                let value = self.parse_f16()?;
+                visitor.visit_f32(value)
+            }
+            0xfa => {
+                let value = self.parse_f32()?;
+                visitor.visit_f32(value)
+            }
+            0xfb => {
+                let value = self.parse_f64()?;
+                visitor.visit_f64(value)
+            }
+            0xfc..=0xfe => Err(self.error(ErrorCode::UnassignedCode)),
+            0xff => Err(self.error(ErrorCode::UnexpectedCode)),
+
+            _ => unreachable!(),
+        }
+    }
+}
+
+impl<'de, 'a, R> de::Deserializer<'de> for &'a mut Deserializer<R>
+where
+    R: Read<'de>,
+{
+    type Error = Error;
+
+    #[inline]
+    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        self.parse_value(visitor)
+    }
+
+    #[inline]
+    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        match self.peek()? {
+            Some(0xf6) => {
+                self.consume();
+                visitor.visit_none()
+            }
+            _ => visitor.visit_some(self),
+        }
+    }
+
+    #[inline]
+    fn deserialize_newtype_struct<V>(self, _name: &str, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        visitor.visit_newtype_struct(self)
+    }
+
+    // Unit variants are encoded as just the variant identifier.
+    // Tuple variants are encoded as an array of the variant identifier followed by the fields.
+    // Struct variants are encoded as an array of the variant identifier followed by the struct.
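+    //
+    // Two wire formats are accepted below: the legacy format, where non-unit variants
+    // arrive as a CBOR array (major type 4, initial bytes 0x80..=0x9f) whose first
+    // element is the variant identifier, and the standard format, where they arrive as
+    // a single-entry map (0xa1) keyed by the variant identifier. A bare identifier is
+    // treated as a unit variant. Which formats are allowed is controlled by the
+    // `accept_legacy_enums` / `accept_standard_enums` flags checked in the match arms.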
+    #[inline]
+    fn deserialize_enum<V>(
+        self,
+        _name: &str,
+        _variants: &'static [&'static str],
+        visitor: V,
+    ) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        match self.peek()? {
+            Some(byte @ 0x80..=0x9f) => {
+                if !self.accept_legacy_enums {
+                    return Err(self.error(ErrorCode::WrongEnumFormat));
+                }
+                self.consume();
+                match byte {
+                    0x80..=0x97 => self.parse_enum(byte as usize - 0x80, visitor),
+                    0x98 => {
+                        let len = self.parse_u8()?;
+                        self.parse_enum(len as usize, visitor)
+                    }
+                    0x99 => {
+                        let len = self.parse_u16()?;
+                        self.parse_enum(len as usize, visitor)
+                    }
+                    0x9a => {
+                        let len = self.parse_u32()?;
+                        self.parse_enum(len as usize, visitor)
+                    }
+                    0x9b => {
+                        let len = self.parse_u64()?;
+                        if len > usize::max_value() as u64 {
+                            return Err(self.error(ErrorCode::LengthOutOfRange));
+                        }
+                        self.parse_enum(len as usize, visitor)
+                    }
+                    0x9c..=0x9e => Err(self.error(ErrorCode::UnassignedCode)),
+                    0x9f => self.parse_indefinite_enum(visitor),
+
+                    _ => unreachable!(),
+                }
+            }
+            Some(0xa1) => {
+                if !self.accept_standard_enums {
+                    return Err(self.error(ErrorCode::WrongEnumFormat));
+                }
+                self.consume();
+                self.parse_enum_map(visitor)
+            }
+            None => Err(self.error(ErrorCode::EofWhileParsingValue)),
+            _ => {
+                if !self.accept_standard_enums && !self.accept_legacy_enums {
+                    return Err(self.error(ErrorCode::WrongEnumFormat));
+                }
+                visitor.visit_enum(UnitVariantAccess { de: self })
+            }
+        }
+    }
+
+    #[inline]
+    fn is_human_readable(&self) -> bool {
+        false
+    }
+
+    serde::forward_to_deserialize_any! {
+        bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string unit
+        unit_struct seq tuple tuple_struct map struct identifier ignored_any
+        bytes byte_buf
+    }
+}
+
+impl<R> Deserializer<R>
+where
+    R: Offset,
+{
+    /// Return the current offset in the reader
+    #[inline]
+    pub fn byte_offset(&self) -> usize {
+        self.read.byte_offset()
+    }
+}
+
+trait MakeError {
+    fn error(&self, code: ErrorCode) -> Error;
+}
+
+struct SeqAccess<'a, R> {
+    de: &'a mut Deserializer<R>,
+    len: &'a mut usize,
+}
+
+impl<'de, 'a, R> de::SeqAccess<'de> for SeqAccess<'a, R>
+where
+    R: Read<'de>,
+{
+    type Error = Error;
+
+    fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
+    where
+        T: de::DeserializeSeed<'de>,
+    {
+        if *self.len == 0 {
+            return Ok(None);
+        }
+        *self.len -= 1;
+
+        let value = seed.deserialize(&mut *self.de)?;
+        Ok(Some(value))
+    }
+
+    fn size_hint(&self) -> Option<usize> {
+        Some(*self.len)
+    }
+}
+
+impl<'de, 'a, R> MakeError for SeqAccess<'a, R>
+where
+    R: Read<'de>,
+{
+    fn error(&self, code: ErrorCode) -> Error {
+        self.de.error(code)
+    }
+}
+
+struct IndefiniteSeqAccess<'a, R> {
+    de: &'a mut Deserializer<R>,
+}
+
+impl<'de, 'a, R> de::SeqAccess<'de> for IndefiniteSeqAccess<'a, R>
+where
+    R: Read<'de>,
+{
+    type Error = Error;
+
+    fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
+    where
+        T: de::DeserializeSeed<'de>,
+    {
+        match self.de.peek()? {
+            Some(0xff) => return Ok(None),
+            Some(_) => {}
+            None => return Err(self.de.error(ErrorCode::EofWhileParsingArray)),
+        }
+
+        let value = seed.deserialize(&mut *self.de)?;
+        Ok(Some(value))
+    }
+}
+
+impl<'de, 'a, R> MakeError for IndefiniteSeqAccess<'a, R>
+where
+    R: Read<'de>,
+{
+    fn error(&self, code: ErrorCode) -> Error {
+        self.de.error(code)
+    }
+}
+
+struct MapAccess<'a, R> {
+    de: &'a mut Deserializer<R>,
+    len: &'a mut usize,
+    accept_named: bool,
+    accept_packed: bool,
+}
+
+impl<'de, 'a, R> de::MapAccess<'de> for MapAccess<'a, R>
+where
+    R: Read<'de>,
+{
+    type Error = Error;
+
+    fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
+    where
+        K: de::DeserializeSeed<'de>,
+    {
+        if *self.len == 0 {
+            return Ok(None);
+        }
+        *self.len -= 1;
+
+        match self.de.peek()? {
+            Some(_byte @ 0x00..=0x1b) if !self.accept_packed => {
+                return Err(self.de.error(ErrorCode::WrongStructFormat));
+            }
+            Some(_byte @ 0x60..=0x7f) if !self.accept_named => {
+                return Err(self.de.error(ErrorCode::WrongStructFormat));
+            }
+            _ => {}
+        };
+
+        let value = seed.deserialize(&mut *self.de)?;
+        Ok(Some(value))
+    }
+
+    fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
+    where
+        V: de::DeserializeSeed<'de>,
+    {
+        seed.deserialize(&mut *self.de)
+    }
+
+    fn size_hint(&self) -> Option<usize> {
+        Some(*self.len)
+    }
+}
+
+impl<'de, 'a, R> MakeError for MapAccess<'a, R>
+where
+    R: Read<'de>,
+{
+    fn error(&self, code: ErrorCode) -> Error {
+        self.de.error(code)
+    }
+}
+
+struct IndefiniteMapAccess<'a, R> {
+    de: &'a mut Deserializer<R>,
+    accept_packed: bool,
+    accept_named: bool,
+}
+
+impl<'de, 'a, R> de::MapAccess<'de> for IndefiniteMapAccess<'a, R>
+where
+    R: Read<'de>,
+{
+    type Error = Error;
+
+    fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
+    where
+        K: de::DeserializeSeed<'de>,
+    {
+        match self.de.peek()? {
+            Some(_byte @ 0x00..=0x1b) if !self.accept_packed => {
+                return Err(self.de.error(ErrorCode::WrongStructFormat))
+            }
+            Some(_byte @ 0x60..=0x7f) if !self.accept_named => {
+                return Err(self.de.error(ErrorCode::WrongStructFormat))
+            }
+            Some(0xff) => return Ok(None),
+            Some(_) => {}
+            None => return Err(self.de.error(ErrorCode::EofWhileParsingMap)),
+        }
+
+        let value = seed.deserialize(&mut *self.de)?;
+        Ok(Some(value))
+    }
+
+    fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
+    where
+        V: de::DeserializeSeed<'de>,
+    {
+        seed.deserialize(&mut *self.de)
+    }
+}
+
+struct UnitVariantAccess<'a, R> {
+    de: &'a mut Deserializer<R>,
+}
+
+impl<'de, 'a, R> de::EnumAccess<'de> for UnitVariantAccess<'a, R>
+where
+    R: Read<'de>,
+{
+    type Error = Error;
+    type Variant = UnitVariantAccess<'a, R>;
+
+    fn variant_seed<V>(self, seed: V) -> Result<(V::Value, UnitVariantAccess<'a, R>)>
+    where
+        V: de::DeserializeSeed<'de>,
+    {
+        let variant = seed.deserialize(&mut *self.de)?;
+        Ok((variant, self))
+    }
+}
+
+impl<'de, 'a, R> de::VariantAccess<'de> for UnitVariantAccess<'a, R>
+where
+    R: Read<'de>,
+{
+    type Error = Error;
+
+    fn unit_variant(self) -> Result<()> {
+        Ok(())
+    }
+
+    fn newtype_variant_seed<T>(self, _seed: T) -> Result<T::Value>
+    where
+        T: de::DeserializeSeed<'de>,
+    {
+        Err(de::Error::invalid_type(
+            de::Unexpected::UnitVariant,
+            &"newtype variant",
+        ))
+    }
+
+    fn tuple_variant<V>(self, _len: usize, _visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        Err(de::Error::invalid_type(
+            de::Unexpected::UnitVariant,
+            &"tuple variant",
+        ))
+    }
+
+    fn struct_variant<V>(self, _fields: &'static [&'static str], _visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        Err(de::Error::invalid_type(
+            de::Unexpected::UnitVariant,
+            &"struct variant",
+        ))
+    }
+}
+
+struct VariantAccess<T> {
+    seq: T,
+}
+
+impl<'de, T> de::EnumAccess<'de> for VariantAccess<T>
+where
+    T: de::SeqAccess<'de, Error = Error> + MakeError,
+{
+    type Error = Error;
+    type Variant = VariantAccess<T>;
+
+    fn variant_seed<V>(mut self, seed: V) -> Result<(V::Value, VariantAccess<T>)>
+    where
+        V: de::DeserializeSeed<'de>,
+    {
+        let variant = match self.seq.next_element_seed(seed) {
+            Ok(Some(variant)) => variant,
+            Ok(None) => return Err(self.seq.error(ErrorCode::ArrayTooShort)),
+            Err(e) => return Err(e),
+        };
+        Ok((variant, self))
+    }
+}
+
+impl<'de, T> de::VariantAccess<'de> for VariantAccess<T>
+where
+    T: de::SeqAccess<'de, Error = Error> + MakeError,
+{
+    type Error = Error;
+
+    fn unit_variant(mut self) -> Result<()> {
+        match self.seq.next_element() {
+            Ok(Some(())) => Ok(()),
+            Ok(None) => Err(self.seq.error(ErrorCode::ArrayTooLong)),
+            Err(e) => Err(e),
+        }
+    }
+
+    fn newtype_variant_seed<S>(mut self, seed: S) -> Result<S::Value>
+    where
+        S: de::DeserializeSeed<'de>,
+    {
+        match self.seq.next_element_seed(seed) {
+            Ok(Some(variant)) => Ok(variant),
+            Ok(None) => Err(self.seq.error(ErrorCode::ArrayTooShort)),
+            Err(e) => Err(e),
+        }
+    }
+
+    fn tuple_variant<V>(self, _len: usize, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        visitor.visit_seq(self.seq)
+    }
+
+    fn struct_variant<V>(mut self, _fields: &'static [&'static str], visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        let seed = StructVariantSeed { visitor };
+        match self.seq.next_element_seed(seed) {
+            Ok(Some(variant)) => Ok(variant),
+            Ok(None) => Err(self.seq.error(ErrorCode::ArrayTooShort)),
+            Err(e) => Err(e),
+        }
+    }
+}
+
+struct StructVariantSeed<V> {
+    visitor: V,
+}
+
+impl<'de, V> de::DeserializeSeed<'de> for StructVariantSeed<V>
+where
+    V: de::Visitor<'de>,
+{
+    type Value = V::Value;
+
+    fn deserialize<D>(self, de: D) -> result::Result<V::Value, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        de.deserialize_any(self.visitor)
+    }
+}
+
+/// Iterator that deserializes a stream into multiple CBOR values.
+///
+/// A stream deserializer can be created from any CBOR deserializer using the
+/// `Deserializer::into_iter` method.
+///
+/// ```
+/// # extern crate serde_cbor;
+/// use serde_cbor::de::Deserializer;
+/// use serde_cbor::value::Value;
+///
+/// # fn main() {
+/// let data: Vec<u8> = vec![
+///     0x01, 0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72,
+/// ];
+/// let mut it = Deserializer::from_slice(&data[..]).into_iter::<Value>();
+/// assert_eq!(
+///     Value::Integer(1),
+///     it.next().unwrap().unwrap()
+/// );
+/// assert_eq!(
+///     Value::Text("foobar".to_string()),
+///     it.next().unwrap().unwrap()
+/// );
+/// # }
+/// ```
+#[derive(Debug)]
+pub struct StreamDeserializer<'de, R, T> {
+    de: Deserializer<R>,
+    output: PhantomData<T>,
+    lifetime: PhantomData<&'de ()>,
+}
+
+impl<'de, R, T> StreamDeserializer<'de, R, T>
+where
+    R: Read<'de>,
+    T: de::Deserialize<'de>,
+{
+    /// Create a new CBOR stream deserializer from one of the possible
+    /// serde_cbor input sources.
+    ///
+    /// Typically it is more convenient to use one of these methods instead:
+    ///
+    /// * `Deserializer::from_slice(...).into_iter()`
+    /// * `Deserializer::from_reader(...).into_iter()`
+    pub fn new(read: R) -> StreamDeserializer<'de, R, T> {
+        StreamDeserializer {
+            de: Deserializer::new(read),
+            output: PhantomData,
+            lifetime: PhantomData,
+        }
+    }
+}
+
+impl<'de, R, T> StreamDeserializer<'de, R, T>
+where
+    R: Offset,
+    T: de::Deserialize<'de>,
+{
+    /// Return the current offset in the reader
+    #[inline]
+    pub fn byte_offset(&self) -> usize {
+        self.de.byte_offset()
+    }
+}
+
+impl<'de, R, T> Iterator for StreamDeserializer<'de, R, T>
+where
+    R: Read<'de>,
+    T: de::Deserialize<'de>,
+{
+    type Item = Result<T>;
+
+    fn next(&mut self) -> Option<Result<T>> {
+        match self.de.peek() {
+            Ok(Some(_)) => Some(T::deserialize(&mut self.de)),
+            Ok(None) => None,
+            Err(e) => Some(Err(e)),
+        }
+    }
+}
+
+struct VariantAccessMap<T> {
+    map: T,
+}
+
+impl<'de, T> de::EnumAccess<'de> for VariantAccessMap<T>
+where
+    T: de::MapAccess<'de, Error = Error> + MakeError,
+{
+    type Error = Error;
+    type Variant = VariantAccessMap<T>;
+
+    fn variant_seed<V>(mut self, seed: V) -> Result<(V::Value, VariantAccessMap<T>)>
+    where
+        V: de::DeserializeSeed<'de>,
+    {
+        let variant = match self.map.next_key_seed(seed) {
+            Ok(Some(variant)) => variant,
+            Ok(None) => return Err(self.map.error(ErrorCode::ArrayTooShort)),
+            Err(e) => return Err(e),
+        };
+        Ok((variant, self))
+    }
+}
+
+impl<'de, T> de::VariantAccess<'de> for VariantAccessMap<T>
+where
+    T: de::MapAccess<'de, Error = Error> + MakeError,
+{
+    type Error = Error;
+
+    fn unit_variant(mut self) -> Result<()> {
+        match self.map.next_value() {
+            Ok(()) => Ok(()),
+            Err(e) => Err(e),
+        }
+    }
+
+    fn newtype_variant_seed<S>(mut self, seed: S) -> Result<S::Value>
+    where
+        S: de::DeserializeSeed<'de>,
+    {
+        self.map.next_value_seed(seed)
+    }
+
+    fn tuple_variant<V>(mut self, _len: usize, visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        let seed = StructVariantSeed { visitor };
+        self.map.next_value_seed(seed)
+    }
+
+    fn struct_variant<V>(mut self, _fields: &'static [&'static str], visitor: V) -> Result<V::Value>
+    where
+        V: de::Visitor<'de>,
+    {
+        let seed = StructVariantSeed { visitor };
+        self.map.next_value_seed(seed)
+    }
+}
diff --git a/crates/serde_cbor/src/error.rs b/crates/serde_cbor/src/error.rs
new file mode 100644
index 0000000..b1a6a45
--- /dev/null
+++ b/crates/serde_cbor/src/error.rs
@@ -0,0 +1,318 @@
+//! When serializing or deserializing CBOR goes wrong.
+use core::fmt;
+use core::result;
+use serde::de;
+use serde::ser;
+#[cfg(feature = "std")]
+use std::error;
+#[cfg(feature = "std")]
+use std::io;
+
+/// This type represents all possible errors that can occur when serializing or deserializing CBOR
+/// data.
+pub struct Error(ErrorImpl);
+
+/// Alias for a `Result` with the error type `serde_cbor::Error`.
+pub type Result<T> = result::Result<T, Error>;
+
+/// Categorizes the cause of a `serde_cbor::Error`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum Category {
+    /// The error was caused by a failure to read or write bytes on an IO stream.
+    Io,
+    /// The error was caused by input that was not syntactically valid CBOR.
+    Syntax,
+    /// The error was caused by input data that was semantically incorrect.
+    Data,
+    /// The error was caused by prematurely reaching the end of the input data.
+    Eof,
+}
+
+impl Error {
+    /// The byte offset at which the error occurred.
+    pub fn offset(&self) -> u64 {
+        self.0.offset
+    }
+
+    pub(crate) fn syntax(code: ErrorCode, offset: u64) -> Error {
+        Error(ErrorImpl { code, offset })
+    }
+
+    #[cfg(feature = "std")]
+    pub(crate) fn io(error: io::Error) -> Error {
+        Error(ErrorImpl {
+            code: ErrorCode::Io(error),
+            offset: 0,
+        })
+    }
+
+    #[cfg(all(not(feature = "std"), feature = "unsealed_read_write"))]
+    /// Creates an error signalling that the underlying `Read` encountered an I/O error.
+    pub fn io() -> Error {
+        Error(ErrorImpl {
+            code: ErrorCode::Io,
+            offset: 0,
+        })
+    }
+
+    #[cfg(feature = "unsealed_read_write")]
+    /// Creates an error signalling that the scratch buffer was too small to fit the data.
+    pub fn scratch_too_small(offset: u64) -> Error {
+        Error(ErrorImpl {
+            code: ErrorCode::ScratchTooSmall,
+            offset,
+        })
+    }
+
+    #[cfg(not(feature = "unsealed_read_write"))]
+    pub(crate) fn scratch_too_small(offset: u64) -> Error {
+        Error(ErrorImpl {
+            code: ErrorCode::ScratchTooSmall,
+            offset,
+        })
+    }
+
+    #[cfg(feature = "unsealed_read_write")]
+    /// Creates an error with a custom message.
+    ///
+    /// **Note**: When the "std" feature is disabled, the message will be discarded.
+    pub fn message<T: fmt::Display>(_msg: T) -> Error {
+        #[cfg(not(feature = "std"))]
+        {
+            Error(ErrorImpl {
+                code: ErrorCode::Message,
+                offset: 0,
+            })
+        }
+        #[cfg(feature = "std")]
+        {
+            Error(ErrorImpl {
+                code: ErrorCode::Message(_msg.to_string()),
+                offset: 0,
+            })
+        }
+    }
+
+    #[cfg(not(feature = "unsealed_read_write"))]
+    pub(crate) fn message<T: fmt::Display>(_msg: T) -> Error {
+        #[cfg(not(feature = "std"))]
+        {
+            Error(ErrorImpl {
+                code: ErrorCode::Message,
+                offset: 0,
+            })
+        }
+        #[cfg(feature = "std")]
+        {
+            Error(ErrorImpl {
+                code: ErrorCode::Message(_msg.to_string()),
+                offset: 0,
+            })
+        }
+    }
+
+    #[cfg(feature = "unsealed_read_write")]
+    /// Creates an error signalling that the underlying read
+    /// encountered an end of input.
+    pub fn eof(offset: u64) -> Error {
+        Error(ErrorImpl {
+            code: ErrorCode::EofWhileParsingValue,
+            offset,
+        })
+    }
+
+    /// Categorizes the cause of this error.
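+    ///
+    /// A minimal sketch of how the classification can be inspected (assumes the
+    /// default `std` feature so that `from_slice` is available):
+    ///
+    /// ```
+    /// use serde_cbor::error::Category;
+    ///
+    /// // An empty input fails with a premature end-of-input error.
+    /// let err = serde_cbor::from_slice::<u8>(&[]).unwrap_err();
+    /// assert_eq!(err.classify(), Category::Eof);
+    /// ```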
+    pub fn classify(&self) -> Category {
+        match self.0.code {
+            #[cfg(feature = "std")]
+            ErrorCode::Message(_) => Category::Data,
+            #[cfg(not(feature = "std"))]
+            ErrorCode::Message => Category::Data,
+            #[cfg(feature = "std")]
+            ErrorCode::Io(_) => Category::Io,
+            #[cfg(not(feature = "std"))]
+            ErrorCode::Io => Category::Io,
+            ErrorCode::ScratchTooSmall => Category::Io,
+            ErrorCode::EofWhileParsingValue
+            | ErrorCode::EofWhileParsingArray
+            | ErrorCode::EofWhileParsingMap => Category::Eof,
+            ErrorCode::LengthOutOfRange
+            | ErrorCode::InvalidUtf8
+            | ErrorCode::UnassignedCode
+            | ErrorCode::UnexpectedCode
+            | ErrorCode::TrailingData
+            | ErrorCode::ArrayTooShort
+            | ErrorCode::ArrayTooLong
+            | ErrorCode::RecursionLimitExceeded
+            | ErrorCode::WrongEnumFormat
+            | ErrorCode::WrongStructFormat => Category::Syntax,
+        }
+    }
+
+    /// Returns true if this error was caused by a failure to read or write bytes on an IO stream.
+    pub fn is_io(&self) -> bool {
+        match self.classify() {
+            Category::Io => true,
+            _ => false,
+        }
+    }
+
+    /// Returns true if this error was caused by input that was not syntactically valid CBOR.
+    pub fn is_syntax(&self) -> bool {
+        match self.classify() {
+            Category::Syntax => true,
+            _ => false,
+        }
+    }
+
+    /// Returns true if this error was caused by data that was semantically incorrect.
+    pub fn is_data(&self) -> bool {
+        match self.classify() {
+            Category::Data => true,
+            _ => false,
+        }
+    }
+
+    /// Returns true if this error was caused by prematurely reaching the end of the input data.
+    pub fn is_eof(&self) -> bool {
+        match self.classify() {
+            Category::Eof => true,
+            _ => false,
+        }
+    }
+
+    /// Returns true if this error was caused by the scratch buffer being too small.
+    ///
+    /// Note this being `true` implies that `is_io()` is also `true`.
+    pub fn is_scratch_too_small(&self) -> bool {
+        match self.0.code {
+            ErrorCode::ScratchTooSmall => true,
+            _ => false,
+        }
+    }
+}
+
+#[cfg(feature = "std")]
+impl error::Error for Error {
+    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+        match self.0.code {
+            ErrorCode::Io(ref err) => Some(err),
+            _ => None,
+        }
+    }
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self.0.offset == 0 {
+            fmt::Display::fmt(&self.0.code, f)
+        } else {
+            write!(f, "{} at offset {}", self.0.code, self.0.offset)
+        }
+    }
+}
+
+impl fmt::Debug for Error {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&self.0, fmt)
+    }
+}
+
+impl de::Error for Error {
+    fn custom<T: fmt::Display>(msg: T) -> Error {
+        Error::message(msg)
+    }
+
+    fn invalid_type(unexp: de::Unexpected<'_>, exp: &dyn de::Expected) -> Error {
+        if let de::Unexpected::Unit = unexp {
+            Error::custom(format_args!("invalid type: null, expected {}", exp))
+        } else {
+            Error::custom(format_args!("invalid type: {}, expected {}", unexp, exp))
+        }
+    }
+}
+
+impl ser::Error for Error {
+    fn custom<T: fmt::Display>(msg: T) -> Error {
+        Error::message(msg)
+    }
+}
+
+#[cfg(feature = "std")]
+impl From<io::Error> for Error {
+    fn from(e: io::Error) -> Error {
+        Error::io(e)
+    }
+}
+
+#[cfg(not(feature = "std"))]
+impl From<core::fmt::Error> for Error {
+    fn from(_: core::fmt::Error) -> Error {
+        Error(ErrorImpl {
+            code: ErrorCode::Message,
+            offset: 0,
+        })
+    }
+}
+
+#[derive(Debug)]
+struct ErrorImpl {
+    code: ErrorCode,
+    offset: u64,
+}
+
+#[derive(Debug)]
+pub(crate) enum ErrorCode {
+    #[cfg(feature = "std")]
+    Message(String),
+    #[cfg(not(feature = "std"))]
+    Message,
+    #[cfg(feature = "std")]
+    Io(io::Error),
+    #[allow(unused)]
+    #[cfg(not(feature = "std"))]
+    Io,
+    ScratchTooSmall,
+    EofWhileParsingValue,
+    EofWhileParsingArray,
+    EofWhileParsingMap,
+    LengthOutOfRange,
+    InvalidUtf8,
+    UnassignedCode,
+    UnexpectedCode,
+    TrailingData,
+    ArrayTooShort,
+    ArrayTooLong,
+    RecursionLimitExceeded,
+    WrongEnumFormat,
+    WrongStructFormat,
+}
+
+impl fmt::Display for ErrorCode {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            #[cfg(feature = "std")]
+            ErrorCode::Message(ref msg) => f.write_str(msg),
+            #[cfg(not(feature = "std"))]
+            ErrorCode::Message => f.write_str("Unknown error"),
+            #[cfg(feature = "std")]
+            ErrorCode::Io(ref err) => fmt::Display::fmt(err, f),
+            #[cfg(not(feature = "std"))]
+            ErrorCode::Io => f.write_str("Unknown I/O error"),
+            ErrorCode::ScratchTooSmall => f.write_str("Scratch buffer too small"),
+            ErrorCode::EofWhileParsingValue => f.write_str("EOF while parsing a value"),
+            ErrorCode::EofWhileParsingArray => f.write_str("EOF while parsing an array"),
+            ErrorCode::EofWhileParsingMap => f.write_str("EOF while parsing a map"),
+            ErrorCode::LengthOutOfRange => f.write_str("length out of range"),
+            ErrorCode::InvalidUtf8 => f.write_str("invalid UTF-8"),
+            ErrorCode::UnassignedCode => f.write_str("unassigned type"),
+            ErrorCode::UnexpectedCode => f.write_str("unexpected code"),
+            ErrorCode::TrailingData => f.write_str("trailing data"),
+            ErrorCode::ArrayTooShort => f.write_str("array too short"),
+            ErrorCode::ArrayTooLong => f.write_str("array too long"),
+            ErrorCode::RecursionLimitExceeded => f.write_str("recursion limit exceeded"),
+            ErrorCode::WrongEnumFormat => f.write_str("wrong enum format"),
+            ErrorCode::WrongStructFormat => f.write_str("wrong struct format"),
+        }
+    }
+}
diff --git a/crates/serde_cbor/src/lib.rs b/crates/serde_cbor/src/lib.rs
new file mode 100644
index 0000000..5566854
--- /dev/null
+++ b/crates/serde_cbor/src/lib.rs
@@ -0,0 +1,369 @@
+//! CBOR and serialization.
+//!
+//! # Usage
+//!
+//! Serde CBOR supports Rust 1.40 and up. Add this to your `Cargo.toml`:
+//! ```toml
+//! [dependencies]
+//! serde_cbor = "0.10"
+//! ```
+//!
+//! Storing and loading Rust types is easy and requires only
+//! minimal modifications to the program code.
+//!
+//! ```rust
+//! use serde_derive::{Deserialize, Serialize};
+//! use std::error::Error;
+//! use std::fs::File;
+//!
+//! // Types annotated with `Serialize` can be stored as CBOR.
+//! // To be able to load them again add `Deserialize`.
+//! #[derive(Debug, Serialize, Deserialize)]
+//! struct Mascot {
+//!     name: String,
+//!     species: String,
+//!     year_of_birth: u32,
+//! }
+//!
+//! fn main() -> Result<(), Box<dyn Error>> {
+//!     let ferris = Mascot {
+//!         name: "Ferris".to_owned(),
+//!         species: "crab".to_owned(),
+//!         year_of_birth: 2015,
+//!     };
+//!
+//!     let ferris_file = File::create("examples/ferris.cbor")?;
+//!     // Write Ferris to the given file.
+//!     // Instead of a file you can use any type that implements `io::Write`,
+//!     // such as an HTTP body, a database connection, etc.

+//!     serde_cbor::to_writer(ferris_file, &ferris)?;
+//!
+//!     let tux_file = File::open("examples/tux.cbor")?;
+//!     // Load Tux from a file.
+//!     // Serde CBOR performs roundtrip serialization meaning that
+//!     // the data will not change in any way.
+//!     let tux: Mascot = serde_cbor::from_reader(tux_file)?;
+//!
+//!     println!("{:?}", tux);
+//!     // prints: Mascot { name: "Tux", species: "penguin", year_of_birth: 1996 }
+//!
+//!     Ok(())
+//! }
+//! ```
+//!
+//! There are a lot of options available to customize the format.
+//! To operate on untyped CBOR values have a look at the `Value` type.
+//!
+//! # Type-based Serialization and Deserialization
+//! Serde provides a mechanism for low boilerplate serialization & deserialization of values to and
+//! from CBOR via the serialization API. To be able to serialize a piece of data, it must implement
+//! the `serde::Serialize` trait. To be able to deserialize a piece of data, it must implement the
+//! `serde::Deserialize` trait. Serde provides an annotation to automatically generate the
+//! code for these traits: `#[derive(Serialize, Deserialize)]`.
+//!
+//! The CBOR API also provides an enum `serde_cbor::Value`.
+//!
+//! # Packed Encoding
+//! When serializing structs or enums in CBOR the keys or enum variant names will be serialized
+//! as string keys to a map. Especially in embedded environments this can increase the file
+//! size too much. In packed encoding all struct keys, as well as any enum variant that has no data,
+//! will be serialized as variable sized integers. The first 24 entries in any struct consume only a
+//! single byte!  Packed encoding uses serde's preferred [externally tagged enum
+//! format](https://serde.rs/enum-representations.html) and therefore serializes enum variant names
+//! as string keys when that variant contains data.  So, in the packed encoding example, `FirstVariant`
+//! encodes to a single byte, but encoding `SecondVariant` requires 16 bytes.
+//!
+//! To serialize a document in this format use `Serializer::new(writer).packed_format()` or
+//! the shorthand `ser::to_vec_packed`. The deserialization works without any changes.
+//!
+//! If you would like to omit the enum variant encoding for all variants, including ones that
+//! contain data, you can add `legacy_enums()` in addition to `packed_format()`, as can be seen
+//! in the "Serialize using minimal encoding" example below.
+//!
+//! # Self describing documents
+//! In some contexts different formats are used but there is no way to declare the format used
+//! out of band. For this reason CBOR has a magic number that may be added before any document.
+//! Self describing documents are created with `serializer.self_describe()`.
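+//!
+//! A minimal sketch (assuming the default `std` feature) of prefixing the output with
+//! the self-describe tag; the writer setup mirrors the "minimal encoding" example below:
+//!
+//! ```rust
+//! use serde::Serialize;
+//! use serde_cbor::{Serializer, ser::IoWrite};
+//!
+//! let mut vec = Vec::new();
+//! {
+//!     let mut serializer = Serializer::new(IoWrite::new(&mut vec));
+//!     serializer.self_describe().unwrap();
+//!     42u64.serialize(&mut serializer).unwrap();
+//! }
+//! // The stream now starts with tag 55799, encoded as 0xd9 0xd9 0xf7.
+//! assert_eq!(vec[..3], [0xd9, 0xd9, 0xf7]);
+//! ```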
+//!
+//! # Examples
+//! Read a CBOR value that is known to be a map of string keys to string values and print it.
+//!
+//! ```rust
+//! use std::collections::BTreeMap;
+//! use serde_cbor::from_slice;
+//!
+//! let slice = b"\xa5aaaAabaBacaCadaDaeaE";
+//! let value: BTreeMap<String, String> = from_slice(slice).unwrap();
+//! println!("{:?}", value); // {"e": "E", "d": "D", "a": "A", "c": "C", "b": "B"}
+//! ```
+//!
+//! Read a general CBOR value with an unknown content.
+//!
+//! ```rust
+//! use serde_cbor::from_slice;
+//! use serde_cbor::value::Value;
+//!
+//! let slice = b"\x82\x01\xa1aaab";
+//! let value: Value = from_slice(slice).unwrap();
+//! println!("{:?}", value); // Array([U64(1), Object({String("a"): String("b")})])
+//! ```
+//!
+//! Serialize an object.
+//!
+//! ```rust
+//! use std::collections::BTreeMap;
+//! use serde_cbor::to_vec;
+//!
+//! let mut programming_languages = BTreeMap::new();
+//! programming_languages.insert("rust", vec!["safe", "concurrent", "fast"]);
+//! programming_languages.insert("python", vec!["powerful", "friendly", "open"]);
+//! programming_languages.insert("js", vec!["lightweight", "interpreted", "object-oriented"]);
+//! let encoded = to_vec(&programming_languages);
+//! assert_eq!(encoded.unwrap().len(), 103);
+//! ```
+//!
+//! Deserializing data in the middle of a slice
+//! ```
+//! # extern crate serde_cbor;
+//! use serde_cbor::Deserializer;
+//!
+//! # fn main() {
+//! let data: Vec<u8> = vec![
+//!     0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72, 0x66, 0x66, 0x6f, 0x6f, 0x62,
+//!     0x61, 0x72,
+//! ];
+//! let mut deserializer = Deserializer::from_slice(&data);
+//! let value: &str = serde::de::Deserialize::deserialize(&mut deserializer)
+//!     .unwrap();
+//! let rest = &data[deserializer.byte_offset()..];
+//! assert_eq!(value, "foobar");
+//! assert_eq!(rest, &[0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]);
+//! # }
+//! ```
+//!
+//! Serialize using packed encoding
+//!
+//! ```rust
+//! use serde_derive::{Deserialize, Serialize};
+//! use serde_cbor::ser::to_vec_packed;
+//! use WithTwoVariants::*;
+//!
+//! #[derive(Debug, Serialize, Deserialize)]
+//! enum WithTwoVariants {
+//!     FirstVariant,
+//!     SecondVariant(u8),
+//! }
+//!
+//! let cbor = to_vec_packed(&FirstVariant).unwrap();
+//! assert_eq!(cbor.len(), 1);
+//!
+//! let cbor = to_vec_packed(&SecondVariant(0)).unwrap();
+//! assert_eq!(cbor.len(), 16); // Includes 13 bytes of "SecondVariant"
+//! ```
+//!
+//! Serialize using minimal encoding
+//!
+//! ```rust
+//! use serde_derive::{Deserialize, Serialize};
+//! use serde_cbor::{Result, Serializer, ser::{self, IoWrite}};
+//! use WithTwoVariants::*;
+//!
+//! fn to_vec_minimal<T>(value: &T) -> Result<Vec<u8>>
+//! where
+//!     T: serde::Serialize,
+//! {
+//!     let mut vec = Vec::new();
+//!     value.serialize(&mut Serializer::new(&mut IoWrite::new(&mut vec)).packed_format().legacy_enums())?;
+//!     Ok(vec)
+//! }
+//!
+//! #[derive(Debug, Serialize, Deserialize)]
+//! enum WithTwoVariants {
+//!     FirstVariant,
+//!     SecondVariant(u8),
+//! }
+//!
+//! let cbor = to_vec_minimal(&FirstVariant).unwrap();
+//! assert_eq!(cbor.len(), 1);
+//!
+//! let cbor = to_vec_minimal(&SecondVariant(0)).unwrap();
+//! assert_eq!(cbor.len(), 3);
+//! ```
+//!
+//! # `no-std` support
+//!
+//! Serde CBOR supports building in a `no_std` context, use the following lines
+//! in your `Cargo.toml` dependencies:
+//! ``` toml
+//! [dependencies]
+//! serde = { version = "1.0", default-features = false }
+//! serde_cbor = { version = "0.10", default-features = false }
+//! ```
+//!
+//! Without the `std` feature the functions [from_reader], [from_slice], [to_vec], and [to_writer]
+//! are not exported. To export [from_slice] and [to_vec] enable the `alloc` feature. The `alloc`
+//! feature uses the [`alloc` library][alloc-lib] and requires at least version 1.36.0 of Rust.
+//!
+//! [alloc-lib]: https://doc.rust-lang.org/alloc/
+//!
+//! *Note*: to use serde's derive macros you will need to declare the `serde`
+//! dependency like so:
+//! ``` toml
+//! serde = { version = "1.0", default-features = false, features = ["derive"] }
+//! ```
+//!
+//! Serialize an object with `no_std` and without `alloc`.
+//! ``` rust
+//! # #[macro_use] extern crate serde_derive;
+//! # fn main() -> Result<(), serde_cbor::Error> {
+//! use serde::Serialize;
+//! use serde_cbor::Serializer;
+//! use serde_cbor::ser::SliceWrite;
+//!
+//! #[derive(Serialize)]
+//! struct User {
+//!     user_id: u32,
+//!     password_hash: [u8; 4],
+//! }
+//!
+//! let mut buf = [0u8; 100];
+//! let writer = SliceWrite::new(&mut buf[..]);
+//! let mut ser = Serializer::new(writer);
+//! let user = User {
+//!     user_id: 42,
+//!     password_hash: [1, 2, 3, 4],
+//! };
+//! user.serialize(&mut ser)?;
+//! let writer = ser.into_inner();
+//! let size = writer.bytes_written();
+//! let expected = [
+//!     0xa2, 0x67, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x2a, 0x6d,
+//!     0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x68, 0x61, 0x73,
+//!     0x68, 0x84, 0x1, 0x2, 0x3, 0x4
+//! ];
+//! assert_eq!(&buf[..size], expected);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! Deserialize an object.
+//! ``` rust
+//! # #[macro_use] extern crate serde_derive;
+//! # fn main() -> Result<(), serde_cbor::Error> {
+//! #[derive(Debug, PartialEq, Deserialize)]
+//! struct User {
+//!     user_id: u32,
+//!     password_hash: [u8; 4],
+//! }
+//!
+//! let value = [
+//!     0xa2, 0x67, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x2a, 0x6d,
+//!     0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x68, 0x61, 0x73,
+//!     0x68, 0x84, 0x1, 0x2, 0x3, 0x4
+//! ];
+//!
+//! // from_slice_with_scratch will not alter the input data; use it whenever you
+//! // borrow the data from somewhere else.
+//! // You will have to size your scratch according to the input data you
+//! // expect.
+//! use serde_cbor::de::from_slice_with_scratch;
+//! let mut scratch = [0u8; 32];
+//! let user: User = from_slice_with_scratch(&value[..], &mut scratch)?;
+//! assert_eq!(user, User {
+//!     user_id: 42,
+//!     password_hash: [1, 2, 3, 4],
+//! });
+//!
+//! let mut value = [
+//!     0xa2, 0x67, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x2a, 0x6d,
+//!     0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x68, 0x61, 0x73,
+//!     0x68, 0x84, 0x1, 0x2, 0x3, 0x4
+//! ];
+//!
+//! // from_mut_slice will move data around inside the input slice; only use it
+//! // on data you own or are allowed to modify.
+//! use serde_cbor::de::from_mut_slice;
+//! let user: User = from_mut_slice(&mut value[..])?;
+//! assert_eq!(user, User {
+//!     user_id: 42,
+//!     password_hash: [1, 2, 3, 4],
+//! });
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! # Limitations
+//!
+//! While Serde CBOR strives to support all features of Serde and CBOR
+//! there are a few limitations.
+//!
+//! * [Tags] are ignored during deserialization and can't be emitted during
+//!     serialization. This is because Serde has no concept of tagged
+//!     values. See:&nbsp;[#3]
+//! * Unknown [simple values] cause an `UnassignedCode` error.
+//!     The simple values *False* and *True* are recognized and parsed as bool.
+//!     *Null* and *Undefined* are both deserialized as *unit*.
+//!     The *unit* type is serialized as *Null*. See:&nbsp;[#86]
+//! * [128-bit integers] can't be directly encoded in CBOR. If you need them
+//!     store them as a byte string. See:&nbsp;[#77]
+//!
+//! [Tags]: https://tools.ietf.org/html/rfc7049#section-2.4.4
+//! [#3]: https://github.com/pyfisch/cbor/issues/3
+//! [simple values]: https://tools.ietf.org/html/rfc7049#section-3.5
+//! [#86]: https://github.com/pyfisch/cbor/issues/86
+//! [128-bit integers]: https://doc.rust-lang.org/std/primitive.u128.html
+//! [#77]: https://github.com/pyfisch/cbor/issues/77
+
+#![deny(missing_docs)]
+#![cfg_attr(not(feature = "std"), no_std)]
+
+// When we are running tests in no_std mode we need to explicitly link std, because `cargo test`
+// will not work without it.
+#[cfg(all(not(feature = "std"), test))]
+extern crate std;
+
+#[cfg(feature = "alloc")]
+extern crate alloc;
+
+pub mod de;
+pub mod error;
+mod read;
+pub mod ser;
+pub mod tags;
+mod write;
+
+#[cfg(feature = "std")]
+pub mod value;
+
+// Re-export the [items recommended by serde](https://serde.rs/conventions.html).
+#[doc(inline)]
+pub use crate::de::{Deserializer, StreamDeserializer};
+
+#[doc(inline)]
+pub use crate::error::{Error, Result};
+
+#[doc(inline)]
+pub use crate::ser::Serializer;
+
+// Convenience functions for serialization and deserialization.
+// These functions are only available in `std` mode.
+#[cfg(feature = "std")]
+#[doc(inline)]
+pub use crate::de::from_reader;
+
+#[cfg(any(feature = "std", feature = "alloc"))]
+#[doc(inline)]
+pub use crate::de::from_slice;
+
+#[cfg(any(feature = "std", feature = "alloc"))]
+#[doc(inline)]
+pub use crate::ser::to_vec;
+
+#[cfg(feature = "std")]
+#[doc(inline)]
+pub use crate::ser::to_writer;
+
+// Re-export the value type like serde_json
+#[cfg(feature = "std")]
+#[doc(inline)]
+pub use crate::value::Value;
diff --git a/crates/serde_cbor/src/read.rs b/crates/serde_cbor/src/read.rs
new file mode 100644
index 0000000..1b53018
--- /dev/null
+++ b/crates/serde_cbor/src/read.rs
@@ -0,0 +1,637 @@
+#[cfg(feature = "alloc")]
+use alloc::{vec, vec::Vec};
+#[cfg(feature = "std")]
+use core::cmp;
+use core::mem;
+
+#[cfg(feature = "std")]
+use std::io::{self, Read as StdRead};
+
+use crate::error::{Error, ErrorCode, Result};
+
+#[cfg(not(feature = "unsealed_read_write"))]
+/// Trait used by the deserializer for iterating over input.
+///
+/// This trait is sealed by default, enabling the `unsealed_read_write` feature removes this bound
+/// to allow objects outside of this crate to implement this trait.
+pub trait Read<'de>: private::Sealed {
+    #[doc(hidden)]
+    /// Read n bytes from the input.
+    ///
+    /// Implementations that can do so are asked to return a slice with a Long lifetime that outlives
+    /// the decoder, but others (e.g. ones that need to allocate the data into a temporary buffer) can
+    /// return it with a Short lifetime that just lives for the time of read's mutable borrow of
+    /// the reader.
+    ///
+    /// This may, as a side effect, clear the reader's scratch buffer (as the provided
+    /// implementation does).
+
+    // A more appropriate lifetime setup for this (that would allow the Deserializer::convert_str
+    // to stay a function) would be something like `fn read<'a, 'r: 'a>(&'a mut 'r immut self, ...) -> ...
+    // EitherLifetime<'r, 'de>>`, which borrows self mutably for the duration of the function and
+    // downgrades that reference to an immutable one that outlives the result (protecting the
+    // scratch buffer from changes), but alas, that can't be expressed (yet?).
+    fn read<'a>(&'a mut self, n: usize) -> Result<EitherLifetime<'a, 'de>> {
+        self.clear_buffer();
+        self.read_to_buffer(n)?;
+
+        Ok(self.take_buffer())
+    }
+
+    #[doc(hidden)]
+    fn next(&mut self) -> Result<Option<u8>>;
+
+    #[doc(hidden)]
+    fn peek(&mut self) -> Result<Option<u8>>;
+
+    #[doc(hidden)]
+    fn clear_buffer(&mut self);
+
+    #[doc(hidden)]
+    fn read_to_buffer(&mut self, n: usize) -> Result<()>;
+
+    #[doc(hidden)]
+    fn take_buffer<'a>(&'a mut self) -> EitherLifetime<'a, 'de>;
+
+    #[doc(hidden)]
+    fn read_into(&mut self, buf: &mut [u8]) -> Result<()>;
+
+    #[doc(hidden)]
+    fn discard(&mut self);
+
+    #[doc(hidden)]
+    fn offset(&self) -> u64;
+}
+
+#[cfg(feature = "unsealed_read_write")]
+/// Trait used by the deserializer for iterating over input.
+pub trait Read<'de> {
+    /// Read n bytes from the input.
+    ///
+    /// Implementations that can do so are asked to return a slice with a Long lifetime that outlives
+    /// the decoder, but others (e.g. ones that need to allocate the data into a temporary buffer) can
+    /// return it with a Short lifetime that just lives for the time of read's mutable borrow of
+    /// the reader.
+    ///
+    /// This may, as a side effect, clear the reader's scratch buffer (as the provided
+    /// implementation does).
+
+    // A more appropriate lifetime setup for this (that would allow the Deserializer::convert_str
+    // to stay a function) would be something like `fn read<'a, 'r: 'a>(&'a mut 'r immut self, ...) -> ...
+    // EitherLifetime<'r, 'de>>`, which borrows self mutably for the duration of the function and
+    // downgrades that reference to an immutable one that outlives the result (protecting the
+    // scratch buffer from changes), but alas, that can't be expressed (yet?).
+    fn read<'a>(&'a mut self, n: usize) -> Result<EitherLifetime<'a, 'de>> {
+        self.clear_buffer();
+        self.read_to_buffer(n)?;
+
+        Ok(self.take_buffer())
+    }
+
+    /// Read the next byte from the input, if any.
+    fn next(&mut self) -> Result<Option<u8>>;
+
+    /// Peek at the next byte of the input, if any. This does not advance the reader, so the result
+    /// of this function will remain the same until a read or clear occurs.
+    fn peek(&mut self) -> Result<Option<u8>>;
+
+    /// Clear the underlying scratch buffer
+    fn clear_buffer(&mut self);
+
+    /// Append n bytes from the reader to the reader's scratch buffer (without clearing it)
+    fn read_to_buffer(&mut self, n: usize) -> Result<()>;
+
+    /// Read out everything accumulated in the reader's scratch buffer. This may, as a side effect,
+    /// clear it.
+    fn take_buffer<'a>(&'a mut self) -> EitherLifetime<'a, 'de>;
+
+    /// Read from the input until `buf` is full or end of input is encountered.
+    fn read_into(&mut self, buf: &mut [u8]) -> Result<()>;
+
+    /// Discard any data read by `peek`.
+    fn discard(&mut self);
+
+    /// Returns the offset from the start of the reader.
+    fn offset(&self) -> u64;
+}
+
+/// Represents a reader that can return its current position
+pub trait Offset {
+    fn byte_offset(&self) -> usize;
+}
+
+/// Represents a buffer with one of two lifetimes.
+pub enum EitherLifetime<'short, 'long> {
+    /// The short lifetime
+    Short(&'short [u8]),
+    /// The long lifetime
+    Long(&'long [u8]),
+}
+
+#[cfg(not(feature = "unsealed_read_write"))]
+mod private {
+    pub trait Sealed {}
+}
+
+/// CBOR input source that reads from a std::io input stream.
+#[cfg(feature = "std")]
+#[derive(Debug)]
+pub struct IoRead<R>
+where
+    R: io::Read,
+{
+    reader: OffsetReader<R>,
+    scratch: Vec<u8>,
+    ch: Option<u8>,
+}
+
+#[cfg(feature = "std")]
+impl<R> IoRead<R>
+where
+    R: io::Read,
+{
+    /// Creates a new CBOR input source to read from a std::io input stream.
+    pub fn new(reader: R) -> IoRead<R> {
+        IoRead {
+            reader: OffsetReader { reader, offset: 0 },
+            scratch: vec![],
+            ch: None,
+        }
+    }
+
+    #[inline]
+    fn next_inner(&mut self) -> Result<Option<u8>> {
+        let mut buf = [0; 1];
+        loop {
+            match self.reader.read(&mut buf) {
+                Ok(0) => return Ok(None),
+                Ok(_) => return Ok(Some(buf[0])),
+                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+                Err(e) => return Err(Error::io(e)),
+            }
+        }
+    }
+}
+
+#[cfg(all(feature = "std", not(feature = "unsealed_read_write")))]
+impl<R> private::Sealed for IoRead<R> where R: io::Read {}
+
+#[cfg(feature = "std")]
+impl<'de, R> Read<'de> for IoRead<R>
+where
+    R: io::Read,
+{
+    #[inline]
+    fn next(&mut self) -> Result<Option<u8>> {
+        match self.ch.take() {
+            Some(ch) => Ok(Some(ch)),
+            None => self.next_inner(),
+        }
+    }
+
+    #[inline]
+    fn peek(&mut self) -> Result<Option<u8>> {
+        match self.ch {
+            Some(ch) => Ok(Some(ch)),
+            None => {
+                self.ch = self.next_inner()?;
+                Ok(self.ch)
+            }
+        }
+    }
+
+    fn read_to_buffer(&mut self, mut n: usize) -> Result<()> {
+        // defend against malicious input pretending to be huge strings by limiting growth
+        self.scratch.reserve(cmp::min(n, 16 * 1024));
+
+        if n == 0 {
+            return Ok(());
+        }
+
+        if let Some(ch) = self.ch.take() {
+            self.scratch.push(ch);
+            n -= 1;
+        }
+
+        // n == 0 is OK here and needs no further special treatment
+
+        let transfer_result = {
+            // Prepare for take() (which consumes its reader) by creating a reference adaptor
+            // that'll only live in this block
+            let reference = self.reader.by_ref();
+            // Append the first n bytes of the reader to the scratch vector (or up to
+            // an error or EOF indicated by a shorter read)
+            let mut taken = reference.take(n as u64);
+            taken.read_to_end(&mut self.scratch)
+        };
+
+        match transfer_result {
+            Ok(r) if r == n => Ok(()),
+            Ok(_) => Err(Error::syntax(
+                ErrorCode::EofWhileParsingValue,
+                self.offset(),
+            )),
+            Err(e) => Err(Error::io(e)),
+        }
+    }
+
+    fn clear_buffer(&mut self) {
+        self.scratch.clear();
+    }
+
+    fn take_buffer<'a>(&'a mut self) -> EitherLifetime<'a, 'de> {
+        EitherLifetime::Short(&self.scratch)
+    }
+
+    fn read_into(&mut self, buf: &mut [u8]) -> Result<()> {
+        self.reader.read_exact(buf).map_err(|e| {
+            if e.kind() == io::ErrorKind::UnexpectedEof {
+                Error::syntax(ErrorCode::EofWhileParsingValue, self.offset())
+            } else {
+                Error::io(e)
+            }
+        })
+    }
+
+    #[inline]
+    fn discard(&mut self) {
+        self.ch = None;
+    }
+
+    fn offset(&self) -> u64 {
+        self.reader.offset
+    }
+}
+
+#[cfg(feature = "std")]
+impl<R> Offset for IoRead<R>
+where
+    R: std::io::Read,
+{
+    fn byte_offset(&self) -> usize {
+        self.offset() as usize
+    }
+}
+
+#[cfg(feature = "std")]
+#[derive(Debug)]
+struct OffsetReader<R> {
+    reader: R,
+    offset: u64,
+}
+
+#[cfg(feature = "std")]
+impl<R> io::Read for OffsetReader<R>
+where
+    R: io::Read,
+{
+    #[inline]
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        let r = self.reader.read(buf);
+        if let Ok(count) = r {
+            self.offset += count as u64;
+        }
+        r
+    }
+}
+
+/// A CBOR input source that reads from a slice of bytes.
+#[cfg(any(feature = "std", feature = "alloc"))]
+#[derive(Debug)]
+pub struct SliceRead<'a> {
+    slice: &'a [u8],
+    scratch: Vec<u8>,
+    index: usize,
+}
+
+#[cfg(any(feature = "std", feature = "alloc"))]
+impl<'a> SliceRead<'a> {
+    /// Creates a CBOR input source to read from a slice of bytes.
+    pub fn new(slice: &'a [u8]) -> SliceRead<'a> {
+        SliceRead {
+            slice,
+            scratch: vec![],
+            index: 0,
+        }
+    }
+
+    fn end(&self, n: usize) -> Result<usize> {
+        match self.index.checked_add(n) {
+            Some(end) if end <= self.slice.len() => Ok(end),
+            _ => Err(Error::syntax(
+                ErrorCode::EofWhileParsingValue,
+                self.slice.len() as u64,
+            )),
+        }
+    }
+}
+
+#[cfg(any(feature = "std", feature = "alloc"))]
+impl<'a> Offset for SliceRead<'a> {
+    #[inline]
+    fn byte_offset(&self) -> usize {
+        self.index
+    }
+}
+
+#[cfg(all(
+    any(feature = "std", feature = "alloc"),
+    not(feature = "unsealed_read_write")
+))]
+impl<'a> private::Sealed for SliceRead<'a> {}
+
+#[cfg(any(feature = "std", feature = "alloc"))]
+impl<'a> Read<'a> for SliceRead<'a> {
+    #[inline]
+    fn next(&mut self) -> Result<Option<u8>> {
+        Ok(if self.index < self.slice.len() {
+            let ch = self.slice[self.index];
+            self.index += 1;
+            Some(ch)
+        } else {
+            None
+        })
+    }
+
+    #[inline]
+    fn peek(&mut self) -> Result<Option<u8>> {
+        Ok(if self.index < self.slice.len() {
+            Some(self.slice[self.index])
+        } else {
+            None
+        })
+    }
+
+    fn clear_buffer(&mut self) {
+        self.scratch.clear();
+    }
+
+    fn read_to_buffer(&mut self, n: usize) -> Result<()> {
+        let end = self.end(n)?;
+        let slice = &self.slice[self.index..end];
+        self.scratch.extend_from_slice(slice);
+        self.index = end;
+
+        Ok(())
+    }
+
+    #[inline]
+    fn read<'b>(&'b mut self, n: usize) -> Result<EitherLifetime<'b, 'a>> {
+        let end = self.end(n)?;
+        let slice = &self.slice[self.index..end];
+        self.index = end;
+        Ok(EitherLifetime::Long(slice))
+    }
+
+    fn take_buffer<'b>(&'b mut self) -> EitherLifetime<'b, 'a> {
+        EitherLifetime::Short(&self.scratch)
+    }
+
+    #[inline]
+    fn read_into(&mut self, buf: &mut [u8]) -> Result<()> {
+        let end = self.end(buf.len())?;
+        buf.copy_from_slice(&self.slice[self.index..end]);
+        self.index = end;
+        Ok(())
+    }
+
+    #[inline]
+    fn discard(&mut self) {
+        self.index += 1;
+    }
+
+    fn offset(&self) -> u64 {
+        self.index as u64
+    }
+}
+
+/// A CBOR input source that reads from a slice of bytes using a fixed size scratch buffer.
+///
+/// [`SliceRead`](struct.SliceRead.html) and [`MutSliceRead`](struct.MutSliceRead.html) are usually
+/// preferred over this, as they can handle indefinite length items.
+#[derive(Debug)]
+pub struct SliceReadFixed<'a, 'b> {
+    slice: &'a [u8],
+    scratch: &'b mut [u8],
+    index: usize,
+    scratch_index: usize,
+}
+
+impl<'a, 'b> SliceReadFixed<'a, 'b> {
+    /// Creates a CBOR input source to read from a slice of bytes, backed by a scratch buffer.
+    pub fn new(slice: &'a [u8], scratch: &'b mut [u8]) -> SliceReadFixed<'a, 'b> {
+        SliceReadFixed {
+            slice,
+            scratch,
+            index: 0,
+            scratch_index: 0,
+        }
+    }
+
+    fn end(&self, n: usize) -> Result<usize> {
+        match self.index.checked_add(n) {
+            Some(end) if end <= self.slice.len() => Ok(end),
+            _ => Err(Error::syntax(
+                ErrorCode::EofWhileParsingValue,
+                self.slice.len() as u64,
+            )),
+        }
+    }
+
+    fn scratch_end(&self, n: usize) -> Result<usize> {
+        match self.scratch_index.checked_add(n) {
+            Some(end) if end <= self.scratch.len() => Ok(end),
+            _ => Err(Error::scratch_too_small(self.index as u64)),
+        }
+    }
+}
+
+#[cfg(not(feature = "unsealed_read_write"))]
+impl<'a, 'b> private::Sealed for SliceReadFixed<'a, 'b> {}
+
+impl<'a, 'b> Read<'a> for SliceReadFixed<'a, 'b> {
+    #[inline]
+    fn next(&mut self) -> Result<Option<u8>> {
+        Ok(if self.index < self.slice.len() {
+            let ch = self.slice[self.index];
+            self.index += 1;
+            Some(ch)
+        } else {
+            None
+        })
+    }
+
+    #[inline]
+    fn peek(&mut self) -> Result<Option<u8>> {
+        Ok(if self.index < self.slice.len() {
+            Some(self.slice[self.index])
+        } else {
+            None
+        })
+    }
+
+    fn clear_buffer(&mut self) {
+        self.scratch_index = 0;
+    }
+
+    fn read_to_buffer(&mut self, n: usize) -> Result<()> {
+        let end = self.end(n)?;
+        let scratch_end = self.scratch_end(n)?;
+        let slice = &self.slice[self.index..end];
+        self.scratch[self.scratch_index..scratch_end].copy_from_slice(&slice);
+        self.index = end;
+        self.scratch_index = scratch_end;
+
+        Ok(())
+    }
+
+    fn read<'c>(&'c mut self, n: usize) -> Result<EitherLifetime<'c, 'a>> {
+        let end = self.end(n)?;
+        let slice = &self.slice[self.index..end];
+        self.index = end;
+        Ok(EitherLifetime::Long(slice))
+    }
+
+    fn take_buffer<'c>(&'c mut self) -> EitherLifetime<'c, 'a> {
+        EitherLifetime::Short(&self.scratch[0..self.scratch_index])
+    }
+
+    #[inline]
+    fn read_into(&mut self, buf: &mut [u8]) -> Result<()> {
+        let end = self.end(buf.len())?;
+        buf.copy_from_slice(&self.slice[self.index..end]);
+        self.index = end;
+        Ok(())
+    }
+
+    #[inline]
+    fn discard(&mut self) {
+        self.index += 1;
+    }
+
+    fn offset(&self) -> u64 {
+        self.index as u64
+    }
+}
+
+#[cfg(any(feature = "std", feature = "alloc"))]
+impl<'a, 'b> Offset for SliceReadFixed<'a, 'b> {
+    #[inline]
+    fn byte_offset(&self) -> usize {
+        self.index
+    }
+}
+
+/// A CBOR input source that reads from a slice of bytes, and can move data around internally to
+/// reassemble indefinite strings without the need of an allocated scratch buffer.
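+///
+/// A minimal sketch, assuming the crate's `de::from_mut_slice` entry point is the usual way to
+/// drive this reader:
+///
+/// ```rust
+/// // 0x63 is a definite-length CBOR text string of length 3 ("foo").
+/// let mut data = [0x63, b'f', b'o', b'o'];
+/// let text: String = serde_cbor::de::from_mut_slice(&mut data).unwrap();
+/// assert_eq!(text, "foo");
+/// ```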
+#[derive(Debug)]
+pub struct MutSliceRead<'a> {
+    /// A complete view of the reader's data. Bytes before `buffer_end` are guaranteed not to be
+    /// mutated any further.
+    slice: &'a mut [u8],
+    /// Read cursor position in slice
+    index: usize,
+    /// Number of bytes already discarded from the slice
+    before: usize,
+    /// End of the buffer area holding all bytes collected so far via `read_to_buffer`. This is
+    /// always <= `index`.
+    buffer_end: usize,
+}
+
+impl<'a> MutSliceRead<'a> {
+    /// Creates a CBOR input source to read from a slice of bytes.
+    pub fn new(slice: &'a mut [u8]) -> MutSliceRead<'a> {
+        MutSliceRead {
+            slice,
+            index: 0,
+            before: 0,
+            buffer_end: 0,
+        }
+    }
+
+    fn end(&self, n: usize) -> Result<usize> {
+        match self.index.checked_add(n) {
+            Some(end) if end <= self.slice.len() => Ok(end),
+            _ => Err(Error::syntax(
+                ErrorCode::EofWhileParsingValue,
+                self.slice.len() as u64,
+            )),
+        }
+    }
+}
+
+#[cfg(not(feature = "unsealed_read_write"))]
+impl<'a> private::Sealed for MutSliceRead<'a> {}
+
+impl<'a> Read<'a> for MutSliceRead<'a> {
+    #[inline]
+    fn next(&mut self) -> Result<Option<u8>> {
+        // This is duplicated from SliceRead; could the duplication be avoided?
+        Ok(if self.index < self.slice.len() {
+            let ch = self.slice[self.index];
+            self.index += 1;
+            Some(ch)
+        } else {
+            None
+        })
+    }
+
+    #[inline]
+    fn peek(&mut self) -> Result<Option<u8>> {
+        // This is duplicated from SliceRead; could the duplication be avoided?
+        Ok(if self.index < self.slice.len() {
+            Some(self.slice[self.index])
+        } else {
+            None
+        })
+    }
+
+    fn clear_buffer(&mut self) {
+        self.slice = &mut mem::replace(&mut self.slice, &mut [])[self.index..];
+        self.before += self.index;
+        self.index = 0;
+        self.buffer_end = 0;
+    }
+
+    fn read_to_buffer(&mut self, n: usize) -> Result<()> {
+        let end = self.end(n)?;
+        debug_assert!(
+            self.buffer_end <= self.index,
+            "MutSliceRead invariant violated: scratch buffer exceeds index"
+        );
+        self.slice[self.buffer_end..end].rotate_left(self.index - self.buffer_end);
+        self.buffer_end += n;
+        self.index = end;
+
+        Ok(())
+    }
+
+    fn take_buffer<'b>(&'b mut self) -> EitherLifetime<'b, 'a> {
+        let (left, right) = mem::replace(&mut self.slice, &mut []).split_at_mut(self.index);
+        self.slice = right;
+        self.before += self.index;
+        self.index = 0;
+
+        let left = &left[..self.buffer_end];
+        self.buffer_end = 0;
+
+        EitherLifetime::Long(left)
+    }
+
+    #[inline]
+    fn read_into(&mut self, buf: &mut [u8]) -> Result<()> {
+        // This is duplicated from SliceRead; could the duplication be avoided?
+        let end = self.end(buf.len())?;
+        buf.copy_from_slice(&self.slice[self.index..end]);
+        self.index = end;
+        Ok(())
+    }
+
+    #[inline]
+    fn discard(&mut self) {
+        self.index += 1;
+    }
+
+    fn offset(&self) -> u64 {
+        (self.before + self.index) as u64
+    }
+}
diff --git a/crates/serde_cbor/src/ser.rs b/crates/serde_cbor/src/ser.rs
new file mode 100644
index 0000000..7016dc3
--- /dev/null
+++ b/crates/serde_cbor/src/ser.rs
@@ -0,0 +1,743 @@
+//! Serialize a Rust data structure to CBOR data.
+
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
+
+#[cfg(feature = "std")]
+pub use crate::write::IoWrite;
+pub use crate::write::{SliceWrite, Write};
+
+use crate::error::{Error, Result};
+use half::f16;
+use serde::ser::{self, Serialize};
+#[cfg(feature = "std")]
+use std::io;
+
+use crate::tags::{get_tag, CBOR_NEWTYPE_NAME};
+
+/// Serializes a value to a vector.
+#[cfg(any(feature = "std", feature = "alloc"))]
+pub fn to_vec<T>(value: &T) -> Result<Vec<u8>>
+where
+    T: ser::Serialize,
+{
+    let mut vec = Vec::new();
+    value.serialize(&mut Serializer::new(&mut vec))?;
+    Ok(vec)
+}
+
+/// Serializes a value to a vector in packed format.
+#[cfg(feature = "std")]
+pub fn to_vec_packed<T>(value: &T) -> Result<Vec<u8>>
+where
+    T: ser::Serialize,
+{
+    let mut vec = Vec::new();
+    value.serialize(&mut Serializer::new(&mut IoWrite::new(&mut vec)).packed_format())?;
+    Ok(vec)
+}
+
+/// Serializes a value to a writer.
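+///
+/// A small sketch writing into an in-memory buffer; any `std::io::Write` target works the same
+/// way:
+///
+/// ```rust
+/// let mut out = Vec::new();
+/// serde_cbor::to_writer(&mut out, &"hello").unwrap();
+/// // 0x65 is major type 3 (text string) with length 5, followed by the UTF-8 bytes.
+/// assert_eq!(out, b"\x65hello");
+/// ```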
+#[cfg(feature = "std")]
+pub fn to_writer<W, T>(writer: W, value: &T) -> Result<()>
+where
+    W: io::Write,
+    T: ser::Serialize,
+{
+    value.serialize(&mut Serializer::new(&mut IoWrite::new(writer)))
+}
+
+/// A structure for serializing Rust values to CBOR.
+#[derive(Debug)]
+pub struct Serializer<W> {
+    writer: W,
+    packed: bool,
+    enum_as_map: bool,
+}
+
+impl<W> Serializer<W>
+where
+    W: Write,
+{
+    /// Creates a new CBOR serializer.
+    ///
+    /// `to_vec` and `to_writer` should normally be used instead of this method.
+    #[inline]
+    pub fn new(writer: W) -> Self {
+        Serializer {
+            writer,
+            packed: false,
+            enum_as_map: true,
+        }
+    }
+
+    /// Choose concise/packed format for serializer.
+    ///
+    /// In the packed format, enum variant names and struct field names
+    /// are replaced with numeric indices to conserve space.
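+    ///
+    /// A minimal sketch comparing the two encodings of a small struct (the exact sizes are
+    /// specific to this example):
+    ///
+    /// ```rust
+    /// #[macro_use]
+    /// extern crate serde_derive;
+    /// extern crate serde;
+    /// extern crate serde_cbor;
+    ///
+    /// #[derive(Serialize)]
+    /// struct Point { x: u8, y: u8 }
+    ///
+    /// fn main() {
+    ///     let p = Point { x: 1, y: 2 };
+    ///     let normal = serde_cbor::to_vec(&p).unwrap();
+    ///     let packed = serde_cbor::ser::to_vec_packed(&p).unwrap();
+    ///     // The packed form replaces the field names "x" and "y" with the indices 0 and 1.
+    ///     assert!(packed.len() < normal.len());
+    /// }
+    /// ```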
+    pub fn packed_format(mut self) -> Self {
+        self.packed = true;
+        self
+    }
+
+    /// Enable old enum format used by `serde_cbor` versions <= v0.9.
+    ///
+    /// The `legacy_enums` option determines how enums are encoded.
+    ///
+    /// This makes no difference when encoding and decoding enums using
+    /// this crate, but it shows up when decoding to a `Value` or decoding
+    /// in other languages.
+    ///
+    /// # Examples
+    ///
+    /// Given the following enum
+    ///
+    /// ```rust
+    /// enum Enum {
+    ///     Unit,
+    ///     NewType(i32),
+    ///     Tuple(String, bool),
+    ///     Struct{ x: i32, y: i32 },
+    /// }
+    /// ```
+    /// the encoding of each variant is shown below, using JSON notation for
+    /// the resulting `Value`.
+    ///
+    /// ## Default encodings
+    ///
+    /// * `Enum::Unit` encodes as `"Unit"`
+    /// * `Enum::NewType(10)` encodes as `{"NewType": 10}`
+    /// * `Enum::Tuple("x", true)` encodes as `{"Tuple": ["x", true]}`
+    /// * `Enum::Struct{ x: 5, y: -5 }` encodes as `{"Struct": {"x": 5, "y": -5}}`
+    ///
+    /// ## Legacy encodings
+    ///
+    /// * `Enum::Unit` encodes as `"Unit"`
+    /// * `Enum::NewType(10)` encodes as `["NewType", 10]`
+    /// * `Enum::Tuple("x", true)` encodes as `["Tuple", "x", true]`
+    /// * `Enum::Struct{ x: 5, y: -5 }` encodes as `["Struct", {"x": 5, "y": -5}]`
+    pub fn legacy_enums(mut self) -> Self {
+        self.enum_as_map = false;
+        self
+    }
+
+    /// Writes a CBOR self-describe tag to the stream.
+    ///
+    /// Tagging allows a decoder to distinguish different file formats based on their content
+    /// without further information.
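+    ///
+    /// A small sketch of what gets written (tag 55799 is encoded as the three bytes `d9 d9 f7`):
+    ///
+    /// ```rust
+    /// let mut out = Vec::new();
+    /// let mut ser = serde_cbor::ser::Serializer::new(&mut out);
+    /// ser.self_describe().unwrap();
+    /// assert_eq!(out, [0xd9, 0xd9, 0xf7]);
+    /// ```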
+    #[inline]
+    pub fn self_describe(&mut self) -> Result<()> {
+        let mut buf = [6 << 5 | 25, 0, 0];
+        (&mut buf[1..]).copy_from_slice(&55799u16.to_be_bytes());
+        self.writer.write_all(&buf).map_err(|e| e.into())
+    }
+
+    /// Unwrap the `Writer` from the `Serializer`.
+    #[inline]
+    pub fn into_inner(self) -> W {
+        self.writer
+    }
+
+    #[inline]
+    fn write_u8(&mut self, major: u8, value: u8) -> Result<()> {
+        if value <= 0x17 {
+            self.writer.write_all(&[major << 5 | value])
+        } else {
+            let buf = [major << 5 | 24, value];
+            self.writer.write_all(&buf)
+        }
+        .map_err(|e| e.into())
+    }
+
+    #[inline]
+    fn write_u16(&mut self, major: u8, value: u16) -> Result<()> {
+        if value <= u16::from(u8::max_value()) {
+            self.write_u8(major, value as u8)
+        } else {
+            let mut buf = [major << 5 | 25, 0, 0];
+            (&mut buf[1..]).copy_from_slice(&value.to_be_bytes());
+            self.writer.write_all(&buf).map_err(|e| e.into())
+        }
+    }
+
+    #[inline]
+    fn write_u32(&mut self, major: u8, value: u32) -> Result<()> {
+        if value <= u32::from(u16::max_value()) {
+            self.write_u16(major, value as u16)
+        } else {
+            let mut buf = [major << 5 | 26, 0, 0, 0, 0];
+            (&mut buf[1..]).copy_from_slice(&value.to_be_bytes());
+            self.writer.write_all(&buf).map_err(|e| e.into())
+        }
+    }
+
+    #[inline]
+    fn write_u64(&mut self, major: u8, value: u64) -> Result<()> {
+        if value <= u64::from(u32::max_value()) {
+            self.write_u32(major, value as u32)
+        } else {
+            let mut buf = [major << 5 | 27, 0, 0, 0, 0, 0, 0, 0, 0];
+            (&mut buf[1..]).copy_from_slice(&value.to_be_bytes());
+            self.writer.write_all(&buf).map_err(|e| e.into())
+        }
+    }
+
+    #[inline]
+    fn serialize_collection<'a>(
+        &'a mut self,
+        major: u8,
+        len: Option<usize>,
+    ) -> Result<CollectionSerializer<'a, W>> {
+        let needs_eof = match len {
+            Some(len) => {
+                self.write_u64(major, len as u64)?;
+                false
+            }
+            None => {
+                self.writer
+                    .write_all(&[major << 5 | 31])
+                    .map_err(|e| e.into())?;
+                true
+            }
+        };
+
+        Ok(CollectionSerializer {
+            ser: self,
+            needs_eof,
+        })
+    }
+}
+
+impl<'a, W> ser::Serializer for &'a mut Serializer<W>
+where
+    W: Write,
+{
+    type Ok = ();
+    type Error = Error;
+
+    type SerializeSeq = CollectionSerializer<'a, W>;
+    type SerializeTuple = &'a mut Serializer<W>;
+    type SerializeTupleStruct = &'a mut Serializer<W>;
+    type SerializeTupleVariant = &'a mut Serializer<W>;
+    type SerializeMap = CollectionSerializer<'a, W>;
+    type SerializeStruct = StructSerializer<'a, W>;
+    type SerializeStructVariant = StructSerializer<'a, W>;
+
+    #[inline]
+    fn serialize_bool(self, value: bool) -> Result<()> {
+        let value = if value { 0xf5 } else { 0xf4 };
+        self.writer.write_all(&[value]).map_err(|e| e.into())
+    }
+
+    #[inline]
+    fn serialize_i8(self, value: i8) -> Result<()> {
+        if value < 0 {
+            self.write_u8(1, -(value + 1) as u8)
+        } else {
+            self.write_u8(0, value as u8)
+        }
+    }
+
+    #[inline]
+    fn serialize_i16(self, value: i16) -> Result<()> {
+        if value < 0 {
+            self.write_u16(1, -(value + 1) as u16)
+        } else {
+            self.write_u16(0, value as u16)
+        }
+    }
+
+    #[inline]
+    fn serialize_i32(self, value: i32) -> Result<()> {
+        if value < 0 {
+            self.write_u32(1, -(value + 1) as u32)
+        } else {
+            self.write_u32(0, value as u32)
+        }
+    }
+
+    #[inline]
+    fn serialize_i64(self, value: i64) -> Result<()> {
+        if value < 0 {
+            self.write_u64(1, -(value + 1) as u64)
+        } else {
+            self.write_u64(0, value as u64)
+        }
+    }
+
+    #[inline]
+    fn serialize_i128(self, value: i128) -> Result<()> {
+        if value < 0 {
+            if -(value + 1) > i128::from(u64::max_value()) {
+                return Err(Error::message("The number can't be stored in CBOR"));
+            }
+            self.write_u64(1, -(value + 1) as u64)
+        } else {
+            if value > i128::from(u64::max_value()) {
+                return Err(Error::message("The number can't be stored in CBOR"));
+            }
+            self.write_u64(0, value as u64)
+        }
+    }
+
+    #[inline]
+    fn serialize_u8(self, value: u8) -> Result<()> {
+        self.write_u8(0, value)
+    }
+
+    #[inline]
+    fn serialize_u16(self, value: u16) -> Result<()> {
+        self.write_u16(0, value)
+    }
+
+    #[inline]
+    fn serialize_u32(self, value: u32) -> Result<()> {
+        self.write_u32(0, value)
+    }
+
+    #[inline]
+    fn serialize_u64(self, value: u64) -> Result<()> {
+        self.write_u64(0, value)
+    }
+
+    #[inline]
+    fn serialize_u128(self, value: u128) -> Result<()> {
+        if value > u128::from(u64::max_value()) {
+            return Err(Error::message("The number can't be stored in CBOR"));
+        }
+        self.write_u64(0, value as u64)
+    }
+
+    #[inline]
+    #[allow(clippy::float_cmp)]
+    fn serialize_f32(self, value: f32) -> Result<()> {
+        if value.is_infinite() {
+            if value.is_sign_positive() {
+                self.writer.write_all(&[0xf9, 0x7c, 0x00])
+            } else {
+                self.writer.write_all(&[0xf9, 0xfc, 0x00])
+            }
+        } else if value.is_nan() {
+            self.writer.write_all(&[0xf9, 0x7e, 0x00])
+        } else if f32::from(f16::from_f32(value)) == value {
+            let mut buf = [0xf9, 0, 0];
+            (&mut buf[1..]).copy_from_slice(&f16::from_f32(value).to_bits().to_be_bytes());
+            self.writer.write_all(&buf)
+        } else {
+            let mut buf = [0xfa, 0, 0, 0, 0];
+            (&mut buf[1..]).copy_from_slice(&value.to_bits().to_be_bytes());
+            self.writer.write_all(&buf)
+        }
+        .map_err(|e| e.into())
+    }
+
+    #[inline]
+    #[allow(clippy::float_cmp)]
+    fn serialize_f64(self, value: f64) -> Result<()> {
+        if !value.is_finite() || f64::from(value as f32) == value {
+            self.serialize_f32(value as f32)
+        } else {
+            let mut buf = [0xfb, 0, 0, 0, 0, 0, 0, 0, 0];
+            (&mut buf[1..]).copy_from_slice(&value.to_bits().to_be_bytes());
+            self.writer.write_all(&buf).map_err(|e| e.into())
+        }
+    }
+
+    #[inline]
+    fn serialize_char(self, value: char) -> Result<()> {
+        // A char encoded as UTF-8 takes 4 bytes at most.
+        let mut buf = [0; 4];
+        self.serialize_str(value.encode_utf8(&mut buf))
+    }
+
+    #[inline]
+    fn serialize_str(self, value: &str) -> Result<()> {
+        self.write_u64(3, value.len() as u64)?;
+        self.writer
+            .write_all(value.as_bytes())
+            .map_err(|e| e.into())
+    }
+
+    #[inline]
+    fn serialize_bytes(self, value: &[u8]) -> Result<()> {
+        self.write_u64(2, value.len() as u64)?;
+        self.writer.write_all(value).map_err(|e| e.into())
+    }
+
+    #[inline]
+    fn serialize_unit(self) -> Result<()> {
+        self.serialize_none()
+    }
+
+    #[inline]
+    fn serialize_some<T>(self, value: &T) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        value.serialize(self)
+    }
+
+    #[inline]
+    fn serialize_none(self) -> Result<()> {
+        self.writer.write_all(&[0xf6]).map_err(|e| e.into())
+    }
+
+    #[inline]
+    fn serialize_unit_struct(self, _name: &'static str) -> Result<()> {
+        self.serialize_unit()
+    }
+
+    #[inline]
+    fn serialize_unit_variant(
+        self,
+        _name: &'static str,
+        variant_index: u32,
+        variant: &'static str,
+    ) -> Result<()> {
+        if self.packed {
+            self.serialize_u32(variant_index)
+        } else {
+            self.serialize_str(variant)
+        }
+    }
+
+    #[inline]
+    fn serialize_newtype_struct<T>(self, name: &'static str, value: &T) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        if name == CBOR_NEWTYPE_NAME {
+            for tag in get_tag().into_iter() {
+                self.write_u64(6, tag)?;
+            }
+        }
+        value.serialize(self)
+    }
+
+    #[inline]
+    fn serialize_newtype_variant<T>(
+        self,
+        name: &'static str,
+        variant_index: u32,
+        variant: &'static str,
+        value: &T,
+    ) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        if self.enum_as_map {
+            self.write_u64(5, 1u64)?;
+            variant.serialize(&mut *self)?;
+        } else {
+            self.writer.write_all(&[4 << 5 | 2]).map_err(|e| e.into())?;
+            self.serialize_unit_variant(name, variant_index, variant)?;
+        }
+        value.serialize(self)
+    }
+
+    #[inline]
+    fn serialize_seq(self, len: Option<usize>) -> Result<CollectionSerializer<'a, W>> {
+        self.serialize_collection(4, len)
+    }
+
+    #[inline]
+    fn serialize_tuple(self, len: usize) -> Result<&'a mut Serializer<W>> {
+        self.write_u64(4, len as u64)?;
+        Ok(self)
+    }
+
+    #[inline]
+    fn serialize_tuple_struct(
+        self,
+        _name: &'static str,
+        len: usize,
+    ) -> Result<&'a mut Serializer<W>> {
+        self.serialize_tuple(len)
+    }
+
+    #[inline]
+    fn serialize_tuple_variant(
+        self,
+        name: &'static str,
+        variant_index: u32,
+        variant: &'static str,
+        len: usize,
+    ) -> Result<&'a mut Serializer<W>> {
+        if self.enum_as_map {
+            self.write_u64(5, 1u64)?;
+            variant.serialize(&mut *self)?;
+            self.serialize_tuple(len)
+        } else {
+            self.write_u64(4, (len + 1) as u64)?;
+            self.serialize_unit_variant(name, variant_index, variant)?;
+            Ok(self)
+        }
+    }
+
+    #[inline]
+    fn serialize_map(self, len: Option<usize>) -> Result<CollectionSerializer<'a, W>> {
+        self.serialize_collection(5, len)
+    }
+
+    #[cfg(not(feature = "std"))]
+    fn collect_str<T: ?Sized>(self, value: &T) -> Result<()>
+    where
+        T: core::fmt::Display,
+    {
+        use crate::write::FmtWrite;
+        use core::fmt::Write;
+
+        let mut w = FmtWrite::new(&mut self.writer);
+        write!(w, "{}", value)?;
+        Ok(())
+    }
+
+    #[inline]
+    fn serialize_struct(self, _name: &'static str, len: usize) -> Result<StructSerializer<'a, W>> {
+        self.write_u64(5, len as u64)?;
+        Ok(StructSerializer { ser: self, idx: 0 })
+    }
+
+    #[inline]
+    fn serialize_struct_variant(
+        self,
+        name: &'static str,
+        variant_index: u32,
+        variant: &'static str,
+        len: usize,
+    ) -> Result<StructSerializer<'a, W>> {
+        if self.enum_as_map {
+            self.write_u64(5, 1u64)?;
+        } else {
+            self.writer.write_all(&[4 << 5 | 2]).map_err(|e| e.into())?;
+        }
+        self.serialize_unit_variant(name, variant_index, variant)?;
+        self.serialize_struct(name, len)
+    }
+
+    #[inline]
+    fn is_human_readable(&self) -> bool {
+        false
+    }
+}
+
+impl<'a, W> ser::SerializeTuple for &'a mut Serializer<W>
+where
+    W: Write,
+{
+    type Ok = ();
+    type Error = Error;
+
+    #[inline]
+    fn serialize_element<T>(&mut self, value: &T) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        value.serialize(&mut **self)
+    }
+
+    #[inline]
+    fn end(self) -> Result<()> {
+        Ok(())
+    }
+}
+
+impl<'a, W> ser::SerializeTupleStruct for &'a mut Serializer<W>
+where
+    W: Write,
+{
+    type Ok = ();
+    type Error = Error;
+
+    #[inline]
+    fn serialize_field<T>(&mut self, value: &T) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        value.serialize(&mut **self)
+    }
+
+    #[inline]
+    fn end(self) -> Result<()> {
+        Ok(())
+    }
+}
+
+impl<'a, W> ser::SerializeTupleVariant for &'a mut Serializer<W>
+where
+    W: Write,
+{
+    type Ok = ();
+    type Error = Error;
+
+    #[inline]
+    fn serialize_field<T>(&mut self, value: &T) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        value.serialize(&mut **self)
+    }
+
+    #[inline]
+    fn end(self) -> Result<()> {
+        Ok(())
+    }
+}
+
+#[doc(hidden)]
+pub struct StructSerializer<'a, W> {
+    ser: &'a mut Serializer<W>,
+    idx: u32,
+}
+
+impl<'a, W> StructSerializer<'a, W>
+where
+    W: Write,
+{
+    #[inline]
+    fn serialize_field_inner<T>(&mut self, key: &'static str, value: &T) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        if self.ser.packed {
+            self.idx.serialize(&mut *self.ser)?;
+        } else {
+            key.serialize(&mut *self.ser)?;
+        }
+        value.serialize(&mut *self.ser)?;
+        self.idx += 1;
+        Ok(())
+    }
+
+    #[inline]
+    fn skip_field_inner(&mut self, _: &'static str) -> Result<()> {
+        self.idx += 1;
+        Ok(())
+    }
+
+    #[inline]
+    fn end_inner(self) -> Result<()> {
+        Ok(())
+    }
+}
+
+impl<'a, W> ser::SerializeStruct for StructSerializer<'a, W>
+where
+    W: Write,
+{
+    type Ok = ();
+    type Error = Error;
+
+    #[inline]
+    fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        self.serialize_field_inner(key, value)
+    }
+
+    #[inline]
+    fn skip_field(&mut self, key: &'static str) -> Result<()> {
+        self.skip_field_inner(key)
+    }
+
+    #[inline]
+    fn end(self) -> Result<()> {
+        self.end_inner()
+    }
+}
+
+impl<'a, W> ser::SerializeStructVariant for StructSerializer<'a, W>
+where
+    W: Write,
+{
+    type Ok = ();
+    type Error = Error;
+
+    #[inline]
+    fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        self.serialize_field_inner(key, value)
+    }
+
+    #[inline]
+    fn skip_field(&mut self, key: &'static str) -> Result<()> {
+        self.skip_field_inner(key)
+    }
+
+    #[inline]
+    fn end(self) -> Result<()> {
+        self.end_inner()
+    }
+}
+
+#[doc(hidden)]
+pub struct CollectionSerializer<'a, W> {
+    ser: &'a mut Serializer<W>,
+    needs_eof: bool,
+}
+
+impl<'a, W> CollectionSerializer<'a, W>
+where
+    W: Write,
+{
+    #[inline]
+    fn end_inner(self) -> Result<()> {
+        if self.needs_eof {
+            self.ser.writer.write_all(&[0xff]).map_err(|e| e.into())
+        } else {
+            Ok(())
+        }
+    }
+}
+
+impl<'a, W> ser::SerializeSeq for CollectionSerializer<'a, W>
+where
+    W: Write,
+{
+    type Ok = ();
+    type Error = Error;
+
+    #[inline]
+    fn serialize_element<T>(&mut self, value: &T) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        value.serialize(&mut *self.ser)
+    }
+
+    #[inline]
+    fn end(self) -> Result<()> {
+        self.end_inner()
+    }
+}
+
+impl<'a, W> ser::SerializeMap for CollectionSerializer<'a, W>
+where
+    W: Write,
+{
+    type Ok = ();
+    type Error = Error;
+
+    #[inline]
+    fn serialize_key<T>(&mut self, key: &T) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        key.serialize(&mut *self.ser)
+    }
+
+    #[inline]
+    fn serialize_value<T>(&mut self, value: &T) -> Result<()>
+    where
+        T: ?Sized + ser::Serialize,
+    {
+        value.serialize(&mut *self.ser)
+    }
+
+    #[inline]
+    fn end(self) -> Result<()> {
+        self.end_inner()
+    }
+}
diff --git a/crates/serde_cbor/src/tags.rs b/crates/serde_cbor/src/tags.rs
new file mode 100644
index 0000000..8adccb8
--- /dev/null
+++ b/crates/serde_cbor/src/tags.rs
@@ -0,0 +1,220 @@
+//! Support for CBOR tags.
+use core::fmt;
+use core::marker::PhantomData;
+use serde::de::{
+    Deserialize, Deserializer, EnumAccess, IntoDeserializer, MapAccess, SeqAccess, Visitor,
+};
+use serde::forward_to_deserialize_any;
+use serde::ser::{Serialize, Serializer};
+
+/// Signals that a newtype is from a CBOR tag.
+pub(crate) const CBOR_NEWTYPE_NAME: &str = "\0cbor_tag";
+
+/// A value that is optionally tagged with a CBOR tag.
+///
+/// This only serves as an intermediate helper for tag serialization and deserialization.
+pub struct Tagged<T> {
+    /// cbor tag
+    pub tag: Option<u64>,
+    /// value
+    pub value: T,
+}
+
+impl<T> Tagged<T> {
+    /// Create a new tagged value
+    pub fn new(tag: Option<u64>, value: T) -> Self {
+        Self { tag, value }
+    }
+}
+
+impl<T: Serialize> Serialize for Tagged<T> {
+    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+        set_tag(self.tag);
+        let r = s.serialize_newtype_struct(CBOR_NEWTYPE_NAME, &self.value);
+        set_tag(None);
+        r
+    }
+}
+
+fn untagged<T>(value: T) -> Tagged<T> {
+    Tagged::new(None, value)
+}
+
+macro_rules! delegate {
+    ($name: ident, $type: ty) => {
+        fn $name<E: serde::de::Error>(self, v: $type) -> Result<Self::Value, E> {
+            T::deserialize(v.into_deserializer()).map(untagged)
+        }
+    };
+}
+
+struct EnumDeserializer<A>(A);
+
+impl<'de, A> Deserializer<'de> for EnumDeserializer<A>
+where
+    A: EnumAccess<'de>,
+{
+    type Error = A::Error;
+
+    fn deserialize_any<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value, Self::Error> {
+        visitor.visit_enum(self.0)
+    }
+
+    forward_to_deserialize_any! {
+        bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string
+        bytes byte_buf option unit unit_struct newtype_struct seq tuple
+        tuple_struct map struct enum identifier ignored_any
+    }
+}
+
+struct NoneDeserializer<E>(PhantomData<E>);
+
+impl<'de, E> Deserializer<'de> for NoneDeserializer<E>
+where
+    E: serde::de::Error,
+{
+    type Error = E;
+
+    fn deserialize_any<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value, Self::Error> {
+        visitor.visit_none()
+    }
+
+    forward_to_deserialize_any! {
+        bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string
+        bytes byte_buf option unit unit_struct newtype_struct seq tuple
+        tuple_struct map struct enum identifier ignored_any
+    }
+}
+
+struct BytesDeserializer<'a, E>(&'a [u8], PhantomData<E>);
+
+impl<'de, 'a, E> Deserializer<'de> for BytesDeserializer<'a, E>
+where
+    E: serde::de::Error,
+{
+    type Error = E;
+
+    fn deserialize_any<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value, Self::Error> {
+        visitor.visit_bytes(self.0)
+    }
+
+    forward_to_deserialize_any! {
+        bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string
+        bytes byte_buf option unit unit_struct newtype_struct seq tuple
+        tuple_struct map struct enum identifier ignored_any
+    }
+}
+
+/// A visitor that intercepts *just* visit_newtype_struct and passes through everything else.
+struct MaybeTaggedVisitor<T>(PhantomData<T>);
+
+impl<'de, T: Deserialize<'de>> Visitor<'de> for MaybeTaggedVisitor<T> {
+    type Value = Tagged<T>;
+
+    fn expecting(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.write_str("a cbor tag newtype")
+    }
+
+    delegate!(visit_bool, bool);
+
+    delegate!(visit_i8, i8);
+    delegate!(visit_i16, i16);
+    delegate!(visit_i32, i32);
+    delegate!(visit_i64, i64);
+
+    delegate!(visit_u8, u8);
+    delegate!(visit_u16, u16);
+    delegate!(visit_u32, u32);
+    delegate!(visit_u64, u64);
+
+    delegate!(visit_f32, f32);
+    delegate!(visit_f64, f64);
+
+    delegate!(visit_char, char);
+    delegate!(visit_str, &str);
+    delegate!(visit_borrowed_str, &'de str);
+
+    #[cfg(feature = "std")]
+    delegate!(visit_byte_buf, Vec<u8>);
+
+    #[cfg(feature = "std")]
+    delegate!(visit_string, String);
+
+    fn visit_bytes<E: serde::de::Error>(self, value: &[u8]) -> Result<Self::Value, E> {
+        T::deserialize(BytesDeserializer(value, PhantomData)).map(untagged)
+    }
+
+    fn visit_borrowed_bytes<E: serde::de::Error>(self, value: &'de [u8]) -> Result<Self::Value, E> {
+        T::deserialize(serde::de::value::BorrowedBytesDeserializer::new(value)).map(untagged)
+    }
+
+    fn visit_unit<E: serde::de::Error>(self) -> Result<Self::Value, E> {
+        T::deserialize(().into_deserializer()).map(untagged)
+    }
+
+    fn visit_none<E: serde::de::Error>(self) -> Result<Self::Value, E> {
+        T::deserialize(NoneDeserializer(PhantomData)).map(untagged)
+    }
+
+    fn visit_some<D: Deserializer<'de>>(self, deserializer: D) -> Result<Self::Value, D::Error> {
+        T::deserialize(deserializer).map(untagged)
+    }
+
+    fn visit_seq<A: SeqAccess<'de>>(self, seq: A) -> Result<Self::Value, A::Error> {
+        T::deserialize(serde::de::value::SeqAccessDeserializer::new(seq)).map(untagged)
+    }
+
+    fn visit_map<V: MapAccess<'de>>(self, map: V) -> Result<Self::Value, V::Error> {
+        T::deserialize(serde::de::value::MapAccessDeserializer::new(map)).map(untagged)
+    }
+
+    fn visit_enum<A: EnumAccess<'de>>(self, data: A) -> Result<Self::Value, A::Error> {
+        T::deserialize(EnumDeserializer(data)).map(untagged)
+    }
+
+    fn visit_newtype_struct<D: serde::Deserializer<'de>>(
+        self,
+        deserializer: D,
+    ) -> Result<Self::Value, D::Error> {
+        let t = get_tag();
+        T::deserialize(deserializer).map(|v| Tagged::new(t, v))
+    }
+}
+
+impl<'de, T: serde::de::Deserialize<'de>> serde::de::Deserialize<'de> for Tagged<T> {
+    fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+        deserializer.deserialize_any(MaybeTaggedVisitor::<T>(PhantomData))
+    }
+}
+
+/// Returns the current CBOR tag, if any.
+///
+/// The only place where it makes sense to call this function is within `visit_newtype_struct` of a serde visitor.
+/// This is a low-level API; in most cases it is preferable to use `Tagged`.
+pub fn current_cbor_tag() -> Option<u64> {
+    get_tag()
+}
+
+#[cfg(feature = "tags")]
+pub(crate) fn set_tag(value: Option<u64>) {
+    CBOR_TAG.with(|f| *f.borrow_mut() = value);
+}
+
+#[cfg(feature = "tags")]
+pub(crate) fn get_tag() -> Option<u64> {
+    CBOR_TAG.with(|f| *f.borrow())
+}
+
+#[cfg(not(feature = "tags"))]
+pub(crate) fn set_tag(_value: Option<u64>) {}
+
+#[cfg(not(feature = "tags"))]
+pub(crate) fn get_tag() -> Option<u64> {
+    None
+}
+
+#[cfg(feature = "tags")]
+use std::cell::RefCell;
+
+#[cfg(feature = "tags")]
+thread_local!(static CBOR_TAG: RefCell<Option<u64>> = RefCell::new(None));
diff --git a/crates/serde_cbor/src/value/de.rs b/crates/serde_cbor/src/value/de.rs
new file mode 100644
index 0000000..f5bdbb7
--- /dev/null
+++ b/crates/serde_cbor/src/value/de.rs
@@ -0,0 +1,166 @@
+use std::collections::BTreeMap;
+use std::fmt;
+
+use crate::value::Value;
+use serde::de;
+
+impl<'de> de::Deserialize<'de> for Value {
+    #[inline]
+    fn deserialize<D>(deserializer: D) -> Result<Value, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        struct ValueVisitor;
+
+        impl<'de> de::Visitor<'de> for ValueVisitor {
+            type Value = Value;
+
+            fn expecting(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+                fmt.write_str("any valid CBOR value")
+            }
+
+            #[inline]
+            fn visit_str<E>(self, value: &str) -> Result<Value, E>
+            where
+                E: de::Error,
+            {
+                self.visit_string(String::from(value))
+            }
+
+            #[inline]
+            fn visit_string<E>(self, value: String) -> Result<Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Value::Text(value))
+            }
+            #[inline]
+            fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                self.visit_byte_buf(v.to_owned())
+            }
+
+            #[inline]
+            fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Value::Bytes(v))
+            }
+
+            #[inline]
+            fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Value::Integer(v.into()))
+            }
+
+            #[inline]
+            fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Value::Integer(v.into()))
+            }
+
+            #[inline]
+            fn visit_i128<E>(self, v: i128) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Value::Integer(v))
+            }
+
+            #[inline]
+            fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Value::Bool(v))
+            }
+
+            #[inline]
+            fn visit_none<E>(self) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                self.visit_unit()
+            }
+
+            #[inline]
+            fn visit_unit<E>(self) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Value::Null)
+            }
+
+            #[inline]
+            fn visit_seq<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
+            where
+                V: de::SeqAccess<'de>,
+            {
+                let mut vec = Vec::new();
+
+                while let Some(elem) = visitor.next_element()? {
+                    vec.push(elem);
+                }
+
+                Ok(Value::Array(vec))
+            }
+
+            #[inline]
+            fn visit_map<V>(self, mut visitor: V) -> Result<Value, V::Error>
+            where
+                V: de::MapAccess<'de>,
+            {
+                let mut values = BTreeMap::new();
+
+                while let Some((key, value)) = visitor.next_entry()? {
+                    values.insert(key, value);
+                }
+
+                Ok(Value::Map(values))
+            }
+
+            #[inline]
+            fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Value::Float(v))
+            }
+
+            fn visit_newtype_struct<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
+            where
+                D: serde::Deserializer<'de>,
+            {
+                let tag = crate::tags::get_tag();
+                let inner = deserializer.deserialize_any(self);
+                match tag {
+                    Some(tag) => inner.map(|v| Value::Tag(tag, Box::new(v))),
+                    None => inner,
+                }
+            }
+        }
+
+        deserializer.deserialize_any(ValueVisitor)
+    }
+}
+
+/// Convert a `serde_cbor::Value` into a type `T`
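+///
+/// A small sketch (the conversion currently round-trips through an in-memory CBOR buffer, as
+/// noted in the implementation):
+///
+/// ```rust
+/// use serde_cbor::value::{from_value, Value};
+///
+/// let v = Value::Array(vec![Value::Integer(1), Value::Integer(2)]);
+/// let nums: Vec<u32> = from_value(v).unwrap();
+/// assert_eq!(nums, vec![1, 2]);
+/// ```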
+#[allow(clippy::needless_pass_by_value)]
+pub fn from_value<T>(value: Value) -> Result<T, crate::error::Error>
+where
+    T: de::DeserializeOwned,
+{
+    // TODO implement in a way that doesn't require
+    // roundtrip through buffer (i.e. by implementing
+    // `serde::de::Deserializer` for `Value` and then doing
+    // `T::deserialize(value)`).
+    let buf = crate::to_vec(&value)?;
+    crate::from_slice(buf.as_slice())
+}
diff --git a/crates/serde_cbor/src/value/mod.rs b/crates/serde_cbor/src/value/mod.rs
new file mode 100644
index 0000000..7bd2255
--- /dev/null
+++ b/crates/serde_cbor/src/value/mod.rs
@@ -0,0 +1,156 @@
+//! CBOR values, keys and serialization routines.
+
+mod de;
+mod ser;
+
+use std::cmp::{Ord, Ordering, PartialOrd};
+use std::collections::BTreeMap;
+
+#[doc(inline)]
+pub use self::de::from_value;
+#[doc(inline)]
+pub use self::ser::to_value;
+
+/// The `Value` enum, a loosely typed way of representing any valid CBOR value.
+///
+/// Maps are sorted according to the canonical ordering
+/// described in [RFC 7049 bis].
+/// Therefore values are unambiguously serialized
+/// to a canonical form of CBOR from the same RFC.
+///
+/// [RFC 7049 bis]: https://tools.ietf.org/html/draft-ietf-cbor-7049bis-04#section-2
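+///
+/// A couple of small examples of that ordering (a sketch, not an exhaustive description):
+///
+/// ```rust
+/// use serde_cbor::value::Value;
+///
+/// // Within one major type, shorter byte strings sort before longer ones.
+/// assert!(Value::Bytes(vec![0xff]) < Value::Bytes(vec![0x00, 0x00]));
+/// // Across major types, unsigned integers (major type 0) sort before text strings (major type 3).
+/// assert!(Value::Integer(1000) < Value::Text("a".to_string()));
+/// ```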
+#[derive(Clone, Debug)]
+pub enum Value {
+    /// Represents the absence of a value or the value undefined.
+    Null,
+    /// Represents a boolean value.
+    Bool(bool),
+    /// Integer CBOR numbers.
+    ///
+    /// The largest value that can be represented is 2^64 - 1
+    /// and the smallest is -2^64.
+    /// Values outside this range can't be serialized
+    /// and will cause an error.
+    Integer(i128),
+    /// Represents a floating point value.
+    Float(f64),
+    /// Represents a byte string.
+    Bytes(Vec<u8>),
+    /// Represents a UTF-8 encoded string.
+    Text(String),
+    /// Represents an array of values.
+    Array(Vec<Value>),
+    /// Represents a map.
+    ///
+    /// Maps are also called tables, dictionaries, hashes, or objects (in JSON).
+    /// While any value can be used as a CBOR key,
+    /// it is better to use only one type of key in a map
+    /// to avoid ambiguity.
+    /// If floating-point values are used as keys, they are compared bit-by-bit for equality.
+    /// If arrays or maps are used as keys, the comparisons
+    /// needed to establish canonical order may be slow, so insertion
+    /// and retrieval of values will be slow too.
+    Map(BTreeMap<Value, Value>),
+    /// Represents a tagged value
+    Tag(u64, Box<Value>),
+    // The hidden variant allows the enum to be extended
+    // with variants for tags and simple values.
+    #[doc(hidden)]
+    __Hidden,
+}
+
+impl PartialEq for Value {
+    fn eq(&self, other: &Value) -> bool {
+        self.cmp(other) == Ordering::Equal
+    }
+}
+
+impl Eq for Value {}
+
+impl PartialOrd for Value {
+    fn partial_cmp(&self, other: &Value) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for Value {
+    fn cmp(&self, other: &Value) -> Ordering {
+        // Determine the canonical order of two values:
+        // 1. Smaller major type sorts first.
+        // 2. Shorter sequence sorts first.
+        // 3. Compare integers by magnitude.
+        // 4. Compare byte and text sequences lexically.
+        // 5. Compare the serializations of both types. (expensive)
+        use self::Value::*;
+        if self.major_type() != other.major_type() {
+            return self.major_type().cmp(&other.major_type());
+        }
+        match (self, other) {
+            (Integer(a), Integer(b)) => a.abs().cmp(&b.abs()),
+            (Bytes(a), Bytes(b)) if a.len() != b.len() => a.len().cmp(&b.len()),
+            (Text(a), Text(b)) if a.len() != b.len() => a.len().cmp(&b.len()),
+            (Array(a), Array(b)) if a.len() != b.len() => a.len().cmp(&b.len()),
+            (Map(a), Map(b)) if a.len() != b.len() => a.len().cmp(&b.len()),
+            (Bytes(a), Bytes(b)) => a.cmp(b),
+            (Text(a), Text(b)) => a.cmp(b),
+            (a, b) => {
+                let a = crate::to_vec(a).expect("self is serializable");
+                let b = crate::to_vec(b).expect("other is serializable");
+                a.cmp(&b)
+            }
+        }
+    }
+}
+
+macro_rules! impl_from {
+    ($variant:path, $for_type:ty) => {
+        impl From<$for_type> for Value {
+            fn from(v: $for_type) -> Value {
+                $variant(v.into())
+            }
+        }
+    };
+}
+
+impl_from!(Value::Bool, bool);
+impl_from!(Value::Integer, i8);
+impl_from!(Value::Integer, i16);
+impl_from!(Value::Integer, i32);
+impl_from!(Value::Integer, i64);
+// i128 omitted because not all numbers fit in CBOR serialization
+impl_from!(Value::Integer, u8);
+impl_from!(Value::Integer, u16);
+impl_from!(Value::Integer, u32);
+impl_from!(Value::Integer, u64);
+// u128 omitted because not all numbers fit in CBOR serialization
+impl_from!(Value::Float, f32);
+impl_from!(Value::Float, f64);
+impl_from!(Value::Bytes, Vec<u8>);
+impl_from!(Value::Text, String);
+// TODO: figure out if these impls should be more generic or removed.
+impl_from!(Value::Array, Vec<Value>);
+impl_from!(Value::Map, BTreeMap<Value, Value>);
+
+impl Value {
+    fn major_type(&self) -> u8 {
+        use self::Value::*;
+        match self {
+            Null => 7,
+            Bool(_) => 7,
+            Integer(v) => {
+                if *v >= 0 {
+                    0
+                } else {
+                    1
+                }
+            }
+            Tag(_, _) => 6,
+            Float(_) => 7,
+            Bytes(_) => 2,
+            Text(_) => 3,
+            Array(_) => 4,
+            Map(_) => 5,
+            __Hidden => unreachable!(),
+        }
+    }
+}
diff --git a/crates/serde_cbor/src/value/ser.rs b/crates/serde_cbor/src/value/ser.rs
new file mode 100644
index 0000000..347aae9
--- /dev/null
+++ b/crates/serde_cbor/src/value/ser.rs
@@ -0,0 +1,443 @@
+// Copyright 2017 Serde Developers
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::collections::BTreeMap;
+
+use crate::error::Error;
+use serde::{self, Serialize};
+
+use crate::tags::Tagged;
+use crate::value::Value;
+
+impl serde::Serialize for Value {
+    #[inline]
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        match *self {
+            Value::Integer(v) => serializer.serialize_i128(v),
+            Value::Bytes(ref v) => serializer.serialize_bytes(&v),
+            Value::Text(ref v) => serializer.serialize_str(&v),
+            Value::Array(ref v) => v.serialize(serializer),
+            Value::Map(ref v) => v.serialize(serializer),
+            Value::Tag(tag, ref v) => Tagged::new(Some(tag), v).serialize(serializer),
+            Value::Float(v) => serializer.serialize_f64(v),
+            Value::Bool(v) => serializer.serialize_bool(v),
+            Value::Null => serializer.serialize_unit(),
+            Value::__Hidden => unreachable!(),
+        }
+    }
+}
+
+struct Serializer;
+
+impl serde::Serializer for Serializer {
+    type Ok = Value;
+    type Error = Error;
+
+    type SerializeSeq = SerializeVec;
+    type SerializeTuple = SerializeVec;
+    type SerializeTupleStruct = SerializeVec;
+    type SerializeTupleVariant = SerializeTupleVariant;
+    type SerializeMap = SerializeMap;
+    type SerializeStruct = SerializeMap;
+    type SerializeStructVariant = SerializeStructVariant;
+
+    #[inline]
+    fn serialize_bool(self, value: bool) -> Result<Value, Error> {
+        Ok(Value::Bool(value))
+    }
+
+    #[inline]
+    fn serialize_i8(self, value: i8) -> Result<Value, Error> {
+        self.serialize_i64(i64::from(value))
+    }
+
+    #[inline]
+    fn serialize_i16(self, value: i16) -> Result<Value, Error> {
+        self.serialize_i64(i64::from(value))
+    }
+
+    #[inline]
+    fn serialize_i32(self, value: i32) -> Result<Value, Error> {
+        self.serialize_i64(i64::from(value))
+    }
+
+    #[inline]
+    fn serialize_i64(self, value: i64) -> Result<Value, Error> {
+        self.serialize_i128(i128::from(value))
+    }
+
+    fn serialize_i128(self, value: i128) -> Result<Value, Error> {
+        Ok(Value::Integer(value))
+    }
+
+    #[inline]
+    fn serialize_u8(self, value: u8) -> Result<Value, Error> {
+        self.serialize_u64(u64::from(value))
+    }
+
+    #[inline]
+    fn serialize_u16(self, value: u16) -> Result<Value, Error> {
+        self.serialize_u64(u64::from(value))
+    }
+
+    #[inline]
+    fn serialize_u32(self, value: u32) -> Result<Value, Error> {
+        self.serialize_u64(u64::from(value))
+    }
+
+    #[inline]
+    fn serialize_u64(self, value: u64) -> Result<Value, Error> {
+        Ok(Value::Integer(value.into()))
+    }
+
+    #[inline]
+    fn serialize_f32(self, value: f32) -> Result<Value, Error> {
+        self.serialize_f64(f64::from(value))
+    }
+
+    #[inline]
+    fn serialize_f64(self, value: f64) -> Result<Value, Error> {
+        Ok(Value::Float(value))
+    }
+
+    #[inline]
+    fn serialize_char(self, value: char) -> Result<Value, Error> {
+        let mut s = String::new();
+        s.push(value);
+        self.serialize_str(&s)
+    }
+
+    #[inline]
+    fn serialize_str(self, value: &str) -> Result<Value, Error> {
+        Ok(Value::Text(value.to_owned()))
+    }
+
+    fn serialize_bytes(self, value: &[u8]) -> Result<Value, Error> {
+        Ok(Value::Bytes(value.to_vec()))
+    }
+
+    #[inline]
+    fn serialize_unit(self) -> Result<Value, Error> {
+        Ok(Value::Null)
+    }
+
+    #[inline]
+    fn serialize_unit_struct(self, _name: &'static str) -> Result<Value, Error> {
+        self.serialize_unit()
+    }
+
+    #[inline]
+    fn serialize_unit_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        variant: &'static str,
+    ) -> Result<Value, Error> {
+        self.serialize_str(variant)
+    }
+
+    #[inline]
+    fn serialize_newtype_struct<T: ?Sized>(
+        self,
+        _name: &'static str,
+        value: &T,
+    ) -> Result<Value, Error>
+    where
+        T: Serialize,
+    {
+        value.serialize(self)
+    }
+
+    fn serialize_newtype_variant<T: ?Sized>(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        variant: &'static str,
+        value: &T,
+    ) -> Result<Value, Error>
+    where
+        T: Serialize,
+    {
+        let mut values = BTreeMap::new();
+        values.insert(Value::from(variant.to_owned()), to_value(&value)?);
+        Ok(Value::Map(values))
+    }
+
+    #[inline]
+    fn serialize_none(self) -> Result<Value, Error> {
+        self.serialize_unit()
+    }
+
+    #[inline]
+    fn serialize_some<T: ?Sized>(self, value: &T) -> Result<Value, Error>
+    where
+        T: Serialize,
+    {
+        value.serialize(self)
+    }
+
+    fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq, Error> {
+        Ok(SerializeVec {
+            vec: Vec::with_capacity(len.unwrap_or(0)),
+        })
+    }
+
+    fn serialize_tuple(self, len: usize) -> Result<Self::SerializeTuple, Error> {
+        self.serialize_seq(Some(len))
+    }
+
+    fn serialize_tuple_struct(
+        self,
+        _name: &'static str,
+        len: usize,
+    ) -> Result<Self::SerializeTupleStruct, Error> {
+        self.serialize_tuple(len)
+    }
+
+    fn serialize_tuple_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        variant: &'static str,
+        len: usize,
+    ) -> Result<Self::SerializeTupleVariant, Error> {
+        Ok(SerializeTupleVariant {
+            name: String::from(variant),
+            vec: Vec::with_capacity(len),
+        })
+    }
+
+    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Error> {
+        Ok(SerializeMap {
+            map: BTreeMap::new(),
+            next_key: None,
+        })
+    }
+
+    fn serialize_struct(
+        self,
+        _name: &'static str,
+        len: usize,
+    ) -> Result<Self::SerializeStruct, Error> {
+        self.serialize_map(Some(len))
+    }
+
+    fn serialize_struct_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        variant: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeStructVariant, Error> {
+        Ok(SerializeStructVariant {
+            name: String::from(variant),
+            map: BTreeMap::new(),
+        })
+    }
+
+    #[inline]
+    fn is_human_readable(&self) -> bool {
+        false
+    }
+}
+
+pub struct SerializeVec {
+    vec: Vec<Value>,
+}
+
+pub struct SerializeTupleVariant {
+    name: String,
+    vec: Vec<Value>,
+}
+
+pub struct SerializeMap {
+    map: BTreeMap<Value, Value>,
+    next_key: Option<Value>,
+}
+
+pub struct SerializeStructVariant {
+    name: String,
+    map: BTreeMap<Value, Value>,
+}
+
+impl serde::ser::SerializeSeq for SerializeVec {
+    type Ok = Value;
+    type Error = Error;
+
+    fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
+    where
+        T: Serialize,
+    {
+        self.vec.push(to_value(&value)?);
+        Ok(())
+    }
+
+    fn end(self) -> Result<Value, Error> {
+        Ok(Value::Array(self.vec))
+    }
+}
+
+impl serde::ser::SerializeTuple for SerializeVec {
+    type Ok = Value;
+    type Error = Error;
+
+    fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
+    where
+        T: Serialize,
+    {
+        serde::ser::SerializeSeq::serialize_element(self, value)
+    }
+
+    fn end(self) -> Result<Value, Error> {
+        serde::ser::SerializeSeq::end(self)
+    }
+}
+
+impl serde::ser::SerializeTupleStruct for SerializeVec {
+    type Ok = Value;
+    type Error = Error;
+
+    fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
+    where
+        T: Serialize,
+    {
+        serde::ser::SerializeSeq::serialize_element(self, value)
+    }
+
+    fn end(self) -> Result<Value, Error> {
+        serde::ser::SerializeSeq::end(self)
+    }
+}
+
+impl serde::ser::SerializeTupleVariant for SerializeTupleVariant {
+    type Ok = Value;
+    type Error = Error;
+
+    fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
+    where
+        T: Serialize,
+    {
+        self.vec.push(to_value(&value)?);
+        Ok(())
+    }
+
+    fn end(self) -> Result<Value, Error> {
+        let mut object = BTreeMap::new();
+
+        object.insert(Value::from(self.name), Value::Array(self.vec));
+
+        Ok(Value::Map(object))
+    }
+}
+
+impl serde::ser::SerializeMap for SerializeMap {
+    type Ok = Value;
+    type Error = Error;
+
+    fn serialize_key<T: ?Sized>(&mut self, key: &T) -> Result<(), Error>
+    where
+        T: Serialize,
+    {
+        self.next_key = Some(to_value(&key)?);
+        Ok(())
+    }
+
+    fn serialize_value<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
+    where
+        T: Serialize,
+    {
+        let key = self.next_key.take();
+        // Panic because this indicates a bug in the program rather than an
+        // expected failure.
+        let key = key.expect("serialize_value called before serialize_key");
+        self.map.insert(key, to_value(&value)?);
+        Ok(())
+    }
+
+    fn end(self) -> Result<Value, Error> {
+        Ok(Value::Map(self.map))
+    }
+}
+
+impl serde::ser::SerializeStruct for SerializeMap {
+    type Ok = Value;
+    type Error = Error;
+
+    fn serialize_field<T: ?Sized>(&mut self, key: &'static str, value: &T) -> Result<(), Error>
+    where
+        T: Serialize,
+    {
+        serde::ser::SerializeMap::serialize_key(self, key)?;
+        serde::ser::SerializeMap::serialize_value(self, value)
+    }
+
+    fn end(self) -> Result<Value, Error> {
+        serde::ser::SerializeMap::end(self)
+    }
+}
+
+impl serde::ser::SerializeStructVariant for SerializeStructVariant {
+    type Ok = Value;
+    type Error = Error;
+
+    fn serialize_field<T: ?Sized>(&mut self, key: &'static str, value: &T) -> Result<(), Error>
+    where
+        T: Serialize,
+    {
+        self.map
+            .insert(Value::from(String::from(key)), to_value(&value)?);
+        Ok(())
+    }
+
+    fn end(self) -> Result<Value, Error> {
+        let mut object = BTreeMap::new();
+
+        object.insert(Value::from(self.name), Value::Map(self.map));
+
+        Ok(Value::Map(object))
+    }
+}
+
+/// Convert a `T` into `serde_cbor::Value` which is an enum that can represent
+/// any valid CBOR data.
+///
+/// ```rust
+/// extern crate serde;
+///
+/// #[macro_use]
+/// extern crate serde_derive;
+/// extern crate serde_cbor;
+///
+/// use std::error::Error;
+///
+/// #[derive(Serialize)]
+/// struct User {
+///     fingerprint: String,
+///     location: String,
+/// }
+///
+/// fn main() {
+///     let u = User {
+///         fingerprint: "0xF9BA143B95FF6D82".to_owned(),
+///         location: "Menlo Park, CA".to_owned(),
+///     };
+///
+///     let v = serde_cbor::value::to_value(u).unwrap();
+/// }
+/// ```
+#[allow(clippy::needless_pass_by_value)]
+// Taking by value is more friendly to iterator adapters, option and result
+pub fn to_value<T>(value: T) -> Result<Value, Error>
+where
+    T: Serialize,
+{
+    value.serialize(Serializer)
+}
diff --git a/crates/serde_cbor/src/write.rs b/crates/serde_cbor/src/write.rs
new file mode 100644
index 0000000..94c326e
--- /dev/null
+++ b/crates/serde_cbor/src/write.rs
@@ -0,0 +1,175 @@
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
+#[cfg(not(feature = "std"))]
+use core::fmt;
+#[cfg(feature = "std")]
+use std::io;
+
+use crate::error;
+
+#[cfg(not(feature = "unsealed_read_write"))]
+/// A sink for serialized CBOR.
+///
+/// This trait is similar to the [`Write`](https://doc.rust-lang.org/std/io/trait.Write.html) trait in the standard library,
+/// but has a smaller and more general API.
+///
+/// Any object implementing `std::io::Write`
+/// can be wrapped in an [`IoWrite`](../write/struct.IoWrite.html) that implements
+/// this trait for the underlying object.
+pub trait Write: private::Sealed {
+    /// The type of error returned when a write operation fails.
+    #[doc(hidden)]
+    type Error: Into<error::Error>;
+
+    /// Attempts to write an entire buffer into this writer.
+    #[doc(hidden)]
+    fn write_all(&mut self, buf: &[u8]) -> Result<(), Self::Error>;
+}
+
+#[cfg(feature = "unsealed_read_write")]
+/// A sink for serialized CBOR.
+///
+/// This trait is similar to the [`Write`](https://doc.rust-lang.org/std/io/trait.Write.html) trait in the standard library,
+/// but has a smaller and more general API.
+///
+/// Any object implementing `std::io::Write`
+/// can be wrapped in an [`IoWrite`](../write/struct.IoWrite.html) that implements
+/// this trait for the underlying object.
+///
+/// This trait is sealed by default; enabling the `unsealed_read_write` feature removes this
+/// restriction and allows objects outside of this crate to implement it.
+pub trait Write {
+    /// The type of error returned when a write operation fails.
+    type Error: Into<error::Error>;
+
+    /// Attempts to write an entire buffer into this writer.
+    fn write_all(&mut self, buf: &[u8]) -> Result<(), Self::Error>;
+}
+
+#[cfg(not(feature = "unsealed_read_write"))]
+mod private {
+    pub trait Sealed {}
+}
+
+impl<W> Write for &mut W
+where
+    W: Write,
+{
+    type Error = W::Error;
+
+    fn write_all(&mut self, buf: &[u8]) -> Result<(), Self::Error> {
+        (*self).write_all(buf)
+    }
+}
+
+#[cfg(not(feature = "unsealed_read_write"))]
+impl<W> private::Sealed for &mut W where W: Write {}
+
+#[cfg(feature = "std")]
+/// A wrapper for types that implement
+/// [`std::io::Write`](https://doc.rust-lang.org/std/io/trait.Write.html) to implement the local
+/// [`Write`](trait.Write.html) trait.
+#[derive(Debug)]
+pub struct IoWrite<W>(W);
+
+#[cfg(feature = "std")]
+impl<W: io::Write> IoWrite<W> {
+    /// Wraps an `io::Write` writer to make it compatible with [`Write`](trait.Write.html)
+    pub fn new(w: W) -> IoWrite<W> {
+        IoWrite(w)
+    }
+}
+
+#[cfg(feature = "std")]
+impl<W: io::Write> Write for IoWrite<W> {
+    type Error = io::Error;
+
+    fn write_all(&mut self, buf: &[u8]) -> Result<(), Self::Error> {
+        self.0.write_all(buf)
+    }
+}
+
+#[cfg(all(feature = "std", not(feature = "unsealed_read_write")))]
+impl<W> private::Sealed for IoWrite<W> where W: io::Write {}
+
+#[cfg(any(feature = "std", feature = "alloc"))]
+impl Write for Vec<u8> {
+    type Error = error::Error;
+
+    fn write_all(&mut self, buf: &[u8]) -> Result<(), Self::Error> {
+        self.extend_from_slice(buf);
+        Ok(())
+    }
+}
+
+#[cfg(all(
+    any(feature = "std", feature = "alloc"),
+    not(feature = "unsealed_read_write")
+))]
+impl private::Sealed for Vec<u8> {}
+
+#[cfg(not(feature = "std"))]
+#[derive(Debug)]
+pub struct FmtWrite<'a, W: Write>(&'a mut W);
+
+#[cfg(not(feature = "std"))]
+impl<'a, W: Write> FmtWrite<'a, W> {
+    /// Wraps a [`Write`](trait.Write.html) sink so it can be driven through `core::fmt::Write`.
+    pub fn new(w: &'a mut W) -> FmtWrite<'a, W> {
+        FmtWrite(w)
+    }
+}
+
+#[cfg(not(feature = "std"))]
+impl<'a, W: Write> fmt::Write for FmtWrite<'a, W> {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        self.0.write_all(s.as_bytes()).map_err(|_| fmt::Error)
+    }
+}
+
+#[cfg(all(not(feature = "std"), not(feature = "unsealed_read_write")))]
+impl<'a, W> private::Sealed for FmtWrite<'a, W> where W: Write {}
+
+/// A [`Write`](trait.Write.html) implementation backed by a mutable byte slice (`&mut [u8]`).
+///
+/// Returns an error if the value to serialize is too large to fit in the slice.
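+///
+/// A minimal sketch of serializing into a stack buffer (the buffer size here is an arbitrary
+/// choice):
+///
+/// ```rust
+/// extern crate serde;
+/// extern crate serde_cbor;
+///
+/// use serde::Serialize;
+/// use serde_cbor::ser::{Serializer, SliceWrite};
+///
+/// fn main() {
+///     let mut buf = [0u8; 16];
+///     let writer = SliceWrite::new(&mut buf);
+///     let mut ser = Serializer::new(writer);
+///     "hi".serialize(&mut ser).unwrap();
+///     let writer = ser.into_inner();
+///     // One header byte (text string, length 2) plus the two payload bytes.
+///     assert_eq!(writer.bytes_written(), 3);
+/// }
+/// ```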
+#[derive(Debug)]
+pub struct SliceWrite<'a> {
+    slice: &'a mut [u8],
+    index: usize,
+}
+
+impl<'a> SliceWrite<'a> {
+    /// Wraps a mutable slice so it can be used as a `Write`.
+    pub fn new(slice: &'a mut [u8]) -> SliceWrite<'a> {
+        SliceWrite { slice, index: 0 }
+    }
+
+    /// Returns the number of bytes written to the underlying slice.
+    pub fn bytes_written(&self) -> usize {
+        self.index
+    }
+
+    /// Returns the underlying slice.
+    pub fn into_inner(self) -> &'a mut [u8] {
+        self.slice
+    }
+}
+
+impl<'a> Write for SliceWrite<'a> {
+    type Error = error::Error;
+
+    fn write_all(&mut self, buf: &[u8]) -> Result<(), Self::Error> {
+        if self.slice.len() - self.index < buf.len() {
+            // This buffer will not fit in our slice
+            return Err(error::Error::scratch_too_small(self.index as u64));
+        }
+        let end = self.index + buf.len();
+        self.slice[self.index..end].copy_from_slice(buf);
+        self.index = end;
+        Ok(())
+    }
+}
+
+#[cfg(not(feature = "unsealed_read_write"))]
+impl<'a> private::Sealed for SliceWrite<'a> {}
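The trait above is serde_cbor's minimal sink abstraction: `IoWrite` adapts any `std::io::Write`, `SliceWrite` targets a fixed `&mut [u8]`, `Vec<u8>` works directly, and the `unsealed_read_write` feature lifts the seal so downstream crates can supply their own sinks. A minimal sketch of such a custom sink, assuming the trait is re-exported as `serde_cbor::ser::Write` and the crate is built with `features = ["unsealed_read_write"]`; the `CountingWrite` type is purely illustrative:

use serde_cbor::ser::Write;

/// Discards the bytes and only records how many would have been written.
struct CountingWrite {
    written: usize,
}

impl Write for CountingWrite {
    // Any error type convertible into serde_cbor::Error satisfies the bound.
    type Error = serde_cbor::Error;

    fn write_all(&mut self, buf: &[u8]) -> Result<(), Self::Error> {
        self.written += buf.len();
        Ok(())
    }
}

Passing such a writer to `Serializer::new` would measure the encoded size of a value without allocating an output buffer.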
diff --git a/crates/serde_cbor/tests/bennofs.rs b/crates/serde_cbor/tests/bennofs.rs
new file mode 100644
index 0000000..1b289f4
--- /dev/null
+++ b/crates/serde_cbor/tests/bennofs.rs
@@ -0,0 +1,60 @@
+#[macro_use]
+extern crate serde_derive;
+
+use serde::Serialize;
+use serde_cbor::ser::SliceWrite;
+use serde_cbor::{self, Serializer};
+
+#[derive(Debug, PartialEq, Serialize, Deserialize)]
+struct Example {
+    foo: Foo,
+    payload: u8,
+}
+
+#[derive(Debug, PartialEq, Serialize, Deserialize)]
+struct Foo {
+    x: u8,
+    color: Color,
+}
+
+#[derive(Debug, PartialEq, Serialize, Deserialize)]
+enum Color {
+    Red,
+    Blue,
+    Yellow(u8),
+}
+
+const EXAMPLE: Example = Example {
+    foo: Foo {
+        x: 0xAA,
+        color: Color::Yellow(40),
+    },
+    payload: 0xCC,
+};
+
+#[cfg(feature = "std")]
+mod std_tests {
+    use super::*;
+
+    #[test]
+    fn test() {
+        let serialized = serde_cbor::ser::to_vec_packed(&EXAMPLE).expect("bennofs 1");
+        println!("{:?}", serialized);
+        let deserialized: Example = serde_cbor::from_slice(&serialized).expect("bennofs 2");
+        assert_eq!(EXAMPLE, deserialized);
+    }
+}
+
+#[test]
+fn test() {
+    let mut slice = [0u8; 64];
+    let writer = SliceWrite::new(&mut slice);
+    let mut serializer = Serializer::new(writer).packed_format();
+    EXAMPLE.serialize(&mut serializer).unwrap();
+    let writer = serializer.into_inner();
+    let end = writer.bytes_written();
+    let slice = writer.into_inner();
+    let deserialized: Example =
+        serde_cbor::de::from_slice_with_scratch(&slice[..end], &mut []).unwrap();
+    assert_eq!(EXAMPLE, deserialized);
+}
diff --git a/crates/serde_cbor/tests/canonical.rs b/crates/serde_cbor/tests/canonical.rs
new file mode 100644
index 0000000..438e29e
--- /dev/null
+++ b/crates/serde_cbor/tests/canonical.rs
@@ -0,0 +1,104 @@
+#[cfg(feature = "std")]
+mod std_tests {
+    use serde_cbor::value::Value;
+
+    #[test]
+    fn integer_canonical_sort_order() {
+        let expected = [
+            0,
+            23,
+            24,
+            255,
+            256,
+            65535,
+            65536,
+            4294967295,
+            -1,
+            -24,
+            -25,
+            -256,
+            -257,
+            -65536,
+            -65537,
+            -4294967296,
+        ]
+        .iter()
+        .map(|i| Value::Integer(*i))
+        .collect::<Vec<_>>();
+
+        let mut sorted = expected.clone();
+        sorted.sort();
+
+        assert_eq!(expected, sorted);
+    }
+
+    #[test]
+    fn string_canonical_sort_order() {
+        let expected = ["", "a", "b", "aa"]
+            .iter()
+            .map(|s| Value::Text(s.to_string()))
+            .collect::<Vec<_>>();
+
+        let mut sorted = expected.clone();
+        sorted.sort();
+
+        assert_eq!(expected, sorted);
+    }
+
+    #[test]
+    fn bytes_canonical_sort_order() {
+        let expected = vec![vec![], vec![0u8], vec![1u8], vec![0u8, 0u8]]
+            .into_iter()
+            .map(|v| Value::Bytes(v))
+            .collect::<Vec<_>>();
+
+        let mut sorted = expected.clone();
+        sorted.sort();
+
+        assert_eq!(expected, sorted);
+    }
+
+    #[test]
+    fn simple_data_canonical_sort_order() {
+        let expected = vec![Value::Bool(false), Value::Bool(true), Value::Null];
+
+        let mut sorted = expected.clone();
+        sorted.sort();
+
+        assert_eq!(expected, sorted);
+    }
+
+    #[test]
+    fn major_type_canonical_sort_order() {
+        let expected = vec![
+            Value::Integer(0),
+            Value::Integer(-1),
+            Value::Bytes(vec![]),
+            Value::Text("".to_string()),
+            Value::Null,
+        ];
+
+        let mut sorted = expected.clone();
+        sorted.sort();
+
+        assert_eq!(expected, sorted);
+    }
+
+    #[test]
+    fn test_rfc_example() {
+        // See: https://tools.ietf.org/html/draft-ietf-cbor-7049bis-04#section-4.10
+        let expected = vec![
+            Value::Integer(10),
+            Value::Integer(100),
+            Value::Integer(-1),
+            Value::Text("z".to_owned()),
+            Value::Text("aa".to_owned()),
+            Value::Array(vec![Value::Integer(100)]),
+            Value::Array(vec![Value::Integer(-1)]),
+            Value::Bool(false),
+        ];
+        let mut sorted = expected.clone();
+        sorted.sort();
+        assert_eq!(expected, sorted);
+    }
+}
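The ordering exercised by `test_rfc_example` is the canonical key ordering from the referenced draft: values sort first by the length of their encoded form, then bytewise, which is why "z" (two encoded bytes) precedes "aa" (three encoded bytes). A minimal sketch of that single comparison, assuming the default `std` feature so that `serde_cbor::to_vec` is available:

use serde_cbor::value::Value;

fn main() {
    let z = Value::Text("z".to_owned()); // encodes as 61 7a (2 bytes)
    let aa = Value::Text("aa".to_owned()); // encodes as 62 61 61 (3 bytes)

    // The shorter encoding sorts first under the canonical rule.
    assert!(serde_cbor::to_vec(&z).unwrap().len() < serde_cbor::to_vec(&aa).unwrap().len());
    assert!(z < aa);
}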
diff --git a/crates/serde_cbor/tests/crash.cbor b/crates/serde_cbor/tests/crash.cbor
new file mode 100644
index 0000000..a3bc785
--- /dev/null
+++ b/crates/serde_cbor/tests/crash.cbor
Binary files differ
diff --git a/crates/serde_cbor/tests/de.rs b/crates/serde_cbor/tests/de.rs
new file mode 100644
index 0000000..01d7914
--- /dev/null
+++ b/crates/serde_cbor/tests/de.rs
@@ -0,0 +1,747 @@
+#[macro_use]
+extern crate serde_derive;
+
+use serde_cbor;
+use serde_cbor::de;
+
+#[test]
+fn test_str() {
+    let s: &str =
+        de::from_slice_with_scratch(&[0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72], &mut []).unwrap();
+    assert_eq!(s, "foobar");
+}
+
+#[test]
+fn test_bytes() {
+    let s: &[u8] =
+        de::from_slice_with_scratch(&[0x46, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72], &mut []).unwrap();
+    assert_eq!(s, b"foobar");
+}
+
+#[test]
+fn test_int() {
+    let num: i64 = de::from_slice_with_scratch(&[0x39, 0x07, 0xde], &mut []).unwrap();
+    assert_eq!(num, -2015);
+}
+
+#[test]
+fn test_float() {
+    let float: f64 = de::from_slice_with_scratch(b"\xfa\x47\xc3\x50\x00", &mut []).unwrap();
+    assert_eq!(float, 100000.0);
+}
+
+#[test]
+fn test_indefinite_object() {
+    #[derive(Debug, Deserialize, PartialEq)]
+    struct Foo {
+        a: u64,
+        b: [u64; 2],
+    }
+    let expected = Foo { a: 1, b: [2, 3] };
+    let actual: Foo =
+        de::from_slice_with_scratch(b"\xbfaa\x01ab\x9f\x02\x03\xff\xff", &mut []).unwrap();
+    assert_eq!(expected, actual);
+}
+
+#[cfg(feature = "std")]
+mod std_tests {
+    use std::collections::BTreeMap;
+
+    use serde::de as serde_de;
+    use serde_cbor::value::Value;
+    use serde_cbor::{de, error, to_vec, Deserializer};
+
+    #[test]
+    fn test_string1() {
+        let value: error::Result<Value> =
+            de::from_slice(&[0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]);
+        assert_eq!(value.unwrap(), Value::Text("foobar".to_owned()));
+    }
+
+    #[test]
+    fn test_string2() {
+        let value: error::Result<Value> = de::from_slice(&[
+            0x71, 0x49, 0x20, 0x6d, 0x65, 0x74, 0x20, 0x61, 0x20, 0x74, 0x72, 0x61, 0x76, 0x65,
+            0x6c, 0x6c, 0x65, 0x72,
+        ]);
+        assert_eq!(value.unwrap(), Value::Text("I met a traveller".to_owned()));
+    }
+
+    #[test]
+    fn test_string3() {
+        let slice = b"\x78\x2fI met a traveller from an antique land who said";
+        let value: error::Result<Value> = de::from_slice(slice);
+        assert_eq!(
+            value.unwrap(),
+            Value::Text("I met a traveller from an antique land who said".to_owned())
+        );
+    }
+
+    #[test]
+    fn test_byte_string() {
+        let value: error::Result<Value> =
+            de::from_slice(&[0x46, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]);
+        assert_eq!(value.unwrap(), Value::Bytes(b"foobar".to_vec()));
+    }
+
+    #[test]
+    fn test_numbers1() {
+        let value: error::Result<Value> = de::from_slice(&[0x00]);
+        assert_eq!(value.unwrap(), Value::Integer(0));
+    }
+
+    #[test]
+    fn test_numbers2() {
+        let value: error::Result<Value> = de::from_slice(&[0x1a, 0x00, 0xbc, 0x61, 0x4e]);
+        assert_eq!(value.unwrap(), Value::Integer(12345678));
+    }
+
+    #[test]
+    fn test_numbers3() {
+        let value: error::Result<Value> = de::from_slice(&[0x39, 0x07, 0xde]);
+        assert_eq!(value.unwrap(), Value::Integer(-2015));
+    }
+
+    #[test]
+    fn test_bool() {
+        let value: error::Result<Value> = de::from_slice(b"\xf4");
+        assert_eq!(value.unwrap(), Value::Bool(false));
+    }
+
+    #[test]
+    fn test_trailing_bytes() {
+        let value: error::Result<Value> = de::from_slice(b"\xf4trailing");
+        assert!(value.is_err());
+    }
+
+    #[test]
+    fn test_list1() {
+        let value: error::Result<Value> = de::from_slice(b"\x83\x01\x02\x03");
+        assert_eq!(
+            value.unwrap(),
+            Value::Array(vec![
+                Value::Integer(1),
+                Value::Integer(2),
+                Value::Integer(3)
+            ])
+        );
+    }
+
+    #[test]
+    fn test_list2() {
+        let value: error::Result<Value> = de::from_slice(b"\x82\x01\x82\x02\x81\x03");
+        assert_eq!(
+            value.unwrap(),
+            Value::Array(vec![
+                Value::Integer(1),
+                Value::Array(vec![
+                    Value::Integer(2),
+                    Value::Array(vec![Value::Integer(3)])
+                ])
+            ])
+        );
+    }
+
+    #[test]
+    fn test_object() {
+        let value: error::Result<Value> = de::from_slice(b"\xa5aaaAabaBacaCadaDaeaE");
+        let mut object = BTreeMap::new();
+        object.insert(Value::Text("a".to_owned()), Value::Text("A".to_owned()));
+        object.insert(Value::Text("b".to_owned()), Value::Text("B".to_owned()));
+        object.insert(Value::Text("c".to_owned()), Value::Text("C".to_owned()));
+        object.insert(Value::Text("d".to_owned()), Value::Text("D".to_owned()));
+        object.insert(Value::Text("e".to_owned()), Value::Text("E".to_owned()));
+        assert_eq!(value.unwrap(), Value::Map(object));
+    }
+
+    #[test]
+    fn test_indefinite_object() {
+        let value: error::Result<Value> = de::from_slice(b"\xbfaa\x01ab\x9f\x02\x03\xff\xff");
+        let mut object = BTreeMap::new();
+        object.insert(Value::Text("a".to_owned()), Value::Integer(1));
+        object.insert(
+            Value::Text("b".to_owned()),
+            Value::Array(vec![Value::Integer(2), Value::Integer(3)]),
+        );
+        assert_eq!(value.unwrap(), Value::Map(object));
+    }
+
+    #[test]
+    fn test_indefinite_list() {
+        let value: error::Result<Value> = de::from_slice(b"\x9f\x01\x02\x03\xff");
+        assert_eq!(
+            value.unwrap(),
+            Value::Array(vec![
+                Value::Integer(1),
+                Value::Integer(2),
+                Value::Integer(3)
+            ])
+        );
+    }
+
+    #[test]
+    fn test_indefinite_string() {
+        let value: error::Result<Value> =
+            de::from_slice(b"\x7f\x65Mary \x64Had \x62a \x67Little \x60\x64Lamb\xff");
+        assert_eq!(
+            value.unwrap(),
+            Value::Text("Mary Had a Little Lamb".to_owned())
+        );
+    }
+
+    #[test]
+    fn test_indefinite_byte_string() {
+        let value: error::Result<Value> = de::from_slice(b"\x5f\x42\x01\x23\x42\x45\x67\xff");
+        assert_eq!(value.unwrap(), Value::Bytes(b"\x01#Eg".to_vec()));
+    }
+
+    #[test]
+    fn test_multiple_indefinite_strings() {
+        let input = b"\x82\x7f\x65Mary \x64Had \x62a \x67Little \x60\x64Lamb\xff\x5f\x42\x01\x23\x42\x45\x67\xff";
+        _test_multiple_indefinite_strings(de::from_slice(input));
+        _test_multiple_indefinite_strings(de::from_mut_slice(input.to_vec().as_mut()));
+        let mut buf = [0u8; 64];
+        _test_multiple_indefinite_strings(de::from_slice_with_scratch(input, &mut buf));
+    }
+    fn _test_multiple_indefinite_strings(value: error::Result<Value>) {
+        // This checks that buffer rewinding while parsing indefinite-length strings works as intended.
+        assert_eq!(
+            value.unwrap(),
+            Value::Array(vec![
+                Value::Text("Mary Had a Little Lamb".to_owned()),
+                Value::Bytes(b"\x01#Eg".to_vec())
+            ])
+        );
+    }
+
+    #[test]
+    fn test_float() {
+        let value: error::Result<Value> = de::from_slice(b"\xfa\x47\xc3\x50\x00");
+        assert_eq!(value.unwrap(), Value::Float(100000.0));
+    }
+
+    #[test]
+    fn test_self_describing() {
+        let value: error::Result<Value> =
+            de::from_slice(&[0xd9, 0xd9, 0xf7, 0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]);
+        let expected = Value::Text("foobar".to_owned());
+        let strip_tags = |x: Value| {
+            if let Value::Tag(_, inner) = x {
+                *inner
+            } else {
+                x
+            }
+        };
+        assert_eq!(strip_tags(value.unwrap()), expected);
+    }
+
+    #[test]
+    fn test_f16() {
+        let mut x: Value = de::from_slice(&[0xf9, 0x41, 0x00]).unwrap();
+        assert_eq!(x, Value::Float(2.5));
+        x = de::from_slice(&[0xf9, 0x41, 0x90]).unwrap();
+        assert_eq!(x, Value::Float(2.78125));
+        x = de::from_slice(&[0xf9, 0x50, 0x90]).unwrap();
+        assert_eq!(x, Value::Float(36.5));
+        x = de::from_slice(&[0xf9, 0xd0, 0x90]).unwrap();
+        assert_eq!(x, Value::Float(-36.5));
+    }
+
+    #[test]
+    fn test_crazy_list() {
+        let slice = b"\x88\x1b\x00\x00\x00\x1c\xbe\x99\x1d\xc7\x3b\x00\x7a\xcf\x51\xdc\x51\x70\xdb\x3a\x1b\x3a\x06\xdd\xf5\xf6\xf7\xfb\x41\x76\x5e\xb1\xf8\x00\x00\x00\xf9\x7c\x00";
+        let value: Vec<Value> = de::from_slice(slice).unwrap();
+        assert_eq!(
+            value,
+            vec![
+                Value::Integer(123456789959),
+                Value::Integer(-34567897654325468),
+                Value::Integer(-456787678),
+                Value::Bool(true),
+                Value::Null,
+                Value::Null,
+                Value::Float(23456543.5),
+                Value::Float(::std::f64::INFINITY)
+            ]
+        );
+    }
+
+    #[test]
+    fn test_nan() {
+        let value: f64 = de::from_slice(b"\xf9\x7e\x00").unwrap();
+        assert!(value.is_nan());
+    }
+
+    #[test]
+    fn test_32f16() {
+        let value: f32 = de::from_slice(b"\xf9\x50\x00").unwrap();
+        assert_eq!(value, 32.0f32);
+    }
+
+    #[test]
+    // The file was reported as not working by user kie0tauB,
+    // but it parses to a CBOR value.
+    fn test_kietaub_file() {
+        let file = include_bytes!("kietaub.cbor");
+        let value_result: error::Result<Value> = de::from_slice(file);
+        value_result.unwrap();
+    }
+
+    #[test]
+    fn test_option_roundtrip() {
+        let obj1 = Some(10u32);
+
+        let v = to_vec(&obj1).unwrap();
+        let obj2: Result<Option<u32>, _> = serde_cbor::de::from_reader(&v[..]);
+        println!("{:?}", obj2);
+
+        assert_eq!(obj1, obj2.unwrap());
+    }
+
+    #[test]
+    fn test_option_none_roundtrip() {
+        let obj1 = None;
+
+        let v = to_vec(&obj1).unwrap();
+        println!("{:?}", v);
+        let obj2: Result<Option<u32>, _> = serde_cbor::de::from_reader(&v[..]);
+
+        assert_eq!(obj1, obj2.unwrap());
+    }
+
+    #[test]
+    fn test_variable_length_map() {
+        let slice = b"\xbf\x67\x6d\x65\x73\x73\x61\x67\x65\x64\x70\x6f\x6e\x67\xff";
+        let value: Value = de::from_slice(slice).unwrap();
+        let mut map = BTreeMap::new();
+        map.insert(
+            Value::Text("message".to_string()),
+            Value::Text("pong".to_string()),
+        );
+        assert_eq!(value, Value::Map(map))
+    }
+
+    #[test]
+    fn test_object_determinism_roundtrip() {
+        let expected = b"\xa2aa\x01ab\x82\x02\x03";
+
+        // A non-deterministic ordering of the two keys would still match with probability
+        // 1/2 per attempt, so ten attempts leave only a ~0.1% chance of missing a failure.
+        for _ in 0..10 {
+            assert_eq!(
+                &to_vec(&de::from_slice::<Value>(expected).unwrap()).unwrap(),
+                expected
+            );
+        }
+    }
+
+    #[test]
+    fn stream_deserializer() {
+        let slice = b"\x01\x66foobar";
+        let mut it = Deserializer::from_slice(slice).into_iter::<Value>();
+        assert_eq!(Value::Integer(1), it.next().unwrap().unwrap());
+        assert_eq!(
+            Value::Text("foobar".to_string()),
+            it.next().unwrap().unwrap()
+        );
+        assert!(it.next().is_none());
+    }
+
+    #[test]
+    fn stream_deserializer_eof() {
+        let slice = b"\x01\x66foob";
+        let mut it = Deserializer::from_slice(slice).into_iter::<Value>();
+        assert_eq!(Value::Integer(1), it.next().unwrap().unwrap());
+        assert!(it.next().unwrap().unwrap_err().is_eof());
+    }
+
+    #[test]
+    fn stream_deserializer_eof_in_indefinite() {
+        let slice = b"\x7f\x65Mary \x64Had \x62a \x60\x67Little \x60\x64Lamb\xff";
+        let indices: &[usize] = &[
+            2,  // announcement but no data
+            10, // mid-buffer EOF
+            12, // neither new element nor end marker
+        ];
+        for end_of_slice in indices {
+            let mut it = Deserializer::from_slice(&slice[..*end_of_slice]).into_iter::<Value>();
+            assert!(it.next().unwrap().unwrap_err().is_eof());
+
+            let mut mutcopy = slice[..*end_of_slice].to_vec();
+            let mut it = Deserializer::from_mut_slice(mutcopy.as_mut()).into_iter::<Value>();
+            assert!(it.next().unwrap().unwrap_err().is_eof());
+
+            let mut buf = [0u8; 64];
+            let mut it = Deserializer::from_slice_with_scratch(&slice[..*end_of_slice], &mut buf)
+                .into_iter::<Value>();
+            assert!(it.next().unwrap().unwrap_err().is_eof());
+        }
+    }
+
+    #[test]
+    fn crash() {
+        let file = include_bytes!("crash.cbor");
+        let value_result: error::Result<Value> = de::from_slice(file);
+        assert_eq!(
+            value_result.unwrap_err().classify(),
+            serde_cbor::error::Category::Syntax
+        );
+    }
+
+    fn from_slice_stream<'a, T>(slice: &'a [u8]) -> error::Result<(&'a [u8], T)>
+    where
+        T: serde_de::Deserialize<'a>,
+    {
+        let mut deserializer = Deserializer::from_slice(slice);
+        let value = serde_de::Deserialize::deserialize(&mut deserializer)?;
+        let rest = &slice[deserializer.byte_offset()..];
+
+        Ok((rest, value))
+    }
+
+    #[test]
+    fn test_slice_offset() {
+        let v: Vec<u8> = vec![
+            0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72, 0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72,
+        ];
+        let (rest, value): (&[u8], String) = from_slice_stream(&v[..]).unwrap();
+        assert_eq!(value, "foobar");
+        assert_eq!(rest, &[0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]);
+        let (rest, value): (&[u8], String) = from_slice_stream(rest).unwrap();
+        assert_eq!(value, "foobar");
+        assert_eq!(rest, &[]);
+    }
+
+    #[derive(Debug, Copy, Clone)]
+    struct Options {
+        standard: bool,
+        legacy: bool,
+        packed: bool,
+        named: bool,
+    }
+
+    impl Default for Options {
+        fn default() -> Self {
+            Options {
+                standard: true,
+                legacy: true,
+                packed: true,
+                named: true,
+            }
+        }
+    }
+
+    impl Options {
+        fn no_standard(self) -> Self {
+            Options {
+                standard: false,
+                ..self
+            }
+        }
+
+        fn no_legacy(self) -> Self {
+            Options {
+                legacy: false,
+                ..self
+            }
+        }
+
+        fn no_packed(self) -> Self {
+            Options {
+                packed: false,
+                ..self
+            }
+        }
+
+        fn no_named(self) -> Self {
+            Options {
+                named: false,
+                ..self
+            }
+        }
+    }
+
+    fn from_slice_stream_options<'a, T>(
+        slice: &'a [u8],
+        options: Options,
+    ) -> error::Result<(&'a [u8], T)>
+    where
+        T: serde_de::Deserialize<'a>,
+    {
+        let deserializer = Deserializer::from_slice(slice);
+        let deserializer = if !options.packed {
+            deserializer.disable_packed_format()
+        } else {
+            deserializer
+        };
+        let deserializer = if !options.named {
+            deserializer.disable_named_format()
+        } else {
+            deserializer
+        };
+        let deserializer = if !options.standard {
+            deserializer.disable_standard_enums()
+        } else {
+            deserializer
+        };
+        let mut deserializer = if !options.legacy {
+            deserializer.disable_legacy_enums()
+        } else {
+            deserializer
+        };
+        let value = serde_de::Deserialize::deserialize(&mut deserializer)?;
+        let rest = &slice[deserializer.byte_offset()..];
+
+        Ok((rest, value))
+    }
+
+    #[test]
+    fn test_deserializer_enums() {
+        #[derive(Debug, PartialEq, Deserialize)]
+        enum Enum {
+            Unit,
+            NewType(i32),
+            Tuple(String, bool),
+            Struct { x: i32, y: i32 },
+        }
+
+        // This is the format used in serde >= 0.10
+        //
+        // Serialization of Enum::NewType(10)
+        let v: Vec<u8> = vec![
+            0xa1, // map 1pair
+            0x67, 0x4e, 0x65, 0x77, 0x54, 0x79, 0x70, 0x65, // utf8 string: NewType
+            0x1a, // u32
+            0x00, 0x00, 0x00, 0x0a, // 10 (dec)
+        ];
+        let (_rest, value): (&[u8], Enum) = from_slice_stream(&v[..]).unwrap();
+        assert_eq!(value, Enum::NewType(10));
+        let (_rest, value): (&[u8], Enum) =
+            from_slice_stream_options(&v[..], Options::default().no_legacy()).unwrap();
+        assert_eq!(value, Enum::NewType(10));
+        let value: error::Result<(&[u8], Enum)> =
+            from_slice_stream_options(&v[..], Options::default().no_standard());
+        assert_eq!(
+            value.unwrap_err().classify(),
+            serde_cbor::error::Category::Syntax
+        );
+        let value: error::Result<(&[u8], Enum)> =
+            from_slice_stream_options(&v[..], Options::default().no_standard().no_legacy());
+        assert_eq!(
+            value.unwrap_err().classify(),
+            serde_cbor::error::Category::Syntax
+        );
+        // Serialization of Enum::Unit
+        let v: Vec<u8> = vec![
+            0x64, 0x55, 0x6e, 0x69, 0x74, // utf8 string: Unit
+        ];
+        let (_rest, value): (&[u8], Enum) = from_slice_stream(&v[..]).unwrap();
+        assert_eq!(value, Enum::Unit);
+        let (_rest, value): (&[u8], Enum) =
+            from_slice_stream_options(&v[..], Options::default().no_legacy()).unwrap();
+        assert_eq!(value, Enum::Unit);
+        let (_rest, value): (&[u8], Enum) =
+            from_slice_stream_options(&v[..], Options::default().no_standard()).unwrap();
+        assert_eq!(value, Enum::Unit);
+        let value: error::Result<(&[u8], Enum)> =
+            from_slice_stream_options(&v[..], Options::default().no_legacy().no_standard());
+        assert_eq!(
+            value.unwrap_err().classify(),
+            serde_cbor::error::Category::Syntax
+        );
+
+        // This is the format used in serde <= 0.9
+        let v: Vec<u8> = vec![
+            0x82, // array 2 items
+            0x67, 0x4e, 0x65, 0x77, 0x54, 0x79, 0x70, 0x65, // utf8 string: NewType
+            0x1a, // u32
+            0x00, 0x00, 0x00, 0x0a, // 10 (dec)
+        ];
+        let (_rest, value): (&[u8], Enum) = from_slice_stream(&v[..]).unwrap();
+        assert_eq!(value, Enum::NewType(10));
+        let value: error::Result<(&[u8], Enum)> =
+            from_slice_stream_options(&v[..], Options::default().no_legacy());
+        assert_eq!(
+            value.unwrap_err().classify(),
+            serde_cbor::error::Category::Syntax
+        );
+        let value: error::Result<(&[u8], Enum)> =
+            from_slice_stream_options(&v[..], Options::default().no_standard());
+        assert_eq!(value.unwrap().1, Enum::NewType(10));
+        let value: error::Result<(&[u8], Enum)> =
+            from_slice_stream_options(&v[..], Options::default().no_standard().no_legacy());
+        assert_eq!(
+            value.unwrap_err().classify(),
+            serde_cbor::error::Category::Syntax
+        );
+    }
+
+    #[test]
+    fn test_packed_deserialization() {
+        #[derive(Debug, PartialEq, Deserialize)]
+        struct User {
+            user_id: u32,
+            password_hash: [u8; 4],
+        }
+
+        // unpacked
+        let v: Vec<u8> = vec![
+            0xa2, // map 2pair
+            0x67, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, // utf8 string: user_id
+            0x0a, // integer: 10
+            // utf8 string: password_hash
+            0x6d, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68,
+            0x84, 0x01, 0x02, 0x03, 0x04, // 4 byte array [1, 2, 3, 4]
+        ];
+
+        let (_rest, value): (&[u8], User) = from_slice_stream(&v[..]).unwrap();
+        assert_eq!(
+            value,
+            User {
+                user_id: 10,
+                password_hash: [1, 2, 3, 4],
+            }
+        );
+        let (_rest, value): (&[u8], User) =
+            from_slice_stream_options(&v[..], Options::default().no_packed()).unwrap();
+        assert_eq!(
+            value,
+            User {
+                user_id: 10,
+                password_hash: [1, 2, 3, 4],
+            }
+        );
+        let value: error::Result<(&[u8], User)> =
+            from_slice_stream_options(&v[..], Options::default().no_named());
+        assert_eq!(
+            value.unwrap_err().classify(),
+            serde_cbor::error::Category::Syntax
+        );
+
+        // unpacked - indefinite length
+        let v: Vec<u8> = vec![
+            0xbf, // map to be followed by a break
+            0x67, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, // utf8 string: user_id
+            0x0a, // integer: 10
+            // utf8 string: password_hash
+            0x6d, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68,
+            0x84, 0x01, 0x02, 0x03, 0x04, // 4 byte array [1, 2, 3, 4]
+            0xff, // break
+        ];
+
+        let (_rest, value): (&[u8], User) = from_slice_stream(&v[..]).unwrap();
+        assert_eq!(
+            value,
+            User {
+                user_id: 10,
+                password_hash: [1, 2, 3, 4],
+            }
+        );
+        let (_rest, value): (&[u8], User) =
+            from_slice_stream_options(&v[..], Options::default().no_packed()).unwrap();
+        assert_eq!(
+            value,
+            User {
+                user_id: 10,
+                password_hash: [1, 2, 3, 4],
+            }
+        );
+        let value: error::Result<(&[u8], User)> =
+            from_slice_stream_options(&v[..], Options::default().no_named());
+        assert_eq!(
+            value.unwrap_err().classify(),
+            serde_cbor::error::Category::Syntax
+        );
+
+        // packed
+        let v: Vec<u8> = vec![
+            0xa2, // map 2pair
+            0x00, // index 0
+            0x0a, // integer: 10
+            0x01, // index 1
+            0x84, 0x01, 0x02, 0x03, 0x04, // 4 byte array [1, 2, 3, 4]
+        ];
+
+        let (_rest, value): (&[u8], User) = from_slice_stream(&v[..]).unwrap();
+        assert_eq!(
+            value,
+            User {
+                user_id: 10,
+                password_hash: [1, 2, 3, 4],
+            }
+        );
+        let (_rest, value): (&[u8], User) =
+            from_slice_stream_options(&v[..], Options::default().no_named()).unwrap();
+        assert_eq!(
+            value,
+            User {
+                user_id: 10,
+                password_hash: [1, 2, 3, 4],
+            }
+        );
+        let value: error::Result<(&[u8], User)> =
+            from_slice_stream_options(&v[..], Options::default().no_packed());
+        assert_eq!(
+            value.unwrap_err().classify(),
+            serde_cbor::error::Category::Syntax
+        );
+
+        // packed - indefinite length
+        let v: Vec<u8> = vec![
+            0xbf, // map, to be followed by a break
+            0x00, // index 0
+            0x0a, // integer: 10
+            0x01, // index 1
+            0x84, 0x01, 0x02, 0x03, 0x04, // 4 byte array [1, 2, 3, 4]
+            0xff, // break
+        ];
+
+        let (_rest, value): (&[u8], User) = from_slice_stream(&v[..]).unwrap();
+        assert_eq!(
+            value,
+            User {
+                user_id: 10,
+                password_hash: [1, 2, 3, 4],
+            }
+        );
+        let (_rest, value): (&[u8], User) =
+            from_slice_stream_options(&v[..], Options::default().no_named()).unwrap();
+        assert_eq!(
+            value,
+            User {
+                user_id: 10,
+                password_hash: [1, 2, 3, 4],
+            }
+        );
+        let value: error::Result<(&[u8], User)> =
+            from_slice_stream_options(&v[..], Options::default().no_packed());
+        assert_eq!(
+            value.unwrap_err().classify(),
+            serde_cbor::error::Category::Syntax
+        );
+    }
+
+    use serde_cbor::{de::from_slice, ser::to_vec_packed};
+    use std::net::{IpAddr, Ipv4Addr};
+    #[test]
+    fn test_ipaddr_deserialization() {
+        let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
+        let buf = to_vec_packed(&ip).unwrap();
+        let deserialized_ip = from_slice::<IpAddr>(&buf).unwrap();
+        assert_eq!(ip, deserialized_ip);
+
+        let buf = to_vec(&ip).unwrap();
+        let deserialized_ip = from_slice::<IpAddr>(&buf).unwrap();
+        assert_eq!(ip, deserialized_ip);
+    }
+
+    #[test]
+    fn attempt_stack_overflow() {
+        // Create a tag 17, followed by 999 more tag 17:
+        // 17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(...
+        // This causes deep recursion in the decoder and may
+        // exhaust the stack and therefore result in a stack overflow.
+        let input = vec![0xd1; 1000];
+        let err = serde_cbor::from_slice::<serde_cbor::Value>(&input).expect_err("recursion limit");
+        assert!(err.is_syntax());
+    }
+}
diff --git a/crates/serde_cbor/tests/enum.rs b/crates/serde_cbor/tests/enum.rs
new file mode 100644
index 0000000..630500d
--- /dev/null
+++ b/crates/serde_cbor/tests/enum.rs
@@ -0,0 +1,236 @@
+use serde::Serialize;
+use serde_cbor;
+use serde_cbor::ser::{Serializer, SliceWrite};
+
+#[macro_use]
+extern crate serde_derive;
+
+#[test]
+fn test_simple_data_enum_roundtrip() {
+    #[derive(Debug, Serialize, Deserialize, PartialEq)]
+    enum DataEnum {
+        A(u32),
+        B(f32),
+    }
+
+    let a = DataEnum::A(42);
+
+    let mut slice = [0u8; 64];
+    let writer = SliceWrite::new(&mut slice);
+    let mut serializer = Serializer::new(writer);
+    a.serialize(&mut serializer).unwrap();
+    let writer = serializer.into_inner();
+    let end = writer.bytes_written();
+    let slice = writer.into_inner();
+    let deserialized: DataEnum =
+        serde_cbor::de::from_slice_with_scratch(&slice[..end], &mut []).unwrap();
+    assert_eq!(a, deserialized);
+}
+
+#[cfg(feature = "std")]
+mod std_tests {
+    use std::collections::BTreeMap;
+
+    use serde_cbor::ser::{IoWrite, Serializer};
+    use serde_cbor::value::Value;
+    use serde_cbor::{from_slice, to_vec};
+
+    pub fn to_vec_legacy<T>(value: &T) -> serde_cbor::Result<Vec<u8>>
+    where
+        T: serde::ser::Serialize,
+    {
+        let mut vec = Vec::new();
+        value.serialize(&mut Serializer::new(&mut IoWrite::new(&mut vec)).legacy_enums())?;
+        Ok(vec)
+    }
+
+    #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+    enum Enum {
+        A,
+        B,
+    }
+
+    #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+    struct EnumStruct {
+        e: Enum,
+    }
+
+    #[test]
+    fn test_enum() {
+        let enum_struct = EnumStruct { e: Enum::B };
+        let raw = &to_vec(&enum_struct).unwrap();
+        println!("raw enum {:?}", raw);
+        let re: EnumStruct = from_slice(raw).unwrap();
+        assert_eq!(enum_struct, re);
+    }
+
+    #[repr(u16)]
+    #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+    enum ReprEnum {
+        A,
+        B,
+    }
+
+    #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+    struct ReprEnumStruct {
+        e: ReprEnum,
+    }
+
+    #[test]
+    fn test_repr_enum() {
+        let repr_enum_struct = ReprEnumStruct { e: ReprEnum::B };
+        let re: ReprEnumStruct = from_slice(&to_vec(&repr_enum_struct).unwrap()).unwrap();
+        assert_eq!(repr_enum_struct, re);
+    }
+
+    #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+    enum DataEnum {
+        A(u32),
+        B(bool, u8),
+        C { x: u8, y: String },
+    }
+
+    #[test]
+    fn test_data_enum() {
+        let data_enum_a = DataEnum::A(4);
+        let re_a: DataEnum = from_slice(&to_vec(&data_enum_a).unwrap()).unwrap();
+        assert_eq!(data_enum_a, re_a);
+        let data_enum_b = DataEnum::B(true, 42);
+        let re_b: DataEnum = from_slice(&to_vec(&data_enum_b).unwrap()).unwrap();
+        assert_eq!(data_enum_b, re_b);
+        let data_enum_c = DataEnum::C {
+            x: 3,
+            y: "foo".to_owned(),
+        };
+        println!("{:?}", &to_vec(&data_enum_c).unwrap());
+        let re_c: DataEnum = from_slice(&to_vec(&data_enum_c).unwrap()).unwrap();
+        assert_eq!(data_enum_c, re_c);
+    }
+
+    #[test]
+    fn test_serialize() {
+        assert_eq!(to_vec_legacy(&Enum::A).unwrap(), &[97, 65]);
+        assert_eq!(to_vec_legacy(&Enum::B).unwrap(), &[97, 66]);
+        assert_eq!(
+            to_vec_legacy(&DataEnum::A(42)).unwrap(),
+            &[130, 97, 65, 24, 42]
+        );
+        assert_eq!(
+            to_vec_legacy(&DataEnum::B(true, 9)).unwrap(),
+            &[131, 97, 66, 245, 9]
+        );
+    }
+
+    #[test]
+    fn test_newtype_struct() {
+        #[derive(Debug, Deserialize, Serialize, PartialEq, Eq)]
+        pub struct Newtype(u8);
+        assert_eq!(to_vec(&142u8).unwrap(), to_vec(&Newtype(142u8)).unwrap());
+        assert_eq!(from_slice::<Newtype>(&[24, 142]).unwrap(), Newtype(142));
+    }
+
+    #[derive(Deserialize, PartialEq, Debug)]
+    enum Foo {
+        #[serde(rename = "require")]
+        Require,
+    }
+
+    #[test]
+    fn test_variable_length_array() {
+        let slice = b"\x9F\x67\x72\x65\x71\x75\x69\x72\x65\xFF";
+        let value: Vec<Foo> = from_slice(slice).unwrap();
+        assert_eq!(value, [Foo::Require]);
+    }
+
+    #[derive(Serialize, Deserialize, PartialEq, Debug)]
+    enum Bar {
+        Empty,
+        Number(i32),
+        Flag(String, bool),
+        Point { x: i32, y: i32 },
+    }
+
+    #[test]
+    fn test_enum_as_map() {
+        // unit variants serialize like bare strings
+        let empty_s = to_vec_legacy(&Bar::Empty).unwrap();
+        let empty_str_s = to_vec_legacy(&"Empty").unwrap();
+        assert_eq!(empty_s, empty_str_s);
+
+        // tuple-variants serialize like ["<variant>", values..]
+        let number_s = to_vec_legacy(&Bar::Number(42)).unwrap();
+        let number_vec = vec![Value::Text("Number".to_string()), Value::Integer(42)];
+        let number_vec_s = to_vec_legacy(&number_vec).unwrap();
+        assert_eq!(number_s, number_vec_s);
+
+        let flag_s = to_vec_legacy(&Bar::Flag("foo".to_string(), true)).unwrap();
+        let flag_vec = vec![
+            Value::Text("Flag".to_string()),
+            Value::Text("foo".to_string()),
+            Value::Bool(true),
+        ];
+        let flag_vec_s = to_vec_legacy(&flag_vec).unwrap();
+        assert_eq!(flag_s, flag_vec_s);
+
+        // struct-variants serialize like ["<variant>", {struct..}]
+        let point_s = to_vec_legacy(&Bar::Point { x: 5, y: -5 }).unwrap();
+        let mut struct_map = BTreeMap::new();
+        struct_map.insert(Value::Text("x".to_string()), Value::Integer(5));
+        struct_map.insert(Value::Text("y".to_string()), Value::Integer(-5));
+        let point_vec = vec![
+            Value::Text("Point".to_string()),
+            Value::Map(struct_map.clone()),
+        ];
+        let point_vec_s = to_vec_legacy(&point_vec).unwrap();
+        assert_eq!(point_s, point_vec_s);
+
+        // enum_as_map matches serde_json's default serialization for enums.
+
+        // unit variants still serialize like bare strings
+        let empty_s = to_vec(&Bar::Empty).unwrap();
+        assert_eq!(empty_s, empty_str_s);
+
+        // 1-element tuple variants serialize like {"<variant>": value}
+        let number_s = to_vec(&Bar::Number(42)).unwrap();
+        let mut number_map = BTreeMap::new();
+        number_map.insert("Number", 42);
+        let number_map_s = to_vec(&number_map).unwrap();
+        assert_eq!(number_s, number_map_s);
+
+        // multi-element tuple variants serialize like {"<variant>": [values..]}
+        let flag_s = to_vec(&Bar::Flag("foo".to_string(), true)).unwrap();
+        let mut flag_map = BTreeMap::new();
+        flag_map.insert(
+            "Flag",
+            vec![Value::Text("foo".to_string()), Value::Bool(true)],
+        );
+        let flag_map_s = to_vec(&flag_map).unwrap();
+        assert_eq!(flag_s, flag_map_s);
+
+        // struct-variants serialize like {"<variant>": {struct..}}
+        let point_s = to_vec(&Bar::Point { x: 5, y: -5 }).unwrap();
+        let mut point_map = BTreeMap::new();
+        point_map.insert("Point", Value::Map(struct_map));
+        let point_map_s = to_vec(&point_map).unwrap();
+        assert_eq!(point_s, point_map_s);
+
+        // deserialization of all encodings should just work
+        let empty_str_ds = from_slice(&empty_str_s).unwrap();
+        assert_eq!(Bar::Empty, empty_str_ds);
+
+        let number_vec_ds = from_slice(&number_vec_s).unwrap();
+        assert_eq!(Bar::Number(42), number_vec_ds);
+        let number_map_ds = from_slice(&number_map_s).unwrap();
+        assert_eq!(Bar::Number(42), number_map_ds);
+
+        let flag_vec_ds = from_slice(&flag_vec_s).unwrap();
+        assert_eq!(Bar::Flag("foo".to_string(), true), flag_vec_ds);
+        let flag_map_ds = from_slice(&flag_map_s).unwrap();
+        assert_eq!(Bar::Flag("foo".to_string(), true), flag_map_ds);
+
+        let point_vec_ds = from_slice(&point_vec_s).unwrap();
+        assert_eq!(Bar::Point { x: 5, y: -5 }, point_vec_ds);
+        let point_map_ds = from_slice(&point_map_s).unwrap();
+        assert_eq!(Bar::Point { x: 5, y: -5 }, point_map_ds);
+    }
+}
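To make the two enum encodings concrete: under the default map-based format, `Bar::Number(42)` encodes as `{"Number": 42}` (A1 66 "Number" 18 2A), while the legacy format yields the two-element array `["Number", 42]` (82 66 "Number" 18 2A). A small sketch of both, assuming serde's derive support and the `legacy_enums` builder used by `to_vec_legacy` above:

use serde::Serialize;
use serde_cbor::ser::{IoWrite, Serializer};

#[derive(Serialize)]
enum Bar {
    Number(i32),
}

fn main() {
    // Default (map-based) format: {"Number": 42}
    let map_form = serde_cbor::to_vec(&Bar::Number(42)).unwrap();
    assert_eq!(map_form, b"\xa1\x66Number\x18\x2a");

    // Legacy (array-based) format: ["Number", 42]
    let mut legacy_form = Vec::new();
    Bar::Number(42)
        .serialize(&mut Serializer::new(IoWrite::new(&mut legacy_form)).legacy_enums())
        .unwrap();
    assert_eq!(legacy_form, b"\x82\x66Number\x18\x2a");
}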
diff --git a/crates/serde_cbor/tests/kietaub.cbor b/crates/serde_cbor/tests/kietaub.cbor
new file mode 100644
index 0000000..866a093
--- /dev/null
+++ b/crates/serde_cbor/tests/kietaub.cbor
Binary files differ
diff --git a/crates/serde_cbor/tests/ser.rs b/crates/serde_cbor/tests/ser.rs
new file mode 100644
index 0000000..d374ce2
--- /dev/null
+++ b/crates/serde_cbor/tests/ser.rs
@@ -0,0 +1,254 @@
+use serde::Serialize;
+use serde_cbor::ser::{Serializer, SliceWrite};
+
+#[test]
+fn test_str() {
+    serialize_and_compare("foobar", b"ffoobar");
+}
+
+#[test]
+fn test_list() {
+    serialize_and_compare(&[1, 2, 3], b"\x83\x01\x02\x03");
+}
+
+#[test]
+fn test_float() {
+    serialize_and_compare(12.3f64, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
+}
+
+#[test]
+fn test_integer() {
+    // u8
+    serialize_and_compare(24, b"\x18\x18");
+    // i8
+    serialize_and_compare(-5, b"\x24");
+    // i16
+    serialize_and_compare(-300, b"\x39\x01\x2b");
+    // i32
+    serialize_and_compare(-23567997, b"\x3a\x01\x67\x9e\x7c");
+    // u64
+    serialize_and_compare(::core::u64::MAX, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
+}
+
+fn serialize_and_compare<T: Serialize>(value: T, expected: &[u8]) {
+    let mut slice = [0u8; 64];
+    let writer = SliceWrite::new(&mut slice);
+    let mut serializer = Serializer::new(writer);
+    value.serialize(&mut serializer).unwrap();
+    let writer = serializer.into_inner();
+    let end = writer.bytes_written();
+    let slice = writer.into_inner();
+    assert_eq!(&slice[..end], expected);
+}
+
+#[cfg(feature = "std")]
+mod std_tests {
+    use serde::Serializer;
+    use serde_cbor::ser;
+    use serde_cbor::{from_slice, to_vec};
+    use std::collections::BTreeMap;
+
+    #[test]
+    fn test_string() {
+        let value = "foobar".to_owned();
+        assert_eq!(&to_vec(&value).unwrap()[..], b"ffoobar");
+    }
+
+    #[test]
+    fn test_list() {
+        let value = vec![1, 2, 3];
+        assert_eq!(&to_vec(&value).unwrap()[..], b"\x83\x01\x02\x03");
+    }
+
+    #[test]
+    fn test_object() {
+        let mut object = BTreeMap::new();
+        object.insert("a".to_owned(), "A".to_owned());
+        object.insert("b".to_owned(), "B".to_owned());
+        object.insert("c".to_owned(), "C".to_owned());
+        object.insert("d".to_owned(), "D".to_owned());
+        object.insert("e".to_owned(), "E".to_owned());
+        let vec = to_vec(&object).unwrap();
+        let test_object = from_slice(&vec[..]).unwrap();
+        assert_eq!(object, test_object);
+    }
+
+    #[test]
+    fn test_object_list_keys() {
+        let mut object = BTreeMap::new();
+        object.insert(vec![0i64], ());
+        object.insert(vec![100i64], ());
+        object.insert(vec![-1i64], ());
+        object.insert(vec![-2i64], ());
+        object.insert(vec![0i64, 0i64], ());
+        object.insert(vec![0i64, -1i64], ());
+        let vec = to_vec(&serde_cbor::value::to_value(object.clone()).unwrap()).unwrap();
+        assert_eq!(
+            vec![
+                166, 129, 0, 246, 129, 24, 100, 246, 129, 32, 246, 129, 33, 246, 130, 0, 0, 246,
+                130, 0, 32, 246
+            ],
+            vec
+        );
+        let test_object = from_slice(&vec[..]).unwrap();
+        assert_eq!(object, test_object);
+    }
+
+    #[test]
+    fn test_object_object_keys() {
+        use std::iter::FromIterator;
+        let mut object = BTreeMap::new();
+        let keys = vec![
+            vec!["a"],
+            vec!["b"],
+            vec!["c"],
+            vec!["d"],
+            vec!["aa"],
+            vec!["a", "aa"],
+        ]
+        .into_iter()
+        .map(|v| BTreeMap::from_iter(v.into_iter().map(|s| (s.to_owned(), ()))));
+
+        for key in keys {
+            object.insert(key, ());
+        }
+        let vec = to_vec(&serde_cbor::value::to_value(object.clone()).unwrap()).unwrap();
+        assert_eq!(
+            vec![
+                166, 161, 97, 97, 246, 246, 161, 97, 98, 246, 246, 161, 97, 99, 246, 246, 161, 97,
+                100, 246, 246, 161, 98, 97, 97, 246, 246, 162, 97, 97, 246, 98, 97, 97, 246, 246
+            ],
+            vec
+        );
+        let test_object = from_slice(&vec[..]).unwrap();
+        assert_eq!(object, test_object);
+    }
+
+    #[test]
+    fn test_float() {
+        let vec = to_vec(&12.3f64).unwrap();
+        assert_eq!(vec, b"\xfb@(\x99\x99\x99\x99\x99\x9a");
+    }
+
+    #[test]
+    fn test_f32() {
+        let vec = to_vec(&4000.5f32).unwrap();
+        assert_eq!(vec, b"\xfa\x45\x7a\x08\x00");
+    }
+
+    #[test]
+    fn test_infinity() {
+        let vec = to_vec(&::std::f64::INFINITY).unwrap();
+        assert_eq!(vec, b"\xf9|\x00");
+    }
+
+    #[test]
+    fn test_neg_infinity() {
+        let vec = to_vec(&::std::f64::NEG_INFINITY).unwrap();
+        assert_eq!(vec, b"\xf9\xfc\x00");
+    }
+
+    #[test]
+    fn test_nan() {
+        let vec = to_vec(&::std::f32::NAN).unwrap();
+        assert_eq!(vec, b"\xf9\x7e\x00");
+    }
+
+    #[test]
+    fn test_integer() {
+        // u8
+        let vec = to_vec(&24).unwrap();
+        assert_eq!(vec, b"\x18\x18");
+        // i8
+        let vec = to_vec(&-5).unwrap();
+        assert_eq!(vec, b"\x24");
+        // i16
+        let vec = to_vec(&-300).unwrap();
+        assert_eq!(vec, b"\x39\x01\x2b");
+        // i32
+        let vec = to_vec(&-23567997).unwrap();
+        assert_eq!(vec, b"\x3a\x01\x67\x9e\x7c");
+        // u64
+        let vec = to_vec(&::std::u64::MAX).unwrap();
+        assert_eq!(vec, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff");
+    }
+
+    #[test]
+    fn test_self_describing() {
+        let mut vec = Vec::new();
+        {
+            let mut serializer = ser::Serializer::new(&mut vec);
+            serializer.self_describe().unwrap();
+            serializer.serialize_u64(9).unwrap();
+        }
+        assert_eq!(vec, b"\xd9\xd9\xf7\x09");
+    }
+
+    #[test]
+    fn test_ip_addr() {
+        use std::net::Ipv4Addr;
+
+        let addr = Ipv4Addr::new(8, 8, 8, 8);
+        let vec = to_vec(&addr).unwrap();
+        println!("{:?}", vec);
+        assert_eq!(vec.len(), 5);
+        let test_addr: Ipv4Addr = from_slice(&vec).unwrap();
+        assert_eq!(addr, test_addr);
+    }
+
+    /// Test all of CBOR's fixed-length byte string types
+    #[test]
+    fn test_byte_string() {
+        // Byte strings of up to 23 bytes have 1-byte headers
+        let short = vec![0, 1, 2, 255];
+        let mut short_s = Vec::new();
+        serde_cbor::Serializer::new(&mut short_s)
+            .serialize_bytes(&short)
+            .unwrap();
+        assert_eq!(&short_s[..], [0x44, 0, 1, 2, 255]);
+
+        // byte strings of 24 to 255 bytes have 2-byte headers
+        let medium = vec![
+            0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 255,
+        ];
+        let mut medium_s = Vec::new();
+        serde_cbor::Serializer::new(&mut medium_s)
+            .serialize_bytes(&medium)
+            .unwrap();
+        assert_eq!(
+            &medium_s[..],
+            [
+                0x58, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+                21, 22, 255
+            ]
+        );
+
+        // byte strings of 256 to 65535 bytes have 3-byte headers
+        let long_vec = (0..256).map(|i| (i & 0xFF) as u8).collect::<Vec<_>>();
+        let mut long_s = Vec::new();
+        serde_cbor::Serializer::new(&mut long_s)
+            .serialize_bytes(&long_vec)
+            .unwrap();
+        assert_eq!(&long_s[0..3], [0x59, 1, 0]);
+        assert_eq!(&long_s[3..], &long_vec[..]);
+
+        // byte strings of 65536 to 2^32 - 1 bytes have 5-byte headers
+        let very_long_vec = (0..65536).map(|i| (i & 0xFF) as u8).collect::<Vec<_>>();
+        let mut very_long_s = Vec::new();
+        serde_cbor::Serializer::new(&mut very_long_s)
+            .serialize_bytes(&very_long_vec)
+            .unwrap();
+        assert_eq!(&very_long_s[0..5], [0x5a, 0, 1, 0, 0]);
+        assert_eq!(&very_long_s[5..], &very_long_vec[..]);
+
+        // byte strings of 2^32 bytes or more have 9-byte headers, but they take too much
+        // RAM to test in Travis.
+    }
+
+    #[test]
+    fn test_half() {
+        let vec = to_vec(&42.5f32).unwrap();
+        assert_eq!(vec, b"\xF9\x51\x50");
+        assert_eq!(from_slice::<f32>(&vec[..]).unwrap(), 42.5f32);
+    }
+}
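A note on the `test_half` vector above: 42.5 is exactly representable in IEEE 754 half precision as sign 0, biased exponent 20 (5 + bias 15), and mantissa 0b0101010000, i.e. bit pattern 0x5150; prefixing the half-float initial byte 0xF9 gives the expected F9 51 50. A tiny sketch of that bit-level check, assuming the `half` crate (which serde_cbor already depends on for f16 support) is available to the test:

use half::f16;

fn main() {
    // 42.5 = 1.0101010000 (binary) * 2^5, so the exponent field is 5 + 15 = 20
    let h = f16::from_f32(42.5);
    assert_eq!(h.to_bits(), 0x5150);
}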
diff --git a/crates/serde_cbor/tests/std_types.rs b/crates/serde_cbor/tests/std_types.rs
new file mode 100644
index 0000000..7a7ded4
--- /dev/null
+++ b/crates/serde_cbor/tests/std_types.rs
@@ -0,0 +1,186 @@
+#[macro_use]
+extern crate serde_derive;
+
+#[cfg(feature = "std")]
+mod std_tests {
+    use std::u8;
+
+    use serde_cbor::de::from_mut_slice;
+    use serde_cbor::ser::{to_vec, to_vec_packed};
+    use serde_cbor::{from_reader, from_slice};
+
+    fn to_binary(s: &'static str) -> Vec<u8> {
+        assert!(s.len() % 2 == 0);
+        let mut b = Vec::with_capacity(s.len() / 2);
+        for i in 0..s.len() / 2 {
+            b.push(u8::from_str_radix(&s[i * 2..(i + 1) * 2], 16).unwrap());
+        }
+        b
+    }
+
+    macro_rules! testcase {
+        ($name:ident, f64, $expr:expr, $s:expr) => {
+            #[test]
+            fn $name() {
+                let expr: f64 = $expr;
+                let mut serialized = to_binary($s);
+                assert_eq!(to_vec(&expr).unwrap(), serialized);
+                let parsed: f64 = from_slice(&serialized[..]).unwrap();
+                if !expr.is_nan() {
+                    assert_eq!(expr, parsed);
+                } else {
+                    assert!(parsed.is_nan())
+                }
+
+                let parsed: f64 = from_reader(&mut &serialized[..]).unwrap();
+                if !expr.is_nan() {
+                    assert_eq!(expr, parsed);
+                } else {
+                    assert!(parsed.is_nan())
+                }
+
+                let parsed: f64 = from_mut_slice(&mut serialized[..]).unwrap();
+                if !expr.is_nan() {
+                    assert_eq!(expr, parsed);
+                } else {
+                    assert!(parsed.is_nan())
+                }
+            }
+        };
+        ($name:ident, $ty:ty, $expr:expr, $s:expr) => {
+            #[test]
+            fn $name() {
+                let expr: $ty = $expr;
+                let mut serialized = to_binary($s);
+                assert_eq!(
+                    to_vec(&expr).expect("ser1 works"),
+                    serialized,
+                    "serialization differs"
+                );
+                let parsed: $ty = from_slice(&serialized[..]).expect("de1 works");
+                assert_eq!(parsed, expr, "parsed result differs");
+                let packed = &to_vec_packed(&expr).expect("serializing packed")[..];
+                let parsed_from_packed: $ty = from_slice(packed).expect("parsing packed");
+                assert_eq!(parsed_from_packed, expr, "packed roundtrip fail");
+
+                let parsed: $ty = from_reader(&mut &serialized[..]).unwrap();
+                assert_eq!(parsed, expr, "parsed result differs");
+                let mut packed = to_vec_packed(&expr).expect("serializing packed");
+                let parsed_from_packed: $ty =
+                    from_reader(&mut &packed[..]).expect("parsing packed");
+                assert_eq!(parsed_from_packed, expr, "packed roundtrip fail");
+
+                let parsed: $ty = from_mut_slice(&mut serialized[..]).unwrap();
+                assert_eq!(parsed, expr, "parsed result differs");
+                let parsed_from_packed: $ty =
+                    from_mut_slice(&mut packed[..]).expect("parsing packed");
+                assert_eq!(parsed_from_packed, expr, "packed roundtrip fail");
+            }
+        };
+    }
+
+    testcase!(test_bool_false, bool, false, "f4");
+    testcase!(test_bool_true, bool, true, "f5");
+    testcase!(test_isize_neg_256, isize, -256, "38ff");
+    testcase!(test_isize_neg_257, isize, -257, "390100");
+    testcase!(test_isize_255, isize, 255, "18ff");
+    testcase!(test_i8_5, i8, 5, "05");
+    testcase!(test_i8_23, i8, 23, "17");
+    testcase!(test_i8_24, i8, 24, "1818");
+    testcase!(test_i8_neg_128, i8, -128, "387f");
+    testcase!(test_u32_98745874, u32, 98745874, "1a05e2be12");
+    testcase!(test_f32_1234_point_5, f32, 1234.5, "fa449a5000");
+    testcase!(test_f64_12345_point_6, f64, 12345.6, "fb40c81ccccccccccd");
+    testcase!(test_f64_nan, f64, ::std::f64::NAN, "f97e00");
+    testcase!(test_f64_infinity, f64, ::std::f64::INFINITY, "f97c00");
+    testcase!(test_f64_neg_infinity, f64, -::std::f64::INFINITY, "f9fc00");
+    testcase!(test_char_null, char, '\x00', "6100");
+    testcase!(test_char_broken_heart, char, '💔', "64f09f9294");
+    testcase!(
+        test_str_pangram_de,
+        String,
+        "aâø↓é".to_owned(),
+        "6a61c3a2c3b8e28693c3a9"
+    );
+    testcase!(test_unit, (), (), "f6");
+
+    #[derive(Debug, PartialEq, Deserialize, Serialize)]
+    struct UnitStruct;
+    testcase!(test_unit_struct, UnitStruct, UnitStruct, "f6");
+
+    #[derive(Debug, PartialEq, Deserialize, Serialize)]
+    struct NewtypeStruct(bool);
+    testcase!(
+        test_newtype_struct,
+        NewtypeStruct,
+        NewtypeStruct(true),
+        "f5"
+    );
+
+    testcase!(test_option_none, Option<u8>, None, "f6");
+    testcase!(test_option_some, Option<u8>, Some(42), "182a");
+
+    #[derive(Debug, PartialEq, Deserialize, Serialize)]
+    struct Person {
+        name: String,
+        year_of_birth: u16,
+        profession: Option<String>,
+    }
+
+    testcase!(test_person_struct,
+    Person,
+    Person {
+        name: "Grace Hopper".to_string(),
+        year_of_birth: 1906,
+        profession: Some("computer scientist".to_string()),
+    },
+    "a3646e616d656c477261636520486f707065726d796561725f6f665f62697274681907726a70726f66657373696f6e72636f6d707574657220736369656e74697374");
+
+    #[derive(Debug, PartialEq, Deserialize, Serialize)]
+    struct OptionalPerson {
+        name: String,
+        #[serde(skip_serializing_if = "Option::is_none")]
+        year_of_birth: Option<u16>,
+        profession: Option<String>,
+    }
+
+    testcase!(test_optional_person_struct,
+    OptionalPerson,
+    OptionalPerson {
+        name: "Grace Hopper".to_string(),
+        year_of_birth: None,
+        profession: Some("computer scientist".to_string()),
+    },
+    "a2646e616d656c477261636520486f707065726a70726f66657373696f6e72636f6d707574657220736369656e74697374");
+
+    #[derive(Debug, PartialEq, Deserialize, Serialize)]
+    enum Color {
+        Red,
+        Blue,
+        Yellow,
+        Other(u64),
+        Alpha(u64, u8),
+    }
+
+    testcase!(test_color_enum, Color, Color::Blue, "64426c7565");
+    testcase!(
+        test_color_enum_transparent,
+        Color,
+        Color::Other(42),
+        "a1654f74686572182a"
+    );
+    testcase!(
+        test_color_enum_with_alpha,
+        Color,
+        Color::Alpha(234567, 60),
+        "a165416c706861821a00039447183c"
+    );
+    testcase!(test_i128_a, i128, -1i128, "20");
+    testcase!(
+        test_i128_b,
+        i128,
+        -18446744073709551616i128,
+        "3BFFFFFFFFFFFFFFFF"
+    );
+    testcase!(test_u128, u128, 17, "11");
+}
diff --git a/crates/serde_cbor/tests/tags.rs b/crates/serde_cbor/tests/tags.rs
new file mode 100644
index 0000000..92534c0
--- /dev/null
+++ b/crates/serde_cbor/tests/tags.rs
@@ -0,0 +1,48 @@
+#[cfg(feature = "tags")]
+mod tagtests {
+    use serde_cbor::value::Value;
+    use serde_cbor::{from_slice, to_vec};
+
+    fn decode_hex(s: &str) -> std::result::Result<Vec<u8>, std::num::ParseIntError> {
+        (0..s.len())
+            .step_by(2)
+            .map(|i| u8::from_str_radix(&s[i..i + 2], 16))
+            .collect()
+    }
+
+    // get bytes from http://cbor.me/ trees
+    fn parse_cbor_me(example: &str) -> std::result::Result<Vec<u8>, std::num::ParseIntError> {
+        let hex = example
+            .split("\n")
+            .flat_map(|line| line.split("#").take(1))
+            .collect::<Vec<&str>>()
+            .join("")
+            .replace(" ", "");
+        decode_hex(&hex)
+    }
+
+    #[test]
+    fn tagged_cbor_roundtrip() {
+        let data = r#"
+C1                   # tag(1)
+   82                # array(2)
+      C2             # tag(2)
+         63          # text(3)
+            666F6F   # "foo"
+      C3             # tag(3)
+         A1          # map(1)
+            C4       # tag(4)
+               61    # text(1)
+                  61 # "a"
+            C5       # tag(5)
+               61    # text(1)
+                  62 # "b"
+            "#;
+        let bytes1 = parse_cbor_me(&data).unwrap();
+        let value1: Value = from_slice(&bytes1).unwrap();
+        let bytes2 = to_vec(&value1).unwrap();
+        let value2: Value = from_slice(&bytes2).unwrap();
+        assert_eq!(bytes1, bytes2);
+        assert_eq!(value1, value2);
+    }
+}
diff --git a/crates/serde_cbor/tests/value.rs b/crates/serde_cbor/tests/value.rs
new file mode 100644
index 0000000..554d742
--- /dev/null
+++ b/crates/serde_cbor/tests/value.rs
@@ -0,0 +1,100 @@
+#[macro_use]
+extern crate serde_derive;
+
+#[cfg(feature = "std")]
+mod std_tests {
+    use serde_cbor;
+
+    use std::collections::BTreeMap;
+
+    #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+    struct TupleStruct(String, i32, u64);
+
+    #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+    struct UnitStruct;
+
+    #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+    struct Struct<'a> {
+        tuple_struct: TupleStruct,
+        tuple: (String, f32, f64),
+        map: BTreeMap<String, String>,
+        bytes: &'a [u8],
+        array: Vec<String>,
+        unit_array: Vec<UnitStruct>,
+    }
+
+    use serde_cbor::value::Value;
+    use std::iter::FromIterator;
+
+    #[test]
+    fn serde() {
+        let tuple_struct = TupleStruct(format!("test"), -60, 3000);
+
+        let tuple = (format!("hello"), -50.0040957, -12.094635556478);
+
+        let map = BTreeMap::from_iter(
+            [
+                (format!("key1"), format!("value1")),
+                (format!("key2"), format!("value2")),
+                (format!("key3"), format!("value3")),
+                (format!("key4"), format!("value4")),
+            ]
+            .iter()
+            .cloned(),
+        );
+
+        let bytes = b"test byte string";
+
+        let array = vec![format!("one"), format!("two"), format!("three")];
+        let unit_array = vec![UnitStruct, UnitStruct, UnitStruct];
+
+        let data = Struct {
+            tuple_struct,
+            tuple,
+            map,
+            bytes,
+            array,
+            unit_array,
+        };
+
+        let value = serde_cbor::value::to_value(data.clone()).unwrap();
+        println!("{:?}", value);
+
+        let data_ser = serde_cbor::to_vec(&value).unwrap();
+        let data_de_value: Value = serde_cbor::from_slice(&data_ser).unwrap();
+
+        fn as_object(value: &Value) -> &BTreeMap<Value, Value> {
+            if let Value::Map(ref v) = value {
+                return v;
+            }
+            panic!()
+        }
+
+        for ((k1, v1), (k2, v2)) in as_object(&value)
+            .iter()
+            .zip(as_object(&data_de_value).iter())
+        {
+            assert_eq!(k1, k2);
+            assert_eq!(v1, v2);
+        }
+
+        assert_eq!(value, data_de_value);
+    }
+
+    #[derive(Debug, Deserialize, Serialize)]
+    struct SmallStruct {
+        spam: u32,
+        eggs: u32,
+    }
+
+    #[test]
+    fn small_struct() {
+        // Test whether the packed format works.
+        // Field names should not be serialized;
+        // instead, field indices are serialized.
+        let value = SmallStruct { spam: 17, eggs: 42 };
+        let data = serde_cbor::ser::to_vec_packed(&value).unwrap();
+        let reference = b"\xa2\x00\x11\x01\x18\x2a";
+        assert_eq!(data, reference);
+    }
+}
diff --git a/crates/serde_derive/.cargo-checksum.json b/crates/serde_derive/.cargo-checksum.json
new file mode 100644
index 0000000..ea36a4a
--- /dev/null
+++ b/crates/serde_derive/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"938a2b8aae0b6d239b39f3195dd8fbd895ae0b3f7285456f011a3e6119aad187","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"13c66875efb67f64fdec817725f34ceb07913e1ebea4adc240868d2ed581d3da","build.rs":"d9a0df0c4dd552ff7fd0c3b3828cb1fff4fc4ab15bd98539881929b76b98003b","crates-io.md":"ee22254ee64c3189eef3e707c8d75dc66a8df2a7ee9e518d95238950780ec387","src/bound.rs":"9211d852730380be8e0af9ed5daa52e61563e598eef458739025551ba76aa7c6","src/de.rs":"c6793f652a1f8161f802c60f757e5c97fb0fdd772fb2b29d471e7bff577b592b","src/dummy.rs":"31e02c3313d12c88b3b26baa49a9cb143440804b557b5255aad9a9b80b8ea1c6","src/fragment.rs":"5548ba65a53d90a296f60c1328a7a7fb040db467f59c2f5210b2fb320457145d","src/internals/ast.rs":"b019865eef92c1ddbb9029423ac22179f132dc655a51c09fb2a42f4aaef172fd","src/internals/attr.rs":"e52f3ddc809ed47cba7857f163de489836584cafce8285d08161a79831180c2b","src/internals/case.rs":"9492f0c5142d7b7e8cd39c86d13a855e5ce4489425adb2b96aed89e1b7851ac0","src/internals/check.rs":"0449cc7653fc9e596f65028835bbb7d1545c10002c79c7608547f45a722c0040","src/internals/ctxt.rs":"6fa544ae52914498a62a395818ebdc1b36ac2fb5903c60afb741a864ad559f1c","src/internals/mod.rs":"f32138ff19d57eb00f88ba11f6b015efab2102657804f71ebbf386a3698dad91","src/internals/receiver.rs":"6b016351b8294539039095863d8c99e81dd4530d7f769003d12d4ca73cca172c","src/internals/respan.rs":"899753859c58ce5f532a3ec4584796a52f13ed5a0533191e48c953ba5c1b52ff","src/internals/symbol.rs":"2bf0287da64d28da7e8673af60f66aaf6b29efe33131e56b24d6fa55edb533ad","src/lib.rs":"021292277a8692099e1a8fb34f6bade6ce13fc4107b4498747cd2340b5fb9081","src/pretend.rs":"4aa53bf6c1350fbcfc8c4997f720cde61a8eb3aab73bb8c101b0f0a74901892b","src/ser.rs":"8f9ffe1d8bcd28bd40e8d94d688547fa1d518cc722d0292f47d951152c406dd9","src/this.rs":"a2c128955324c2994ed7cdc3fe4eeceb7ad8a0f9d071665a8378c85c8df64ce2","src/try.rs":"b171b0088c23ebf4bfa07ba457881b41ac5e547d55dd16f737ea988d34badf61"},"package":"e801c1712f48475582b7696ac71e0ca34ebb30e09338425384269d9717c62cad"}
\ No newline at end of file
diff --git a/crates/serde_derive/Android.bp b/crates/serde_derive/Android.bp
new file mode 100644
index 0000000..30a9b47
--- /dev/null
+++ b/crates/serde_derive/Android.bp
@@ -0,0 +1,52 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_serde_derive_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_serde_derive_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_proc_macro {
+    name: "libserde_derive",
+    crate_name: "serde_derive",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.0.158",
+    crate_root: "src/lib.rs",
+    edition: "2015",
+    features: ["default"],
+    rustlibs: [
+        "libproc_macro2",
+        "libquote",
+        "libsyn",
+    ],
+    compile_multilib: "first",
+}
+
+rust_test_host {
+    name: "serde_derive_test_src_lib",
+    host_cross_supported: false,
+    crate_name: "serde_derive",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.0.158",
+    crate_root: "src/lib.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2015",
+    features: ["default"],
+    rustlibs: [
+        "libproc_macro2",
+        "libquote",
+        "libserde",
+        "libsyn",
+    ],
+}
diff --git a/crates/serde_derive/Cargo.lock b/crates/serde_derive/Cargo.lock
new file mode 100644
index 0000000..b77649a
--- /dev/null
+++ b/crates/serde_derive/Cargo.lock
@@ -0,0 +1,68 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
+dependencies = [
+ "serde_derive 1.0.209",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.158"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "serde",
+ "syn",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
diff --git a/crates/serde_derive/Cargo.toml b/crates/serde_derive/Cargo.toml
new file mode 100644
index 0000000..2b9e4df
--- /dev/null
+++ b/crates/serde_derive/Cargo.toml
@@ -0,0 +1,63 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+rust-version = "1.56"
+name = "serde_derive"
+version = "1.0.158"
+authors = [
+    "Erick Tryzelaar <erick.tryzelaar@gmail.com>",
+    "David Tolnay <dtolnay@gmail.com>",
+]
+include = [
+    "build.rs",
+    "src/**/*.rs",
+    "crates-io.md",
+    "README.md",
+    "LICENSE-APACHE",
+    "LICENSE-MIT",
+]
+description = "Macros 1.1 implementation of #[derive(Serialize, Deserialize)]"
+homepage = "https://serde.rs"
+documentation = "https://serde.rs/derive.html"
+readme = "crates-io.md"
+keywords = [
+    "serde",
+    "serialization",
+    "no_std",
+    "derive",
+]
+categories = ["no-std"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/serde-rs/serde"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[lib]
+name = "serde_derive"
+proc-macro = true
+
+[dependencies.proc-macro2]
+version = "1.0"
+
+[dependencies.quote]
+version = "1.0"
+
+[dependencies.syn]
+version = "2.0.3"
+
+[dev-dependencies.serde]
+version = "1.0"
+
+[features]
+default = []
+deserialize_in_place = []
diff --git a/crates/serde_derive/LICENSE b/crates/serde_derive/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/serde_derive/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/serde_derive/LICENSE-APACHE b/crates/serde_derive/LICENSE-APACHE
new file mode 100644
index 0000000..1b5ec8b
--- /dev/null
+++ b/crates/serde_derive/LICENSE-APACHE
@@ -0,0 +1,176 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/crates/serde_derive/LICENSE-MIT b/crates/serde_derive/LICENSE-MIT
new file mode 100644
index 0000000..31aa793
--- /dev/null
+++ b/crates/serde_derive/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/serde_derive/METADATA b/crates/serde_derive/METADATA
new file mode 100644
index 0000000..cc6c728
--- /dev/null
+++ b/crates/serde_derive/METADATA
@@ -0,0 +1,23 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/serde_derive
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
+name: "serde_derive"
+description: "Macros 1.1 implementation of #[derive(Serialize, Deserialize)]"
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/serde_derive"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/serde_derive/serde_derive-1.0.158.crate"
+  }
+  version: "1.0.158"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2023
+    month: 3
+    day: 20
+  }
+}
diff --git a/crates/serde_derive/MODULE_LICENSE_APACHE2 b/crates/serde_derive/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/serde_derive/MODULE_LICENSE_APACHE2
diff --git a/crates/serde_derive/README.md b/crates/serde_derive/README.md
new file mode 100644
index 0000000..d53e572
--- /dev/null
+++ b/crates/serde_derive/README.md
@@ -0,0 +1,114 @@
+# Serde &emsp; [![Build Status]][actions] [![Latest Version]][crates.io] [![serde: rustc 1.19+]][Rust 1.19] [![serde_derive: rustc 1.56+]][Rust 1.56]
+
+[Build Status]: https://img.shields.io/github/actions/workflow/status/serde-rs/serde/ci.yml?branch=master
+[actions]: https://github.com/serde-rs/serde/actions?query=branch%3Amaster
+[Latest Version]: https://img.shields.io/crates/v/serde.svg
+[crates.io]: https://crates.io/crates/serde
+[serde: rustc 1.19+]: https://img.shields.io/badge/serde-rustc_1.19+-lightgray.svg
+[serde_derive: rustc 1.56+]: https://img.shields.io/badge/serde_derive-rustc_1.56+-lightgray.svg
+[Rust 1.19]: https://blog.rust-lang.org/2017/07/20/Rust-1.19.html
+[Rust 1.56]: https://blog.rust-lang.org/2021/10/21/Rust-1.56.0.html
+
+**Serde is a framework for *ser*ializing and *de*serializing Rust data structures efficiently and generically.**
+
+---
+
+You may be looking for:
+
+- [An overview of Serde](https://serde.rs/)
+- [Data formats supported by Serde](https://serde.rs/#data-formats)
+- [Setting up `#[derive(Serialize, Deserialize)]`](https://serde.rs/derive.html)
+- [Examples](https://serde.rs/examples.html)
+- [API documentation](https://docs.rs/serde)
+- [Release notes](https://github.com/serde-rs/serde/releases)
+
+## Serde in action
+
+<details>
+<summary>
+Click to show Cargo.toml.
+<a href="https://play.rust-lang.org/?edition=2018&gist=72755f28f99afc95e01d63174b28c1f5" target="_blank">Run this code in the playground.</a>
+</summary>
+
+```toml
+[dependencies]
+
+# The core APIs, including the Serialize and Deserialize traits. Always
+# required when using Serde. The "derive" feature is only required when
+# using #[derive(Serialize, Deserialize)] to make Serde work with structs
+# and enums defined in your crate.
+serde = { version = "1.0", features = ["derive"] }
+
+# Each data format lives in its own crate; the sample code below uses JSON
+# but you may be using a different one.
+serde_json = "1.0"
+```
+
+</details>
+<p></p>
+
+```rust
+use serde::{Serialize, Deserialize};
+
+#[derive(Serialize, Deserialize, Debug)]
+struct Point {
+    x: i32,
+    y: i32,
+}
+
+fn main() {
+    let point = Point { x: 1, y: 2 };
+
+    // Convert the Point to a JSON string.
+    let serialized = serde_json::to_string(&point).unwrap();
+
+    // Prints serialized = {"x":1,"y":2}
+    println!("serialized = {}", serialized);
+
+    // Convert the JSON string back to a Point.
+    let deserialized: Point = serde_json::from_str(&serialized).unwrap();
+
+    // Prints deserialized = Point { x: 1, y: 2 }
+    println!("deserialized = {:?}", deserialized);
+}
+```
+
+## Getting help
+
+Serde is one of the most widely used Rust libraries so any place that Rustaceans
+congregate will be able to help you out. For chat, consider trying the
+[#rust-questions] or [#rust-beginners] channels of the unofficial community
+Discord (invite: <https://discord.gg/rust-lang-community>), the [#rust-usage] or
+[#beginners] channels of the official Rust Project Discord (invite:
+<https://discord.gg/rust-lang>), or the [#general][zulip] stream in Zulip. For
+asynchronous, consider the [\[rust\] tag on StackOverflow][stackoverflow], the
+[/r/rust] subreddit which has a pinned weekly easy questions post, or the Rust
+[Discourse forum][discourse]. It's acceptable to file a support issue in this
+repo but they tend not to get as many eyes as any of the above and may get
+closed without a response after some time.
+
+[#rust-questions]: https://discord.com/channels/273534239310479360/274215136414400513
+[#rust-beginners]: https://discord.com/channels/273534239310479360/273541522815713281
+[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848
+[#beginners]: https://discord.com/channels/442252698964721669/448238009733742612
+[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general
+[stackoverflow]: https://stackoverflow.com/questions/tagged/rust
+[/r/rust]: https://www.reddit.com/r/rust
+[discourse]: https://users.rust-lang.org
+
+<br>
+
+#### License
+
+<sup>
+Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
+2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
+</sup>
+
+<br>
+
+<sub>
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
+</sub>
diff --git a/crates/serde_derive/TEST_MAPPING b/crates/serde_derive/TEST_MAPPING
new file mode 100644
index 0000000..b230096
--- /dev/null
+++ b/crates/serde_derive/TEST_MAPPING
@@ -0,0 +1,101 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/base64"
+    },
+    {
+      "path": "external/rust/crates/bitflags"
+    },
+    {
+      "path": "external/rust/crates/bytes"
+    },
+    {
+      "path": "external/rust/crates/coset"
+    },
+    {
+      "path": "external/rust/crates/either"
+    },
+    {
+      "path": "external/rust/crates/hashbrown"
+    },
+    {
+      "path": "external/rust/crates/hashlink"
+    },
+    {
+      "path": "external/rust/crates/rand_chacha"
+    },
+    {
+      "path": "external/rust/crates/serde"
+    },
+    {
+      "path": "external/rust/crates/serde-xml-rs"
+    },
+    {
+      "path": "external/rust/crates/serde_cbor"
+    },
+    {
+      "path": "external/rust/crates/slab"
+    },
+    {
+      "path": "external/rust/crates/tinytemplate"
+    },
+    {
+      "path": "external/rust/crates/tinyvec"
+    },
+    {
+      "path": "external/rust/crates/unicode-bidi"
+    },
+    {
+      "path": "external/rust/crates/unicode-xid"
+    },
+    {
+      "path": "external/rust/crates/url"
+    },
+    {
+      "path": "packages/modules/Virtualization/apkdmverity"
+    },
+    {
+      "path": "packages/modules/Virtualization/authfs"
+    },
+    {
+      "path": "packages/modules/Virtualization/avmd"
+    },
+    {
+      "path": "packages/modules/Virtualization/encryptedstore"
+    },
+    {
+      "path": "packages/modules/Virtualization/libs/apkverify"
+    },
+    {
+      "path": "packages/modules/Virtualization/libs/devicemapper"
+    },
+    {
+      "path": "packages/modules/Virtualization/microdroid_manager"
+    },
+    {
+      "path": "packages/modules/Virtualization/virtualizationmanager"
+    },
+    {
+      "path": "packages/modules/Virtualization/vm"
+    },
+    {
+      "path": "packages/modules/Virtualization/zipfuse"
+    },
+    {
+      "path": "system/keymint/derive"
+    },
+    {
+      "path": "system/keymint/hal"
+    },
+    {
+      "path": "system/security/diced"
+    },
+    {
+      "path": "system/security/keystore2"
+    },
+    {
+      "path": "system/security/keystore2/legacykeystore"
+    }
+  ]
+}
diff --git a/crates/serde_derive/build.rs b/crates/serde_derive/build.rs
new file mode 100644
index 0000000..1249dab
--- /dev/null
+++ b/crates/serde_derive/build.rs
@@ -0,0 +1,38 @@
+use std::env;
+use std::process::Command;
+use std::str;
+
+// The rustc-cfg strings below are *not* public API. Please let us know by
+// opening a GitHub issue if your build environment requires some way to enable
+// these cfgs other than by executing our build script.
+fn main() {
+    println!("cargo:rerun-if-changed=build.rs");
+
+    let minor = match rustc_minor_version() {
+        Some(minor) => minor,
+        None => return,
+    };
+
+    // Underscore const names stabilized in Rust 1.37:
+    // https://blog.rust-lang.org/2019/08/15/Rust-1.37.0.html#using-unnamed-const-items-for-macros
+    if minor < 37 {
+        println!("cargo:rustc-cfg=no_underscore_consts");
+    }
+
+    // The ptr::addr_of! macro stabilized in Rust 1.51:
+    // https://blog.rust-lang.org/2021/03/25/Rust-1.51.0.html#stabilized-apis
+    if minor < 51 {
+        println!("cargo:rustc-cfg=no_ptr_addr_of");
+    }
+}
+
+fn rustc_minor_version() -> Option<u32> {
+    let rustc = env::var_os("RUSTC")?;
+    let output = Command::new(rustc).arg("--version").output().ok()?;
+    let version = str::from_utf8(&output.stdout).ok()?;
+    let mut pieces = version.split('.');
+    if pieces.next() != Some("rustc 1") {
+        return None;
+    }
+    pieces.next()?.parse().ok()
+}
diff --git a/crates/serde_derive/cargo_embargo.json b/crates/serde_derive/cargo_embargo.json
new file mode 100644
index 0000000..f428a9e
--- /dev/null
+++ b/crates/serde_derive/cargo_embargo.json
@@ -0,0 +1,10 @@
+{
+  "package": {
+    "serde_derive": {
+      "device_supported": false,
+      "host_first_multilib": true
+    }
+  },
+  "run_cargo": false,
+  "tests": true
+}
diff --git a/crates/serde_derive/crates-io.md b/crates/serde_derive/crates-io.md
new file mode 100644
index 0000000..6e0ec28
--- /dev/null
+++ b/crates/serde_derive/crates-io.md
@@ -0,0 +1,65 @@
+<!-- Serde readme rendered on crates.io -->
+
+**Serde is a framework for *ser*ializing and *de*serializing Rust data structures efficiently and generically.**
+
+---
+
+You may be looking for:
+
+- [An overview of Serde](https://serde.rs/)
+- [Data formats supported by Serde](https://serde.rs/#data-formats)
+- [Setting up `#[derive(Serialize, Deserialize)]`](https://serde.rs/derive.html)
+- [Examples](https://serde.rs/examples.html)
+- [API documentation](https://docs.rs/serde)
+- [Release notes](https://github.com/serde-rs/serde/releases)
+
+## Serde in action
+
+```rust
+use serde::{Serialize, Deserialize};
+
+#[derive(Serialize, Deserialize, Debug)]
+struct Point {
+    x: i32,
+    y: i32,
+}
+
+fn main() {
+    let point = Point { x: 1, y: 2 };
+
+    // Convert the Point to a JSON string.
+    let serialized = serde_json::to_string(&point).unwrap();
+
+    // Prints serialized = {"x":1,"y":2}
+    println!("serialized = {}", serialized);
+
+    // Convert the JSON string back to a Point.
+    let deserialized: Point = serde_json::from_str(&serialized).unwrap();
+
+    // Prints deserialized = Point { x: 1, y: 2 }
+    println!("deserialized = {:?}", deserialized);
+}
+```
+
+## Getting help
+
+Serde is one of the most widely used Rust libraries so any place that Rustaceans
+congregate will be able to help you out. For chat, consider trying the
+[#rust-questions] or [#rust-beginners] channels of the unofficial community
+Discord (invite: <https://discord.gg/rust-lang-community>), the [#rust-usage] or
+[#beginners] channels of the official Rust Project Discord (invite:
+<https://discord.gg/rust-lang>), or the [#general][zulip] stream in Zulip. For
+asynchronous, consider the [\[rust\] tag on StackOverflow][stackoverflow], the
+[/r/rust] subreddit which has a pinned weekly easy questions post, or the Rust
+[Discourse forum][discourse]. It's acceptable to file a support issue in this
+repo but they tend not to get as many eyes as any of the above and may get
+closed without a response after some time.
+
+[#rust-questions]: https://discord.com/channels/273534239310479360/274215136414400513
+[#rust-beginners]: https://discord.com/channels/273534239310479360/273541522815713281
+[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848
+[#beginners]: https://discord.com/channels/442252698964721669/448238009733742612
+[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general
+[stackoverflow]: https://stackoverflow.com/questions/tagged/rust
+[/r/rust]: https://www.reddit.com/r/rust
+[discourse]: https://users.rust-lang.org
diff --git a/crates/serde_derive/src/bound.rs b/crates/serde_derive/src/bound.rs
new file mode 100644
index 0000000..7bdb046
--- /dev/null
+++ b/crates/serde_derive/src/bound.rs
@@ -0,0 +1,414 @@
+use std::collections::HashSet;
+
+use syn;
+use syn::punctuated::{Pair, Punctuated};
+
+use internals::ast::{Container, Data};
+use internals::{attr, ungroup};
+
+use proc_macro2::Span;
+
+// Remove the default from every type parameter because in the generated impls
+// they look like associated types: "error: associated type bindings are not
+// allowed here".
+pub fn without_defaults(generics: &syn::Generics) -> syn::Generics {
+    syn::Generics {
+        params: generics
+            .params
+            .iter()
+            .map(|param| match param {
+                syn::GenericParam::Type(param) => syn::GenericParam::Type(syn::TypeParam {
+                    eq_token: None,
+                    default: None,
+                    ..param.clone()
+                }),
+                _ => param.clone(),
+            })
+            .collect(),
+        ..generics.clone()
+    }
+}
+
+pub fn with_where_predicates(
+    generics: &syn::Generics,
+    predicates: &[syn::WherePredicate],
+) -> syn::Generics {
+    let mut generics = generics.clone();
+    generics
+        .make_where_clause()
+        .predicates
+        .extend(predicates.iter().cloned());
+    generics
+}
+
+pub fn with_where_predicates_from_fields(
+    cont: &Container,
+    generics: &syn::Generics,
+    from_field: fn(&attr::Field) -> Option<&[syn::WherePredicate]>,
+) -> syn::Generics {
+    let predicates = cont
+        .data
+        .all_fields()
+        .filter_map(|field| from_field(&field.attrs))
+        .flat_map(<[syn::WherePredicate]>::to_vec);
+
+    let mut generics = generics.clone();
+    generics.make_where_clause().predicates.extend(predicates);
+    generics
+}
+
+pub fn with_where_predicates_from_variants(
+    cont: &Container,
+    generics: &syn::Generics,
+    from_variant: fn(&attr::Variant) -> Option<&[syn::WherePredicate]>,
+) -> syn::Generics {
+    let variants = match &cont.data {
+        Data::Enum(variants) => variants,
+        Data::Struct(_, _) => {
+            return generics.clone();
+        }
+    };
+
+    let predicates = variants
+        .iter()
+        .filter_map(|variant| from_variant(&variant.attrs))
+        .flat_map(<[syn::WherePredicate]>::to_vec);
+
+    let mut generics = generics.clone();
+    generics.make_where_clause().predicates.extend(predicates);
+    generics
+}
+
+// Puts the given bound on any generic type parameters that are used in fields
+// for which filter returns true.
+//
+// For example, the following struct needs the bound `A: Serialize, B:
+// Serialize`.
+//
+//     struct S<'b, A, B: 'b, C> {
+//         a: A,
+//         b: Option<&'b B>
+//         #[serde(skip_serializing)]
+//         c: C,
+//     }
+pub fn with_bound(
+    cont: &Container,
+    generics: &syn::Generics,
+    filter: fn(&attr::Field, Option<&attr::Variant>) -> bool,
+    bound: &syn::Path,
+) -> syn::Generics {
+    struct FindTyParams<'ast> {
+        // Set of all generic type parameters on the current struct (A, B, C in
+        // the example). Initialized up front.
+        all_type_params: HashSet<syn::Ident>,
+
+        // Set of generic type parameters used in fields for which filter
+        // returns true (A and B in the example). Filled in as the visitor sees
+        // them.
+        relevant_type_params: HashSet<syn::Ident>,
+
+        // Fields whose type is an associated type of one of the generic type
+        // parameters.
+        associated_type_usage: Vec<&'ast syn::TypePath>,
+    }
+
+    impl<'ast> FindTyParams<'ast> {
+        fn visit_field(&mut self, field: &'ast syn::Field) {
+            if let syn::Type::Path(ty) = ungroup(&field.ty) {
+                if let Some(Pair::Punctuated(t, _)) = ty.path.segments.pairs().next() {
+                    if self.all_type_params.contains(&t.ident) {
+                        self.associated_type_usage.push(ty);
+                    }
+                }
+            }
+            self.visit_type(&field.ty);
+        }
+
+        fn visit_path(&mut self, path: &'ast syn::Path) {
+            if let Some(seg) = path.segments.last() {
+                if seg.ident == "PhantomData" {
+                    // Hardcoded exception, because PhantomData<T> implements
+                    // Serialize and Deserialize whether or not T implements it.
+                    return;
+                }
+            }
+            if path.leading_colon.is_none() && path.segments.len() == 1 {
+                let id = &path.segments[0].ident;
+                if self.all_type_params.contains(id) {
+                    self.relevant_type_params.insert(id.clone());
+                }
+            }
+            for segment in &path.segments {
+                self.visit_path_segment(segment);
+            }
+        }
+
+        // Everything below is simply traversing the syntax tree.
+
+        fn visit_type(&mut self, ty: &'ast syn::Type) {
+            match ty {
+                syn::Type::Array(ty) => self.visit_type(&ty.elem),
+                syn::Type::BareFn(ty) => {
+                    for arg in &ty.inputs {
+                        self.visit_type(&arg.ty);
+                    }
+                    self.visit_return_type(&ty.output);
+                }
+                syn::Type::Group(ty) => self.visit_type(&ty.elem),
+                syn::Type::ImplTrait(ty) => {
+                    for bound in &ty.bounds {
+                        self.visit_type_param_bound(bound);
+                    }
+                }
+                syn::Type::Macro(ty) => self.visit_macro(&ty.mac),
+                syn::Type::Paren(ty) => self.visit_type(&ty.elem),
+                syn::Type::Path(ty) => {
+                    if let Some(qself) = &ty.qself {
+                        self.visit_type(&qself.ty);
+                    }
+                    self.visit_path(&ty.path);
+                }
+                syn::Type::Ptr(ty) => self.visit_type(&ty.elem),
+                syn::Type::Reference(ty) => self.visit_type(&ty.elem),
+                syn::Type::Slice(ty) => self.visit_type(&ty.elem),
+                syn::Type::TraitObject(ty) => {
+                    for bound in &ty.bounds {
+                        self.visit_type_param_bound(bound);
+                    }
+                }
+                syn::Type::Tuple(ty) => {
+                    for elem in &ty.elems {
+                        self.visit_type(elem);
+                    }
+                }
+
+                syn::Type::Infer(_) | syn::Type::Never(_) | syn::Type::Verbatim(_) => {}
+
+                #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))]
+                _ => {}
+            }
+        }
+
+        fn visit_path_segment(&mut self, segment: &'ast syn::PathSegment) {
+            self.visit_path_arguments(&segment.arguments);
+        }
+
+        fn visit_path_arguments(&mut self, arguments: &'ast syn::PathArguments) {
+            match arguments {
+                syn::PathArguments::None => {}
+                syn::PathArguments::AngleBracketed(arguments) => {
+                    for arg in &arguments.args {
+                        match arg {
+                            syn::GenericArgument::Type(arg) => self.visit_type(arg),
+                            syn::GenericArgument::AssocType(arg) => self.visit_type(&arg.ty),
+                            syn::GenericArgument::Lifetime(_)
+                            | syn::GenericArgument::Const(_)
+                            | syn::GenericArgument::AssocConst(_)
+                            | syn::GenericArgument::Constraint(_) => {}
+                            #[cfg_attr(
+                                all(test, exhaustive),
+                                deny(non_exhaustive_omitted_patterns)
+                            )]
+                            _ => {}
+                        }
+                    }
+                }
+                syn::PathArguments::Parenthesized(arguments) => {
+                    for argument in &arguments.inputs {
+                        self.visit_type(argument);
+                    }
+                    self.visit_return_type(&arguments.output);
+                }
+            }
+        }
+
+        fn visit_return_type(&mut self, return_type: &'ast syn::ReturnType) {
+            match return_type {
+                syn::ReturnType::Default => {}
+                syn::ReturnType::Type(_, output) => self.visit_type(output),
+            }
+        }
+
+        fn visit_type_param_bound(&mut self, bound: &'ast syn::TypeParamBound) {
+            match bound {
+                syn::TypeParamBound::Trait(bound) => self.visit_path(&bound.path),
+                syn::TypeParamBound::Lifetime(_) | syn::TypeParamBound::Verbatim(_) => {}
+                #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))]
+                _ => {}
+            }
+        }
+
+        // Type parameter should not be considered used by a macro path.
+        //
+        //     struct TypeMacro<T> {
+        //         mac: T!(),
+        //         marker: PhantomData<T>,
+        //     }
+        fn visit_macro(&mut self, _mac: &'ast syn::Macro) {}
+    }
+
+    let all_type_params = generics
+        .type_params()
+        .map(|param| param.ident.clone())
+        .collect();
+
+    let mut visitor = FindTyParams {
+        all_type_params,
+        relevant_type_params: HashSet::new(),
+        associated_type_usage: Vec::new(),
+    };
+    match &cont.data {
+        Data::Enum(variants) => {
+            for variant in variants.iter() {
+                let relevant_fields = variant
+                    .fields
+                    .iter()
+                    .filter(|field| filter(&field.attrs, Some(&variant.attrs)));
+                for field in relevant_fields {
+                    visitor.visit_field(field.original);
+                }
+            }
+        }
+        Data::Struct(_, fields) => {
+            for field in fields.iter().filter(|field| filter(&field.attrs, None)) {
+                visitor.visit_field(field.original);
+            }
+        }
+    }
+
+    let relevant_type_params = visitor.relevant_type_params;
+    let associated_type_usage = visitor.associated_type_usage;
+    let new_predicates = generics
+        .type_params()
+        .map(|param| param.ident.clone())
+        .filter(|id| relevant_type_params.contains(id))
+        .map(|id| syn::TypePath {
+            qself: None,
+            path: id.into(),
+        })
+        .chain(associated_type_usage.into_iter().cloned())
+        .map(|bounded_ty| {
+            syn::WherePredicate::Type(syn::PredicateType {
+                lifetimes: None,
+                // the type parameter that is being bounded e.g. T
+                bounded_ty: syn::Type::Path(bounded_ty),
+                colon_token: <Token![:]>::default(),
+                // the bound e.g. Serialize
+                bounds: vec![syn::TypeParamBound::Trait(syn::TraitBound {
+                    paren_token: None,
+                    modifier: syn::TraitBoundModifier::None,
+                    lifetimes: None,
+                    path: bound.clone(),
+                })]
+                .into_iter()
+                .collect(),
+            })
+        });
+
+    let mut generics = generics.clone();
+    generics
+        .make_where_clause()
+        .predicates
+        .extend(new_predicates);
+    generics
+}
+
+pub fn with_self_bound(
+    cont: &Container,
+    generics: &syn::Generics,
+    bound: &syn::Path,
+) -> syn::Generics {
+    let mut generics = generics.clone();
+    generics
+        .make_where_clause()
+        .predicates
+        .push(syn::WherePredicate::Type(syn::PredicateType {
+            lifetimes: None,
+            // the type that is being bounded e.g. MyStruct<'a, T>
+            bounded_ty: type_of_item(cont),
+            colon_token: <Token![:]>::default(),
+            // the bound e.g. Default
+            bounds: vec![syn::TypeParamBound::Trait(syn::TraitBound {
+                paren_token: None,
+                modifier: syn::TraitBoundModifier::None,
+                lifetimes: None,
+                path: bound.clone(),
+            })]
+            .into_iter()
+            .collect(),
+        }));
+    generics
+}
+
+pub fn with_lifetime_bound(generics: &syn::Generics, lifetime: &str) -> syn::Generics {
+    let bound = syn::Lifetime::new(lifetime, Span::call_site());
+    let def = syn::LifetimeParam {
+        attrs: Vec::new(),
+        lifetime: bound.clone(),
+        colon_token: None,
+        bounds: Punctuated::new(),
+    };
+
+    let params = Some(syn::GenericParam::Lifetime(def))
+        .into_iter()
+        .chain(generics.params.iter().cloned().map(|mut param| {
+            match &mut param {
+                syn::GenericParam::Lifetime(param) => {
+                    param.bounds.push(bound.clone());
+                }
+                syn::GenericParam::Type(param) => {
+                    param
+                        .bounds
+                        .push(syn::TypeParamBound::Lifetime(bound.clone()));
+                }
+                syn::GenericParam::Const(_) => {}
+            }
+            param
+        }))
+        .collect();
+
+    syn::Generics {
+        params,
+        ..generics.clone()
+    }
+}
+
+fn type_of_item(cont: &Container) -> syn::Type {
+    syn::Type::Path(syn::TypePath {
+        qself: None,
+        path: syn::Path {
+            leading_colon: None,
+            segments: vec![syn::PathSegment {
+                ident: cont.ident.clone(),
+                arguments: syn::PathArguments::AngleBracketed(
+                    syn::AngleBracketedGenericArguments {
+                        colon2_token: None,
+                        lt_token: <Token![<]>::default(),
+                        args: cont
+                            .generics
+                            .params
+                            .iter()
+                            .map(|param| match param {
+                                syn::GenericParam::Type(param) => {
+                                    syn::GenericArgument::Type(syn::Type::Path(syn::TypePath {
+                                        qself: None,
+                                        path: param.ident.clone().into(),
+                                    }))
+                                }
+                                syn::GenericParam::Lifetime(param) => {
+                                    syn::GenericArgument::Lifetime(param.lifetime.clone())
+                                }
+                                syn::GenericParam::Const(_) => {
+                                    panic!("Serde does not support const generics yet");
+                                }
+                            })
+                            .collect(),
+                        gt_token: <Token![>]>::default(),
+                    },
+                ),
+            }]
+            .into_iter()
+            .collect(),
+        },
+    })
+}
diff --git a/crates/serde_derive/src/de.rs b/crates/serde_derive/src/de.rs
new file mode 100644
index 0000000..d4238f1
--- /dev/null
+++ b/crates/serde_derive/src/de.rs
@@ -0,0 +1,3146 @@
+use proc_macro2::{Literal, Span, TokenStream};
+use quote::ToTokens;
+use syn::punctuated::Punctuated;
+use syn::spanned::Spanned;
+use syn::{self, Ident, Index, Member};
+
+use bound;
+use dummy;
+use fragment::{Expr, Fragment, Match, Stmts};
+use internals::ast::{Container, Data, Field, Style, Variant};
+use internals::{attr, replace_receiver, ungroup, Ctxt, Derive};
+use pretend;
+use this;
+
+use std::collections::BTreeSet;
+use std::ptr;
+
+pub fn expand_derive_deserialize(
+    input: &mut syn::DeriveInput,
+) -> Result<TokenStream, Vec<syn::Error>> {
+    replace_receiver(input);
+
+    let ctxt = Ctxt::new();
+    let cont = match Container::from_ast(&ctxt, input, Derive::Deserialize) {
+        Some(cont) => cont,
+        None => return Err(ctxt.check().unwrap_err()),
+    };
+    precondition(&ctxt, &cont);
+    ctxt.check()?;
+
+    let ident = &cont.ident;
+    let params = Parameters::new(&cont);
+    let (de_impl_generics, _, ty_generics, where_clause) = split_with_de_lifetime(&params);
+    let body = Stmts(deserialize_body(&cont, &params));
+    let delife = params.borrowed.de_lifetime();
+    let serde = cont.attrs.serde_path();
+
+    let impl_block = if let Some(remote) = cont.attrs.remote() {
+        let vis = &input.vis;
+        let used = pretend::pretend_used(&cont, params.is_packed);
+        quote! {
+            impl #de_impl_generics #ident #ty_generics #where_clause {
+                #vis fn deserialize<__D>(__deserializer: __D) -> #serde::__private::Result<#remote #ty_generics, __D::Error>
+                where
+                    __D: #serde::Deserializer<#delife>,
+                {
+                    #used
+                    #body
+                }
+            }
+        }
+    } else {
+        let fn_deserialize_in_place = deserialize_in_place_body(&cont, &params);
+
+        quote! {
+            #[automatically_derived]
+            impl #de_impl_generics #serde::Deserialize<#delife> for #ident #ty_generics #where_clause {
+                fn deserialize<__D>(__deserializer: __D) -> #serde::__private::Result<Self, __D::Error>
+                where
+                    __D: #serde::Deserializer<#delife>,
+                {
+                    #body
+                }
+
+                #fn_deserialize_in_place
+            }
+        }
+    };
+
+    Ok(dummy::wrap_in_const(
+        cont.attrs.custom_serde_path(),
+        "DESERIALIZE",
+        ident,
+        impl_block,
+    ))
+}
+
+fn precondition(cx: &Ctxt, cont: &Container) {
+    precondition_sized(cx, cont);
+    precondition_no_de_lifetime(cx, cont);
+}
+
+fn precondition_sized(cx: &Ctxt, cont: &Container) {
+    if let Data::Struct(_, fields) = &cont.data {
+        if let Some(last) = fields.last() {
+            if let syn::Type::Slice(_) = ungroup(last.ty) {
+                cx.error_spanned_by(
+                    cont.original,
+                    "cannot deserialize a dynamically sized struct",
+                );
+            }
+        }
+    }
+}
+
+fn precondition_no_de_lifetime(cx: &Ctxt, cont: &Container) {
+    if let BorrowedLifetimes::Borrowed(_) = borrowed_lifetimes(cont) {
+        for param in cont.generics.lifetimes() {
+            if param.lifetime.to_string() == "'de" {
+                cx.error_spanned_by(
+                    &param.lifetime,
+                    "cannot deserialize when there is a lifetime parameter called 'de",
+                );
+                return;
+            }
+        }
+    }
+}
+
+struct Parameters {
+    /// Name of the type the `derive` is on.
+    local: syn::Ident,
+
+    /// Path to the type the impl is for. Either a single `Ident` for local
+    /// types (does not include generic parameters) or `some::remote::Path` for
+    /// remote types.
+    this_type: syn::Path,
+
+    /// Same as `this_type` but using `::<T>` for generic parameters for use in
+    /// expression position.
+    this_value: syn::Path,
+
+    /// Generics including any explicit and inferred bounds for the impl.
+    generics: syn::Generics,
+
+    /// Lifetimes borrowed from the deserializer. These will become bounds on
+    /// the `'de` lifetime of the deserializer.
+    borrowed: BorrowedLifetimes,
+
+    /// At least one field has a serde(getter) attribute, implying that the
+    /// remote type has a private field.
+    has_getter: bool,
+
+    /// Type has a repr(packed) attribute.
+    is_packed: bool,
+}
+
+impl Parameters {
+    fn new(cont: &Container) -> Self {
+        let local = cont.ident.clone();
+        let this_type = this::this_type(cont);
+        let this_value = this::this_value(cont);
+        let borrowed = borrowed_lifetimes(cont);
+        let generics = build_generics(cont, &borrowed);
+        let has_getter = cont.data.has_getter();
+        let is_packed = cont.attrs.is_packed();
+
+        Parameters {
+            local,
+            this_type,
+            this_value,
+            generics,
+            borrowed,
+            has_getter,
+            is_packed,
+        }
+    }
+
+    /// Type name to use in error messages and `&'static str` arguments to
+    /// various Deserializer methods.
+    fn type_name(&self) -> String {
+        self.this_type.segments.last().unwrap().ident.to_string()
+    }
+}
+
+// All the generics in the input, plus a bound `T: Deserialize` for each generic
+// field type that will be deserialized by us, plus a bound `T: Default` for
+// each generic field type that will be set to a default value.
+fn build_generics(cont: &Container, borrowed: &BorrowedLifetimes) -> syn::Generics {
+    let generics = bound::without_defaults(cont.generics);
+
+    let generics = bound::with_where_predicates_from_fields(cont, &generics, attr::Field::de_bound);
+
+    let generics =
+        bound::with_where_predicates_from_variants(cont, &generics, attr::Variant::de_bound);
+
+    match cont.attrs.de_bound() {
+        Some(predicates) => bound::with_where_predicates(&generics, predicates),
+        None => {
+            let generics = match *cont.attrs.default() {
+                attr::Default::Default => bound::with_self_bound(
+                    cont,
+                    &generics,
+                    &parse_quote!(_serde::__private::Default),
+                ),
+                attr::Default::None | attr::Default::Path(_) => generics,
+            };
+
+            let delife = borrowed.de_lifetime();
+            let generics = bound::with_bound(
+                cont,
+                &generics,
+                needs_deserialize_bound,
+                &parse_quote!(_serde::Deserialize<#delife>),
+            );
+
+            bound::with_bound(
+                cont,
+                &generics,
+                requires_default,
+                &parse_quote!(_serde::__private::Default),
+            )
+        }
+    }
+}
+
+// Fields with a `skip_deserializing` or `deserialize_with` attribute, or which
+// belong to a variant with a `skip_deserializing` or `deserialize_with`
+// attribute, are not deserialized by us so we do not generate a bound. Fields
+// with a `bound` attribute specify their own bound so we do not generate one.
+// All other fields may need a `T: Deserialize` bound where T is the type of the
+// field.
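+//
+// For instance (illustrative only; `de_t` stands for a user-supplied function,
+// not anything defined in this crate):
+//
+//     #[derive(Deserialize)]
+//     struct S<T> {
+//         #[serde(deserialize_with = "de_t")]
+//         a: T,
+//         b: T,
+//     }
+//
+// Field `a` does not ask for a bound, but field `b` still causes
+// `T: Deserialize<'de>` to be emitted for the impl.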
+fn needs_deserialize_bound(field: &attr::Field, variant: Option<&attr::Variant>) -> bool {
+    !field.skip_deserializing()
+        && field.deserialize_with().is_none()
+        && field.de_bound().is_none()
+        && variant.map_or(true, |variant| {
+            !variant.skip_deserializing()
+                && variant.deserialize_with().is_none()
+                && variant.de_bound().is_none()
+        })
+}
+
+// Fields with a `default` attribute (not `default=...`), and fields with a
+// `skip_deserializing` attribute that do not also have `default=...`.
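+//
+// For example, `#[serde(default)] value: T` on a field of generic type causes
+// a `T: Default` bound (spelled `_serde::__private::Default` in the generated
+// code) to be added to the impl.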
+fn requires_default(field: &attr::Field, _variant: Option<&attr::Variant>) -> bool {
+    if let attr::Default::Default = *field.default() {
+        true
+    } else {
+        false
+    }
+}
+
+enum BorrowedLifetimes {
+    Borrowed(BTreeSet<syn::Lifetime>),
+    Static,
+}
+
+impl BorrowedLifetimes {
+    fn de_lifetime(&self) -> syn::Lifetime {
+        match *self {
+            BorrowedLifetimes::Borrowed(_) => syn::Lifetime::new("'de", Span::call_site()),
+            BorrowedLifetimes::Static => syn::Lifetime::new("'static", Span::call_site()),
+        }
+    }
+
+    fn de_lifetime_param(&self) -> Option<syn::LifetimeParam> {
+        match self {
+            BorrowedLifetimes::Borrowed(bounds) => Some(syn::LifetimeParam {
+                attrs: Vec::new(),
+                lifetime: syn::Lifetime::new("'de", Span::call_site()),
+                colon_token: None,
+                bounds: bounds.iter().cloned().collect(),
+            }),
+            BorrowedLifetimes::Static => None,
+        }
+    }
+}
+
+// The union of lifetimes borrowed by each field of the container.
+//
+// These turn into bounds on the `'de` lifetime of the Deserialize impl. If
+// lifetimes `'a` and `'b` are borrowed but `'c` is not, the impl is:
+//
+//     impl<'de: 'a + 'b, 'a, 'b, 'c> Deserialize<'de> for S<'a, 'b, 'c>
+//
+// If any borrowed lifetime is `'static`, then `'de: 'static` would be redundant
+// and we use plain `'static` instead of `'de`.
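+//
+// A lifetime typically becomes borrowed either implicitly, for fields of type
+// `&'a str` or `&'a [u8]`, or explicitly via the field attribute, e.g.
+//
+//     #[serde(borrow)]
+//     text: Cow<'a, str>,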
+fn borrowed_lifetimes(cont: &Container) -> BorrowedLifetimes {
+    let mut lifetimes = BTreeSet::new();
+    for field in cont.data.all_fields() {
+        if !field.attrs.skip_deserializing() {
+            lifetimes.extend(field.attrs.borrowed_lifetimes().iter().cloned());
+        }
+    }
+    if lifetimes.iter().any(|b| b.to_string() == "'static") {
+        BorrowedLifetimes::Static
+    } else {
+        BorrowedLifetimes::Borrowed(lifetimes)
+    }
+}
+
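+// The body is selected from the container attributes: `#[serde(transparent)]`
+// delegates to the single non-skipped field, `#[serde(from = "T")]` and
+// `#[serde(try_from = "T")]` deserialize `T` and then convert with `From` /
+// `TryFrom`, identifier containers get a custom identifier deserializer, and
+// everything else goes through the struct/enum visitors below. Illustrative
+// attribute usage (the type names are placeholders):
+//
+//     #[serde(transparent)]
+//     struct Wrapper(Inner);
+//
+//     #[serde(try_from = "RawConfig")]
+//     struct Config { /* ... */ }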
+fn deserialize_body(cont: &Container, params: &Parameters) -> Fragment {
+    if cont.attrs.transparent() {
+        deserialize_transparent(cont, params)
+    } else if let Some(type_from) = cont.attrs.type_from() {
+        deserialize_from(type_from)
+    } else if let Some(type_try_from) = cont.attrs.type_try_from() {
+        deserialize_try_from(type_try_from)
+    } else if let attr::Identifier::No = cont.attrs.identifier() {
+        match &cont.data {
+            Data::Enum(variants) => deserialize_enum(params, variants, &cont.attrs),
+            Data::Struct(Style::Struct, fields) => {
+                deserialize_struct(None, params, fields, &cont.attrs, None, &Untagged::No)
+            }
+            Data::Struct(Style::Tuple, fields) | Data::Struct(Style::Newtype, fields) => {
+                deserialize_tuple(None, params, fields, &cont.attrs, None)
+            }
+            Data::Struct(Style::Unit, _) => deserialize_unit_struct(params, &cont.attrs),
+        }
+    } else {
+        match &cont.data {
+            Data::Enum(variants) => deserialize_custom_identifier(params, variants, &cont.attrs),
+            Data::Struct(_, _) => unreachable!("checked in serde_derive_internals"),
+        }
+    }
+}
+
+#[cfg(feature = "deserialize_in_place")]
+fn deserialize_in_place_body(cont: &Container, params: &Parameters) -> Option<Stmts> {
+    // Only remote derives have getters, and we do not generate
+    // deserialize_in_place for remote derives.
+    assert!(!params.has_getter);
+
+    if cont.attrs.transparent()
+        || cont.attrs.type_from().is_some()
+        || cont.attrs.type_try_from().is_some()
+        || cont.attrs.identifier().is_some()
+        || cont
+            .data
+            .all_fields()
+            .all(|f| f.attrs.deserialize_with().is_some())
+    {
+        return None;
+    }
+
+    let code = match &cont.data {
+        Data::Struct(Style::Struct, fields) => {
+            deserialize_struct_in_place(None, params, fields, &cont.attrs, None)?
+        }
+        Data::Struct(Style::Tuple, fields) | Data::Struct(Style::Newtype, fields) => {
+            deserialize_tuple_in_place(None, params, fields, &cont.attrs, None)
+        }
+        Data::Enum(_) | Data::Struct(Style::Unit, _) => {
+            return None;
+        }
+    };
+
+    let delife = params.borrowed.de_lifetime();
+    let stmts = Stmts(code);
+
+    let fn_deserialize_in_place = quote_block! {
+        fn deserialize_in_place<__D>(__deserializer: __D, __place: &mut Self) -> _serde::__private::Result<(), __D::Error>
+        where
+            __D: _serde::Deserializer<#delife>,
+        {
+            #stmts
+        }
+    };
+
+    Some(Stmts(fn_deserialize_in_place))
+}
+
+#[cfg(not(feature = "deserialize_in_place"))]
+fn deserialize_in_place_body(_cont: &Container, _params: &Parameters) -> Option<Stmts> {
+    None
+}
+
+fn deserialize_transparent(cont: &Container, params: &Parameters) -> Fragment {
+    let fields = match &cont.data {
+        Data::Struct(_, fields) => fields,
+        Data::Enum(_) => unreachable!(),
+    };
+
+    let this_value = &params.this_value;
+    let transparent_field = fields.iter().find(|f| f.attrs.transparent()).unwrap();
+
+    let path = match transparent_field.attrs.deserialize_with() {
+        Some(path) => quote!(#path),
+        None => {
+            let span = transparent_field.original.span();
+            quote_spanned!(span=> _serde::Deserialize::deserialize)
+        }
+    };
+
+    let assign = fields.iter().map(|field| {
+        let member = &field.member;
+        if ptr::eq(field, transparent_field) {
+            quote!(#member: __transparent)
+        } else {
+            let value = match field.attrs.default() {
+                attr::Default::Default => quote!(_serde::__private::Default::default()),
+                attr::Default::Path(path) => quote!(#path()),
+                attr::Default::None => quote!(_serde::__private::PhantomData),
+            };
+            quote!(#member: #value)
+        }
+    });
+
+    quote_block! {
+        _serde::__private::Result::map(
+            #path(__deserializer),
+            |__transparent| #this_value { #(#assign),* })
+    }
+}
+
+fn deserialize_from(type_from: &syn::Type) -> Fragment {
+    quote_block! {
+        _serde::__private::Result::map(
+            <#type_from as _serde::Deserialize>::deserialize(__deserializer),
+            _serde::__private::From::from)
+    }
+}
+
+fn deserialize_try_from(type_try_from: &syn::Type) -> Fragment {
+    quote_block! {
+        _serde::__private::Result::and_then(
+            <#type_try_from as _serde::Deserialize>::deserialize(__deserializer),
+            |v| _serde::__private::TryFrom::try_from(v).map_err(_serde::de::Error::custom))
+    }
+}
+
+fn deserialize_unit_struct(params: &Parameters, cattrs: &attr::Container) -> Fragment {
+    let this_type = &params.this_type;
+    let this_value = &params.this_value;
+    let type_name = cattrs.name().deserialize_name();
+
+    let expecting = format!("unit struct {}", params.type_name());
+    let expecting = cattrs.expecting().unwrap_or(&expecting);
+
+    quote_block! {
+        struct __Visitor;
+
+        impl<'de> _serde::de::Visitor<'de> for __Visitor {
+            type Value = #this_type;
+
+            fn expecting(&self, __formatter: &mut _serde::__private::Formatter) -> _serde::__private::fmt::Result {
+                _serde::__private::Formatter::write_str(__formatter, #expecting)
+            }
+
+            #[inline]
+            fn visit_unit<__E>(self) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(#this_value)
+            }
+        }
+
+        _serde::Deserializer::deserialize_unit_struct(__deserializer, #type_name, __Visitor)
+    }
+}
+
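+// Handles tuple structs, newtype structs, and tuple/newtype enum variants. A
+// one-field non-variant container such as `struct Meters(f64)` is dispatched
+// through `deserialize_newtype_struct` rather than `deserialize_tuple_struct`,
+// as can be seen from the `nfields == 1` branch below.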
+fn deserialize_tuple(
+    variant_ident: Option<&syn::Ident>,
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+    deserializer: Option<TokenStream>,
+) -> Fragment {
+    let this_type = &params.this_type;
+    let this_value = &params.this_value;
+    let (de_impl_generics, de_ty_generics, ty_generics, where_clause) =
+        split_with_de_lifetime(params);
+    let delife = params.borrowed.de_lifetime();
+
+    assert!(!cattrs.has_flatten());
+
+    // If there are getters (implying private fields), construct the local type
+    // and use an `Into` conversion to get the remote type. If there are no
+    // getters then construct the target type directly.
+    let construct = if params.has_getter {
+        let local = &params.local;
+        quote!(#local)
+    } else {
+        quote!(#this_value)
+    };
+
+    let is_enum = variant_ident.is_some();
+    let type_path = match variant_ident {
+        Some(variant_ident) => quote!(#construct::#variant_ident),
+        None => construct,
+    };
+    let expecting = match variant_ident {
+        Some(variant_ident) => format!("tuple variant {}::{}", params.type_name(), variant_ident),
+        None => format!("tuple struct {}", params.type_name()),
+    };
+    let expecting = cattrs.expecting().unwrap_or(&expecting);
+
+    let nfields = fields.len();
+
+    let visit_newtype_struct = if !is_enum && nfields == 1 {
+        Some(deserialize_newtype_struct(&type_path, params, &fields[0]))
+    } else {
+        None
+    };
+
+    let visit_seq = Stmts(deserialize_seq(
+        &type_path, params, fields, false, cattrs, expecting,
+    ));
+
+    let visitor_expr = quote! {
+        __Visitor {
+            marker: _serde::__private::PhantomData::<#this_type #ty_generics>,
+            lifetime: _serde::__private::PhantomData,
+        }
+    };
+    let dispatch = if let Some(deserializer) = deserializer {
+        quote!(_serde::Deserializer::deserialize_tuple(#deserializer, #nfields, #visitor_expr))
+    } else if is_enum {
+        quote!(_serde::de::VariantAccess::tuple_variant(__variant, #nfields, #visitor_expr))
+    } else if nfields == 1 {
+        let type_name = cattrs.name().deserialize_name();
+        quote!(_serde::Deserializer::deserialize_newtype_struct(__deserializer, #type_name, #visitor_expr))
+    } else {
+        let type_name = cattrs.name().deserialize_name();
+        quote!(_serde::Deserializer::deserialize_tuple_struct(__deserializer, #type_name, #nfields, #visitor_expr))
+    };
+
+    let all_skipped = fields.iter().all(|field| field.attrs.skip_deserializing());
+    let visitor_var = if all_skipped {
+        quote!(_)
+    } else {
+        quote!(mut __seq)
+    };
+
+    quote_block! {
+        struct __Visitor #de_impl_generics #where_clause {
+            marker: _serde::__private::PhantomData<#this_type #ty_generics>,
+            lifetime: _serde::__private::PhantomData<&#delife ()>,
+        }
+
+        impl #de_impl_generics _serde::de::Visitor<#delife> for __Visitor #de_ty_generics #where_clause {
+            type Value = #this_type #ty_generics;
+
+            fn expecting(&self, __formatter: &mut _serde::__private::Formatter) -> _serde::__private::fmt::Result {
+                _serde::__private::Formatter::write_str(__formatter, #expecting)
+            }
+
+            #visit_newtype_struct
+
+            #[inline]
+            fn visit_seq<__A>(self, #visitor_var: __A) -> _serde::__private::Result<Self::Value, __A::Error>
+            where
+                __A: _serde::de::SeqAccess<#delife>,
+            {
+                #visit_seq
+            }
+        }
+
+        #dispatch
+    }
+}
+
+#[cfg(feature = "deserialize_in_place")]
+fn deserialize_tuple_in_place(
+    variant_ident: Option<syn::Ident>,
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+    deserializer: Option<TokenStream>,
+) -> Fragment {
+    let this_type = &params.this_type;
+    let (de_impl_generics, de_ty_generics, ty_generics, where_clause) =
+        split_with_de_lifetime(params);
+    let delife = params.borrowed.de_lifetime();
+
+    assert!(!cattrs.has_flatten());
+
+    let is_enum = variant_ident.is_some();
+    let expecting = match variant_ident {
+        Some(variant_ident) => format!("tuple variant {}::{}", params.type_name(), variant_ident),
+        None => format!("tuple struct {}", params.type_name()),
+    };
+    let expecting = cattrs.expecting().unwrap_or(&expecting);
+
+    let nfields = fields.len();
+
+    let visit_newtype_struct = if !is_enum && nfields == 1 {
+        Some(deserialize_newtype_struct_in_place(params, &fields[0]))
+    } else {
+        None
+    };
+
+    let visit_seq = Stmts(deserialize_seq_in_place(params, fields, cattrs, expecting));
+
+    let visitor_expr = quote! {
+        __Visitor {
+            place: __place,
+            lifetime: _serde::__private::PhantomData,
+        }
+    };
+
+    let dispatch = if let Some(deserializer) = deserializer {
+        quote!(_serde::Deserializer::deserialize_tuple(#deserializer, #nfields, #visitor_expr))
+    } else if is_enum {
+        quote!(_serde::de::VariantAccess::tuple_variant(__variant, #nfields, #visitor_expr))
+    } else if nfields == 1 {
+        let type_name = cattrs.name().deserialize_name();
+        quote!(_serde::Deserializer::deserialize_newtype_struct(__deserializer, #type_name, #visitor_expr))
+    } else {
+        let type_name = cattrs.name().deserialize_name();
+        quote!(_serde::Deserializer::deserialize_tuple_struct(__deserializer, #type_name, #nfields, #visitor_expr))
+    };
+
+    let all_skipped = fields.iter().all(|field| field.attrs.skip_deserializing());
+    let visitor_var = if all_skipped {
+        quote!(_)
+    } else {
+        quote!(mut __seq)
+    };
+
+    let in_place_impl_generics = de_impl_generics.in_place();
+    let in_place_ty_generics = de_ty_generics.in_place();
+    let place_life = place_lifetime();
+
+    quote_block! {
+        struct __Visitor #in_place_impl_generics #where_clause {
+            place: &#place_life mut #this_type #ty_generics,
+            lifetime: _serde::__private::PhantomData<&#delife ()>,
+        }
+
+        impl #in_place_impl_generics _serde::de::Visitor<#delife> for __Visitor #in_place_ty_generics #where_clause {
+            type Value = ();
+
+            fn expecting(&self, __formatter: &mut _serde::__private::Formatter) -> _serde::__private::fmt::Result {
+                _serde::__private::Formatter::write_str(__formatter, #expecting)
+            }
+
+            #visit_newtype_struct
+
+            #[inline]
+            fn visit_seq<__A>(self, #visitor_var: __A) -> _serde::__private::Result<Self::Value, __A::Error>
+            where
+                __A: _serde::de::SeqAccess<#delife>,
+            {
+                #visit_seq
+            }
+        }
+
+        #dispatch
+    }
+}
+
+fn deserialize_seq(
+    type_path: &TokenStream,
+    params: &Parameters,
+    fields: &[Field],
+    is_struct: bool,
+    cattrs: &attr::Container,
+    expecting: &str,
+) -> Fragment {
+    let vars = (0..fields.len()).map(field_i as fn(_) -> _);
+
+    let deserialized_count = fields
+        .iter()
+        .filter(|field| !field.attrs.skip_deserializing())
+        .count();
+    let expecting = if deserialized_count == 1 {
+        format!("{} with 1 element", expecting)
+    } else {
+        format!("{} with {} elements", expecting, deserialized_count)
+    };
+    let expecting = cattrs.expecting().unwrap_or(&expecting);
+
+    let mut index_in_seq = 0_usize;
+    let let_values = vars.clone().zip(fields).map(|(var, field)| {
+        if field.attrs.skip_deserializing() {
+            let default = Expr(expr_is_missing(field, cattrs));
+            quote! {
+                let #var = #default;
+            }
+        } else {
+            let visit = match field.attrs.deserialize_with() {
+                None => {
+                    let field_ty = field.ty;
+                    let span = field.original.span();
+                    let func =
+                        quote_spanned!(span=> _serde::de::SeqAccess::next_element::<#field_ty>);
+                    quote!(try!(#func(&mut __seq)))
+                }
+                Some(path) => {
+                    let (wrapper, wrapper_ty) = wrap_deserialize_field_with(params, field.ty, path);
+                    quote!({
+                        #wrapper
+                        _serde::__private::Option::map(
+                            try!(_serde::de::SeqAccess::next_element::<#wrapper_ty>(&mut __seq)),
+                            |__wrap| __wrap.value)
+                    })
+                }
+            };
+            let value_if_none = match field.attrs.default() {
+                attr::Default::Default => quote!(_serde::__private::Default::default()),
+                attr::Default::Path(path) => quote!(#path()),
+                attr::Default::None => quote!(
+                    return _serde::__private::Err(_serde::de::Error::invalid_length(#index_in_seq, &#expecting));
+                ),
+            };
+            let assign = quote! {
+                let #var = match #visit {
+                    _serde::__private::Some(__value) => __value,
+                    _serde::__private::None => {
+                        #value_if_none
+                    }
+                };
+            };
+            index_in_seq += 1;
+            assign
+        }
+    });
+
+    let mut result = if is_struct {
+        let names = fields.iter().map(|f| &f.member);
+        quote! {
+            #type_path { #( #names: #vars ),* }
+        }
+    } else {
+        quote! {
+            #type_path ( #(#vars),* )
+        }
+    };
+
+    if params.has_getter {
+        let this_type = &params.this_type;
+        let (_, ty_generics, _) = params.generics.split_for_impl();
+        result = quote! {
+            _serde::__private::Into::<#this_type #ty_generics>::into(#result)
+        };
+    }
+
+    let let_default = match cattrs.default() {
+        attr::Default::Default => Some(quote!(
+            let __default: Self::Value = _serde::__private::Default::default();
+        )),
+        attr::Default::Path(path) => Some(quote!(
+            let __default: Self::Value = #path();
+        )),
+        attr::Default::None => {
+            // We don't need the default value; to prevent an unused variable
+            // warning, we leave the line empty.
+            None
+        }
+    };
+
+    quote_block! {
+        #let_default
+        #(#let_values)*
+        _serde::__private::Ok(#result)
+    }
+}
+
+#[cfg(feature = "deserialize_in_place")]
+fn deserialize_seq_in_place(
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+    expecting: &str,
+) -> Fragment {
+    let deserialized_count = fields
+        .iter()
+        .filter(|field| !field.attrs.skip_deserializing())
+        .count();
+    let expecting = if deserialized_count == 1 {
+        format!("{} with 1 element", expecting)
+    } else {
+        format!("{} with {} elements", expecting, deserialized_count)
+    };
+    let expecting = cattrs.expecting().unwrap_or(&expecting);
+
+    let mut index_in_seq = 0usize;
+    let write_values = fields.iter().map(|field| {
+        let member = &field.member;
+
+        if field.attrs.skip_deserializing() {
+            let default = Expr(expr_is_missing(field, cattrs));
+            quote! {
+                self.place.#member = #default;
+            }
+        } else {
+            let value_if_none = match field.attrs.default() {
+                attr::Default::Default => quote!(
+                    self.place.#member = _serde::__private::Default::default();
+                ),
+                attr::Default::Path(path) => quote!(
+                    self.place.#member = #path();
+                ),
+                attr::Default::None => quote!(
+                    return _serde::__private::Err(_serde::de::Error::invalid_length(#index_in_seq, &#expecting));
+                ),
+            };
+            let write = match field.attrs.deserialize_with() {
+                None => {
+                    quote! {
+                        if let _serde::__private::None = try!(_serde::de::SeqAccess::next_element_seed(&mut __seq,
+                            _serde::__private::de::InPlaceSeed(&mut self.place.#member)))
+                        {
+                            #value_if_none
+                        }
+                    }
+                }
+                Some(path) => {
+                    let (wrapper, wrapper_ty) = wrap_deserialize_field_with(params, field.ty, path);
+                    quote!({
+                        #wrapper
+                        match try!(_serde::de::SeqAccess::next_element::<#wrapper_ty>(&mut __seq)) {
+                            _serde::__private::Some(__wrap) => {
+                                self.place.#member = __wrap.value;
+                            }
+                            _serde::__private::None => {
+                                #value_if_none
+                            }
+                        }
+                    })
+                }
+            };
+            index_in_seq += 1;
+            write
+        }
+    });
+
+    let this_type = &params.this_type;
+    let (_, ty_generics, _) = params.generics.split_for_impl();
+    let let_default = match cattrs.default() {
+        attr::Default::Default => Some(quote!(
+            let __default: #this_type #ty_generics = _serde::__private::Default::default();
+        )),
+        attr::Default::Path(path) => Some(quote!(
+            let __default: #this_type #ty_generics = #path();
+        )),
+        attr::Default::None => {
+            // We don't need the default value; to prevent an unused variable
+            // warning, we leave the line empty.
+            None
+        }
+    };
+
+    quote_block! {
+        #let_default
+        #(#write_values)*
+        _serde::__private::Ok(())
+    }
+}
+
+fn deserialize_newtype_struct(
+    type_path: &TokenStream,
+    params: &Parameters,
+    field: &Field,
+) -> TokenStream {
+    let delife = params.borrowed.de_lifetime();
+    let field_ty = field.ty;
+
+    let value = match field.attrs.deserialize_with() {
+        None => {
+            let span = field.original.span();
+            let func = quote_spanned!(span=> <#field_ty as _serde::Deserialize>::deserialize);
+            quote! {
+                try!(#func(__e))
+            }
+        }
+        Some(path) => {
+            quote! {
+                try!(#path(__e))
+            }
+        }
+    };
+
+    let mut result = quote!(#type_path(__field0));
+    if params.has_getter {
+        let this_type = &params.this_type;
+        let (_, ty_generics, _) = params.generics.split_for_impl();
+        result = quote! {
+            _serde::__private::Into::<#this_type #ty_generics>::into(#result)
+        };
+    }
+
+    quote! {
+        #[inline]
+        fn visit_newtype_struct<__E>(self, __e: __E) -> _serde::__private::Result<Self::Value, __E::Error>
+        where
+            __E: _serde::Deserializer<#delife>,
+        {
+            let __field0: #field_ty = #value;
+            _serde::__private::Ok(#result)
+        }
+    }
+}
+
+#[cfg(feature = "deserialize_in_place")]
+fn deserialize_newtype_struct_in_place(params: &Parameters, field: &Field) -> TokenStream {
+    // We do not generate deserialize_in_place if every field has a
+    // deserialize_with.
+    assert!(field.attrs.deserialize_with().is_none());
+
+    let delife = params.borrowed.de_lifetime();
+
+    quote! {
+        #[inline]
+        fn visit_newtype_struct<__E>(self, __e: __E) -> _serde::__private::Result<Self::Value, __E::Error>
+        where
+            __E: _serde::Deserializer<#delife>,
+        {
+            _serde::Deserialize::deserialize_in_place(__e, &mut self.place.0)
+        }
+    }
+}
+
+enum Untagged {
+    Yes,
+    No,
+}
+
+fn deserialize_struct(
+    variant_ident: Option<&syn::Ident>,
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+    deserializer: Option<TokenStream>,
+    untagged: &Untagged,
+) -> Fragment {
+    let is_enum = variant_ident.is_some();
+
+    let this_type = &params.this_type;
+    let this_value = &params.this_value;
+    let (de_impl_generics, de_ty_generics, ty_generics, where_clause) =
+        split_with_de_lifetime(params);
+    let delife = params.borrowed.de_lifetime();
+
+    // If there are getters (implying private fields), construct the local type
+    // and use an `Into` conversion to get the remote type. If there are no
+    // getters then construct the target type directly.
+    let construct = if params.has_getter {
+        let local = &params.local;
+        quote!(#local)
+    } else {
+        quote!(#this_value)
+    };
+
+    let type_path = match variant_ident {
+        Some(variant_ident) => quote!(#construct::#variant_ident),
+        None => construct,
+    };
+    let expecting = match variant_ident {
+        Some(variant_ident) => format!("struct variant {}::{}", params.type_name(), variant_ident),
+        None => format!("struct {}", params.type_name()),
+    };
+    let expecting = cattrs.expecting().unwrap_or(&expecting);
+
+    let visit_seq = Stmts(deserialize_seq(
+        &type_path, params, fields, true, cattrs, expecting,
+    ));
+
+    let (field_visitor, fields_stmt, visit_map) = if cattrs.has_flatten() {
+        deserialize_struct_as_map_visitor(&type_path, params, fields, cattrs)
+    } else {
+        deserialize_struct_as_struct_visitor(&type_path, params, fields, cattrs)
+    };
+    let field_visitor = Stmts(field_visitor);
+    let fields_stmt = fields_stmt.map(Stmts);
+    let visit_map = Stmts(visit_map);
+
+    let visitor_expr = quote! {
+        __Visitor {
+            marker: _serde::__private::PhantomData::<#this_type #ty_generics>,
+            lifetime: _serde::__private::PhantomData,
+        }
+    };
+    let need_seed = deserializer.is_none();
+    let dispatch = if let Some(deserializer) = deserializer {
+        quote! {
+            _serde::Deserializer::deserialize_any(#deserializer, #visitor_expr)
+        }
+    } else if is_enum && cattrs.has_flatten() {
+        quote! {
+            _serde::de::VariantAccess::newtype_variant_seed(__variant, #visitor_expr)
+        }
+    } else if is_enum {
+        quote! {
+            _serde::de::VariantAccess::struct_variant(__variant, FIELDS, #visitor_expr)
+        }
+    } else if cattrs.has_flatten() {
+        quote! {
+            _serde::Deserializer::deserialize_map(__deserializer, #visitor_expr)
+        }
+    } else {
+        let type_name = cattrs.name().deserialize_name();
+        quote! {
+            _serde::Deserializer::deserialize_struct(__deserializer, #type_name, FIELDS, #visitor_expr)
+        }
+    };
+
+    let all_skipped = fields.iter().all(|field| field.attrs.skip_deserializing());
+    let visitor_var = if all_skipped {
+        quote!(_)
+    } else {
+        quote!(mut __seq)
+    };
+
+    // untagged struct variants do not get a visit_seq method. The same applies to
+    // structs that only have a map representation.
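+    //
+    // For example, a struct containing a `#[serde(flatten)]` field such as
+    //
+    //     #[derive(Deserialize)]
+    //     struct Config {
+    //         name: String,
+    //         #[serde(flatten)]
+    //         rest: std::collections::HashMap<String, String>,
+    //     }
+    //
+    // is only deserializable from a map, so no visit_seq is generated for it.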
+    let visit_seq = match *untagged {
+        Untagged::No if !cattrs.has_flatten() => Some(quote! {
+            #[inline]
+            fn visit_seq<__A>(self, #visitor_var: __A) -> _serde::__private::Result<Self::Value, __A::Error>
+            where
+                __A: _serde::de::SeqAccess<#delife>,
+            {
+                #visit_seq
+            }
+        }),
+        _ => None,
+    };
+
+    let visitor_seed = if need_seed && is_enum && cattrs.has_flatten() {
+        Some(quote! {
+            impl #de_impl_generics _serde::de::DeserializeSeed<#delife> for __Visitor #de_ty_generics #where_clause {
+                type Value = #this_type #ty_generics;
+
+                fn deserialize<__D>(self, __deserializer: __D) -> _serde::__private::Result<Self::Value, __D::Error>
+                where
+                    __D: _serde::Deserializer<#delife>,
+                {
+                    _serde::Deserializer::deserialize_map(__deserializer, self)
+                }
+            }
+        })
+    } else {
+        None
+    };
+
+    quote_block! {
+        #field_visitor
+
+        struct __Visitor #de_impl_generics #where_clause {
+            marker: _serde::__private::PhantomData<#this_type #ty_generics>,
+            lifetime: _serde::__private::PhantomData<&#delife ()>,
+        }
+
+        impl #de_impl_generics _serde::de::Visitor<#delife> for __Visitor #de_ty_generics #where_clause {
+            type Value = #this_type #ty_generics;
+
+            fn expecting(&self, __formatter: &mut _serde::__private::Formatter) -> _serde::__private::fmt::Result {
+                _serde::__private::Formatter::write_str(__formatter, #expecting)
+            }
+
+            #visit_seq
+
+            #[inline]
+            fn visit_map<__A>(self, mut __map: __A) -> _serde::__private::Result<Self::Value, __A::Error>
+            where
+                __A: _serde::de::MapAccess<#delife>,
+            {
+                #visit_map
+            }
+        }
+
+        #visitor_seed
+
+        #fields_stmt
+
+        #dispatch
+    }
+}
+
+#[cfg(feature = "deserialize_in_place")]
+fn deserialize_struct_in_place(
+    variant_ident: Option<syn::Ident>,
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+    deserializer: Option<TokenStream>,
+) -> Option<Fragment> {
+    let is_enum = variant_ident.is_some();
+
+    // for now we do not support in_place deserialization for structs that
+    // are represented as map.
+    if cattrs.has_flatten() {
+        return None;
+    }
+
+    let this_type = &params.this_type;
+    let (de_impl_generics, de_ty_generics, ty_generics, where_clause) =
+        split_with_de_lifetime(params);
+    let delife = params.borrowed.de_lifetime();
+
+    let expecting = match variant_ident {
+        Some(variant_ident) => format!("struct variant {}::{}", params.type_name(), variant_ident),
+        None => format!("struct {}", params.type_name()),
+    };
+    let expecting = cattrs.expecting().unwrap_or(&expecting);
+
+    let visit_seq = Stmts(deserialize_seq_in_place(params, fields, cattrs, expecting));
+
+    let (field_visitor, fields_stmt, visit_map) =
+        deserialize_struct_as_struct_in_place_visitor(params, fields, cattrs);
+
+    let field_visitor = Stmts(field_visitor);
+    let fields_stmt = Stmts(fields_stmt);
+    let visit_map = Stmts(visit_map);
+
+    let visitor_expr = quote! {
+        __Visitor {
+            place: __place,
+            lifetime: _serde::__private::PhantomData,
+        }
+    };
+    let dispatch = if let Some(deserializer) = deserializer {
+        quote! {
+            _serde::Deserializer::deserialize_any(#deserializer, #visitor_expr)
+        }
+    } else if is_enum {
+        quote! {
+            _serde::de::VariantAccess::struct_variant(__variant, FIELDS, #visitor_expr)
+        }
+    } else {
+        let type_name = cattrs.name().deserialize_name();
+        quote! {
+            _serde::Deserializer::deserialize_struct(__deserializer, #type_name, FIELDS, #visitor_expr)
+        }
+    };
+
+    let all_skipped = fields.iter().all(|field| field.attrs.skip_deserializing());
+    let visitor_var = if all_skipped {
+        quote!(_)
+    } else {
+        quote!(mut __seq)
+    };
+
+    let visit_seq = quote! {
+        #[inline]
+        fn visit_seq<__A>(self, #visitor_var: __A) -> _serde::__private::Result<Self::Value, __A::Error>
+        where
+            __A: _serde::de::SeqAccess<#delife>,
+        {
+            #visit_seq
+        }
+    };
+
+    let in_place_impl_generics = de_impl_generics.in_place();
+    let in_place_ty_generics = de_ty_generics.in_place();
+    let place_life = place_lifetime();
+
+    Some(quote_block! {
+        #field_visitor
+
+        struct __Visitor #in_place_impl_generics #where_clause {
+            place: &#place_life mut #this_type #ty_generics,
+            lifetime: _serde::__private::PhantomData<&#delife ()>,
+        }
+
+        impl #in_place_impl_generics _serde::de::Visitor<#delife> for __Visitor #in_place_ty_generics #where_clause {
+            type Value = ();
+
+            fn expecting(&self, __formatter: &mut _serde::__private::Formatter) -> _serde::__private::fmt::Result {
+                _serde::__private::Formatter::write_str(__formatter, #expecting)
+            }
+
+            #visit_seq
+
+            #[inline]
+            fn visit_map<__A>(self, mut __map: __A) -> _serde::__private::Result<Self::Value, __A::Error>
+            where
+                __A: _serde::de::MapAccess<#delife>,
+            {
+                #visit_map
+            }
+        }
+
+        #fields_stmt
+
+        #dispatch
+    })
+}
+
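+// Dispatches on the enum's tag representation. As a quick reference, for an
+// enum variant `V { a: 1 }` the four representations look roughly like this
+// in JSON:
+//
+//     externally tagged (the default)            {"V": {"a": 1}}
+//     #[serde(tag = "t")]                        {"t": "V", "a": 1}
+//     #[serde(tag = "t", content = "c")]         {"t": "V", "c": {"a": 1}}
+//     #[serde(untagged)]                         {"a": 1}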
+fn deserialize_enum(
+    params: &Parameters,
+    variants: &[Variant],
+    cattrs: &attr::Container,
+) -> Fragment {
+    match cattrs.tag() {
+        attr::TagType::External => deserialize_externally_tagged_enum(params, variants, cattrs),
+        attr::TagType::Internal { tag } => {
+            deserialize_internally_tagged_enum(params, variants, cattrs, tag)
+        }
+        attr::TagType::Adjacent { tag, content } => {
+            deserialize_adjacently_tagged_enum(params, variants, cattrs, tag, content)
+        }
+        attr::TagType::None => deserialize_untagged_enum(params, variants, cattrs),
+    }
+}
+
+fn prepare_enum_variant_enum(
+    variants: &[Variant],
+    cattrs: &attr::Container,
+) -> (TokenStream, Stmts) {
+    let mut deserialized_variants = variants
+        .iter()
+        .enumerate()
+        .filter(|&(_, variant)| !variant.attrs.skip_deserializing());
+
+    let variant_names_idents: Vec<_> = deserialized_variants
+        .clone()
+        .map(|(i, variant)| {
+            (
+                variant.attrs.name().deserialize_name(),
+                field_i(i),
+                variant.attrs.aliases(),
+            )
+        })
+        .collect();
+
+    let other_idx = deserialized_variants.position(|(_, variant)| variant.attrs.other());
+
+    let variants_stmt = {
+        let variant_names = variant_names_idents.iter().map(|(name, _, _)| name);
+        quote! {
+            const VARIANTS: &'static [&'static str] = &[ #(#variant_names),* ];
+        }
+    };
+
+    let variant_visitor = Stmts(deserialize_generated_identifier(
+        &variant_names_idents,
+        cattrs,
+        true,
+        other_idx,
+    ));
+
+    (variants_stmt, variant_visitor)
+}
+
+fn deserialize_externally_tagged_enum(
+    params: &Parameters,
+    variants: &[Variant],
+    cattrs: &attr::Container,
+) -> Fragment {
+    let this_type = &params.this_type;
+    let (de_impl_generics, de_ty_generics, ty_generics, where_clause) =
+        split_with_de_lifetime(params);
+    let delife = params.borrowed.de_lifetime();
+
+    let type_name = cattrs.name().deserialize_name();
+    let expecting = format!("enum {}", params.type_name());
+    let expecting = cattrs.expecting().unwrap_or(&expecting);
+
+    let (variants_stmt, variant_visitor) = prepare_enum_variant_enum(variants, cattrs);
+
+    // Match arms to extract a variant from a string
+    let variant_arms = variants
+        .iter()
+        .enumerate()
+        .filter(|&(_, variant)| !variant.attrs.skip_deserializing())
+        .map(|(i, variant)| {
+            let variant_name = field_i(i);
+
+            let block = Match(deserialize_externally_tagged_variant(
+                params, variant, cattrs,
+            ));
+
+            quote! {
+                (__Field::#variant_name, __variant) => #block
+            }
+        });
+
+    let all_skipped = variants
+        .iter()
+        .all(|variant| variant.attrs.skip_deserializing());
+    let match_variant = if all_skipped {
+        // This is an empty enum like `enum Impossible {}` or an enum in which
+        // all variants have `#[serde(skip_deserializing)]`.
+        quote! {
+            // FIXME: Once feature(exhaustive_patterns) is stable:
+            // let _serde::__private::Err(__err) = _serde::de::EnumAccess::variant::<__Field>(__data);
+            // _serde::__private::Err(__err)
+            _serde::__private::Result::map(
+                _serde::de::EnumAccess::variant::<__Field>(__data),
+                |(__impossible, _)| match __impossible {})
+        }
+    } else {
+        quote! {
+            match try!(_serde::de::EnumAccess::variant(__data)) {
+                #(#variant_arms)*
+            }
+        }
+    };
+
+    quote_block! {
+        #variant_visitor
+
+        struct __Visitor #de_impl_generics #where_clause {
+            marker: _serde::__private::PhantomData<#this_type #ty_generics>,
+            lifetime: _serde::__private::PhantomData<&#delife ()>,
+        }
+
+        impl #de_impl_generics _serde::de::Visitor<#delife> for __Visitor #de_ty_generics #where_clause {
+            type Value = #this_type #ty_generics;
+
+            fn expecting(&self, __formatter: &mut _serde::__private::Formatter) -> _serde::__private::fmt::Result {
+                _serde::__private::Formatter::write_str(__formatter, #expecting)
+            }
+
+            fn visit_enum<__A>(self, __data: __A) -> _serde::__private::Result<Self::Value, __A::Error>
+            where
+                __A: _serde::de::EnumAccess<#delife>,
+            {
+                #match_variant
+            }
+        }
+
+        #variants_stmt
+
+        _serde::Deserializer::deserialize_enum(
+            __deserializer,
+            #type_name,
+            VARIANTS,
+            __Visitor {
+                marker: _serde::__private::PhantomData::<#this_type #ty_generics>,
+                lifetime: _serde::__private::PhantomData,
+            },
+        )
+    }
+}
+
+fn deserialize_internally_tagged_enum(
+    params: &Parameters,
+    variants: &[Variant],
+    cattrs: &attr::Container,
+    tag: &str,
+) -> Fragment {
+    let (variants_stmt, variant_visitor) = prepare_enum_variant_enum(variants, cattrs);
+
+    // Match arms to extract a variant from a string
+    let variant_arms = variants
+        .iter()
+        .enumerate()
+        .filter(|&(_, variant)| !variant.attrs.skip_deserializing())
+        .map(|(i, variant)| {
+            let variant_name = field_i(i);
+
+            let block = Match(deserialize_internally_tagged_variant(
+                params,
+                variant,
+                cattrs,
+                quote! {
+                    _serde::__private::de::ContentDeserializer::<__D::Error>::new(__tagged.content)
+                },
+            ));
+
+            quote! {
+                __Field::#variant_name => #block
+            }
+        });
+
+    let expecting = format!("internally tagged enum {}", params.type_name());
+    let expecting = cattrs.expecting().unwrap_or(&expecting);
+
+    quote_block! {
+        #variant_visitor
+
+        #variants_stmt
+
+        let __tagged = try!(_serde::Deserializer::deserialize_any(
+            __deserializer,
+            _serde::__private::de::TaggedContentVisitor::<__Field>::new(#tag, #expecting)));
+
+        match __tagged.tag {
+            #(#variant_arms)*
+        }
+    }
+}
+
+fn deserialize_adjacently_tagged_enum(
+    params: &Parameters,
+    variants: &[Variant],
+    cattrs: &attr::Container,
+    tag: &str,
+    content: &str,
+) -> Fragment {
+    let this_type = &params.this_type;
+    let this_value = &params.this_value;
+    let (de_impl_generics, de_ty_generics, ty_generics, where_clause) =
+        split_with_de_lifetime(params);
+    let delife = params.borrowed.de_lifetime();
+
+    let (variants_stmt, variant_visitor) = prepare_enum_variant_enum(variants, cattrs);
+
+    let variant_arms: &Vec<_> = &variants
+        .iter()
+        .enumerate()
+        .filter(|&(_, variant)| !variant.attrs.skip_deserializing())
+        .map(|(i, variant)| {
+            let variant_index = field_i(i);
+
+            let block = Match(deserialize_untagged_variant(
+                params,
+                variant,
+                cattrs,
+                quote!(__deserializer),
+            ));
+
+            quote! {
+                __Field::#variant_index => #block
+            }
+        })
+        .collect();
+
+    let expecting = format!("adjacently tagged enum {}", params.type_name());
+    let expecting = cattrs.expecting().unwrap_or(&expecting);
+    let type_name = cattrs.name().deserialize_name();
+    let deny_unknown_fields = cattrs.deny_unknown_fields();
+
+    // If unknown fields are allowed, we pick the visitor that can step over
+    // those. Otherwise we pick the visitor that fails on unknown keys.
+    let field_visitor_ty = if deny_unknown_fields {
+        quote! { _serde::__private::de::TagOrContentFieldVisitor }
+    } else {
+        quote! { _serde::__private::de::TagContentOtherFieldVisitor }
+    };
+
+    let tag_or_content = quote! {
+        #field_visitor_ty {
+            tag: #tag,
+            content: #content,
+        }
+    };
+
+    let mut missing_content = quote! {
+        _serde::__private::Err(<__A::Error as _serde::de::Error>::missing_field(#content))
+    };
+    let mut missing_content_fallthrough = quote!();
+    let missing_content_arms = variants
+        .iter()
+        .enumerate()
+        .filter(|&(_, variant)| !variant.attrs.skip_deserializing())
+        .filter_map(|(i, variant)| {
+            let variant_index = field_i(i);
+            let variant_ident = &variant.ident;
+
+            let arm = match variant.style {
+                Style::Unit => quote! {
+                    _serde::__private::Ok(#this_value::#variant_ident)
+                },
+                Style::Newtype if variant.attrs.deserialize_with().is_none() => {
+                    let span = variant.original.span();
+                    let func = quote_spanned!(span=> _serde::__private::de::missing_field);
+                    quote! {
+                        #func(#content).map(#this_value::#variant_ident)
+                    }
+                }
+                _ => {
+                    missing_content_fallthrough = quote!(_ => #missing_content);
+                    return None;
+                }
+            };
+            Some(quote! {
+                __Field::#variant_index => #arm,
+            })
+        })
+        .collect::<Vec<_>>();
+    if !missing_content_arms.is_empty() {
+        missing_content = quote! {
+            match __field {
+                #(#missing_content_arms)*
+                #missing_content_fallthrough
+            }
+        };
+    }
+
+    // Advance the map by one key, returning early in case of error.
+    let next_key = quote! {
+        try!(_serde::de::MapAccess::next_key_seed(&mut __map, #tag_or_content))
+    };
+
+    // When allowing unknown fields, we want to transparently step through keys
+    // we don't care about until we find `tag`, `content`, or run out of keys.
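+    //
+    // For example, with tag = "t" and content = "c" and without
+    // deny_unknown_fields, a map like {"extra": 0, "t": "V", "c": 1} still
+    // deserializes: "extra" is consumed into IgnoredAny and skipped. With
+    // deny_unknown_fields, the same input is rejected when "extra" is seen.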
+    let next_relevant_key = if deny_unknown_fields {
+        next_key
+    } else {
+        quote!({
+            let mut __rk : _serde::__private::Option<_serde::__private::de::TagOrContentField> = _serde::__private::None;
+            while let _serde::__private::Some(__k) = #next_key {
+                match __k {
+                    _serde::__private::de::TagContentOtherField::Other => {
+                        let _ = try!(_serde::de::MapAccess::next_value::<_serde::de::IgnoredAny>(&mut __map));
+                        continue;
+                    },
+                    _serde::__private::de::TagContentOtherField::Tag => {
+                        __rk = _serde::__private::Some(_serde::__private::de::TagOrContentField::Tag);
+                        break;
+                    }
+                    _serde::__private::de::TagContentOtherField::Content => {
+                        __rk = _serde::__private::Some(_serde::__private::de::TagOrContentField::Content);
+                        break;
+                    }
+                }
+            }
+
+            __rk
+        })
+    };
+
+    // Step through the remaining keys, looking for duplicates of previously
+    // seen keys. When unknown fields are denied, any key that is not a
+    // duplicate immediately produces an error at this point.
+    let visit_remaining_keys = quote! {
+        match #next_relevant_key {
+            _serde::__private::Some(_serde::__private::de::TagOrContentField::Tag) => {
+                _serde::__private::Err(<__A::Error as _serde::de::Error>::duplicate_field(#tag))
+            }
+            _serde::__private::Some(_serde::__private::de::TagOrContentField::Content) => {
+                _serde::__private::Err(<__A::Error as _serde::de::Error>::duplicate_field(#content))
+            }
+            _serde::__private::None => _serde::__private::Ok(__ret),
+        }
+    };
+
+    let finish_content_then_tag = if variant_arms.is_empty() {
+        quote! {
+            match try!(_serde::de::MapAccess::next_value::<__Field>(&mut __map)) {}
+        }
+    } else {
+        quote! {
+            let __ret = try!(match try!(_serde::de::MapAccess::next_value(&mut __map)) {
+                // Deserialize the buffered content now that we know the variant.
+                #(#variant_arms)*
+            });
+            // Visit remaining keys, looking for duplicates.
+            #visit_remaining_keys
+        }
+    };
+
+    quote_block! {
+        #variant_visitor
+
+        #variants_stmt
+
+        struct __Seed #de_impl_generics #where_clause {
+            field: __Field,
+            marker: _serde::__private::PhantomData<#this_type #ty_generics>,
+            lifetime: _serde::__private::PhantomData<&#delife ()>,
+        }
+
+        impl #de_impl_generics _serde::de::DeserializeSeed<#delife> for __Seed #de_ty_generics #where_clause {
+            type Value = #this_type #ty_generics;
+
+            fn deserialize<__D>(self, __deserializer: __D) -> _serde::__private::Result<Self::Value, __D::Error>
+            where
+                __D: _serde::Deserializer<#delife>,
+            {
+                match self.field {
+                    #(#variant_arms)*
+                }
+            }
+        }
+
+        struct __Visitor #de_impl_generics #where_clause {
+            marker: _serde::__private::PhantomData<#this_type #ty_generics>,
+            lifetime: _serde::__private::PhantomData<&#delife ()>,
+        }
+
+        impl #de_impl_generics _serde::de::Visitor<#delife> for __Visitor #de_ty_generics #where_clause {
+            type Value = #this_type #ty_generics;
+
+            fn expecting(&self, __formatter: &mut _serde::__private::Formatter) -> _serde::__private::fmt::Result {
+                _serde::__private::Formatter::write_str(__formatter, #expecting)
+            }
+
+            fn visit_map<__A>(self, mut __map: __A) -> _serde::__private::Result<Self::Value, __A::Error>
+            where
+                __A: _serde::de::MapAccess<#delife>,
+            {
+                // Visit the first relevant key.
+                match #next_relevant_key {
+                    // First key is the tag.
+                    _serde::__private::Some(_serde::__private::de::TagOrContentField::Tag) => {
+                        // Parse the tag.
+                        let __field = try!(_serde::de::MapAccess::next_value(&mut __map));
+                        // Visit the second key.
+                        match #next_relevant_key {
+                            // Second key is a duplicate of the tag.
+                            _serde::__private::Some(_serde::__private::de::TagOrContentField::Tag) => {
+                                _serde::__private::Err(<__A::Error as _serde::de::Error>::duplicate_field(#tag))
+                            }
+                            // Second key is the content.
+                            _serde::__private::Some(_serde::__private::de::TagOrContentField::Content) => {
+                                let __ret = try!(_serde::de::MapAccess::next_value_seed(&mut __map,
+                                    __Seed {
+                                        field: __field,
+                                        marker: _serde::__private::PhantomData,
+                                        lifetime: _serde::__private::PhantomData,
+                                    }));
+                                // Visit remaining keys, looking for duplicates.
+                                #visit_remaining_keys
+                            }
+                            // There is no second key; might be okay if we have a unit variant.
+                            _serde::__private::None => #missing_content
+                        }
+                    }
+                    // First key is the content.
+                    _serde::__private::Some(_serde::__private::de::TagOrContentField::Content) => {
+                        // Buffer up the content.
+                        let __content = try!(_serde::de::MapAccess::next_value::<_serde::__private::de::Content>(&mut __map));
+                        // Visit the second key.
+                        match #next_relevant_key {
+                            // Second key is the tag.
+                            _serde::__private::Some(_serde::__private::de::TagOrContentField::Tag) => {
+                                let __deserializer = _serde::__private::de::ContentDeserializer::<__A::Error>::new(__content);
+                                #finish_content_then_tag
+                            }
+                            // Second key is a duplicate of the content.
+                            _serde::__private::Some(_serde::__private::de::TagOrContentField::Content) => {
+                                _serde::__private::Err(<__A::Error as _serde::de::Error>::duplicate_field(#content))
+                            }
+                            // There is no second key.
+                            _serde::__private::None => {
+                                _serde::__private::Err(<__A::Error as _serde::de::Error>::missing_field(#tag))
+                            }
+                        }
+                    }
+                    // There is no first key.
+                    _serde::__private::None => {
+                        _serde::__private::Err(<__A::Error as _serde::de::Error>::missing_field(#tag))
+                    }
+                }
+            }
+
+            fn visit_seq<__A>(self, mut __seq: __A) -> _serde::__private::Result<Self::Value, __A::Error>
+            where
+                __A: _serde::de::SeqAccess<#delife>,
+            {
+                // Visit the first element - the tag.
+                match try!(_serde::de::SeqAccess::next_element(&mut __seq)) {
+                    _serde::__private::Some(__field) => {
+                        // Visit the second element - the content.
+                        match try!(_serde::de::SeqAccess::next_element_seed(
+                            &mut __seq,
+                            __Seed {
+                                field: __field,
+                                marker: _serde::__private::PhantomData,
+                                lifetime: _serde::__private::PhantomData,
+                            },
+                        )) {
+                            _serde::__private::Some(__ret) => _serde::__private::Ok(__ret),
+                            // There is no second element.
+                            _serde::__private::None => {
+                                _serde::__private::Err(_serde::de::Error::invalid_length(1, &self))
+                            }
+                        }
+                    }
+                    // There is no first element.
+                    _serde::__private::None => {
+                        _serde::__private::Err(_serde::de::Error::invalid_length(0, &self))
+                    }
+                }
+            }
+        }
+
+        const FIELDS: &'static [&'static str] = &[#tag, #content];
+        _serde::Deserializer::deserialize_struct(
+            __deserializer,
+            #type_name,
+            FIELDS,
+            __Visitor {
+                marker: _serde::__private::PhantomData::<#this_type #ty_generics>,
+                lifetime: _serde::__private::PhantomData,
+            },
+        )
+    }
+}
+
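+// Buffers the input into a private `Content` tree once, then tries each
+// non-skipped variant in declaration order against a ContentRefDeserializer;
+// the first variant that succeeds wins. For example, with
+//
+//     #[derive(Deserialize)]
+//     #[serde(untagged)]
+//     enum Value {
+//         Int(i64),
+//         Text(String),
+//     }
+//
+// the JSON input `5` produces `Value::Int(5)` while `"5"` produces
+// `Value::Text("5".to_owned())`.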
+fn deserialize_untagged_enum(
+    params: &Parameters,
+    variants: &[Variant],
+    cattrs: &attr::Container,
+) -> Fragment {
+    let attempts = variants
+        .iter()
+        .filter(|variant| !variant.attrs.skip_deserializing())
+        .map(|variant| {
+            Expr(deserialize_untagged_variant(
+                params,
+                variant,
+                cattrs,
+                quote!(
+                    _serde::__private::de::ContentRefDeserializer::<__D::Error>::new(&__content)
+                ),
+            ))
+        });
+
+    // TODO this message could be better by saving the errors from the failed
+    // attempts. The heuristic used by TOML was to count the number of fields
+    // processed before an error, and use the error that happened after the
+    // largest number of fields. I'm not sure I like that. Maybe it would be
+    // better to save all the errors and combine them into one message that
+    // explains why none of the variants matched.
+    let fallthrough_msg = format!(
+        "data did not match any variant of untagged enum {}",
+        params.type_name()
+    );
+    let fallthrough_msg = cattrs.expecting().unwrap_or(&fallthrough_msg);
+
+    quote_block! {
+        let __content = try!(<_serde::__private::de::Content as _serde::Deserialize>::deserialize(__deserializer));
+
+        #(
+            if let _serde::__private::Ok(__ok) = #attempts {
+                return _serde::__private::Ok(__ok);
+            }
+        )*
+
+        _serde::__private::Err(_serde::de::Error::custom(#fallthrough_msg))
+    }
+}
+
+fn deserialize_externally_tagged_variant(
+    params: &Parameters,
+    variant: &Variant,
+    cattrs: &attr::Container,
+) -> Fragment {
+    if let Some(path) = variant.attrs.deserialize_with() {
+        let (wrapper, wrapper_ty, unwrap_fn) = wrap_deserialize_variant_with(params, variant, path);
+        return quote_block! {
+            #wrapper
+            _serde::__private::Result::map(
+                _serde::de::VariantAccess::newtype_variant::<#wrapper_ty>(__variant), #unwrap_fn)
+        };
+    }
+
+    let variant_ident = &variant.ident;
+
+    match variant.style {
+        Style::Unit => {
+            let this_value = &params.this_value;
+            quote_block! {
+                try!(_serde::de::VariantAccess::unit_variant(__variant));
+                _serde::__private::Ok(#this_value::#variant_ident)
+            }
+        }
+        Style::Newtype => deserialize_externally_tagged_newtype_variant(
+            variant_ident,
+            params,
+            &variant.fields[0],
+            cattrs,
+        ),
+        Style::Tuple => {
+            deserialize_tuple(Some(variant_ident), params, &variant.fields, cattrs, None)
+        }
+        Style::Struct => deserialize_struct(
+            Some(variant_ident),
+            params,
+            &variant.fields,
+            cattrs,
+            None,
+            &Untagged::No,
+        ),
+    }
+}
+
+// Generates the significant part of the visit_seq and visit_map bodies of the
+// visitors for the variants of an internally tagged enum.
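+//
+// For example, with `#[serde(tag = "type")]` a struct variant is represented
+// as a single map such as `{"type": "Request", "id": 1}`, so the variant's
+// fields are deserialized from the same map that carried the tag.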
+fn deserialize_internally_tagged_variant(
+    params: &Parameters,
+    variant: &Variant,
+    cattrs: &attr::Container,
+    deserializer: TokenStream,
+) -> Fragment {
+    if variant.attrs.deserialize_with().is_some() {
+        return deserialize_untagged_variant(params, variant, cattrs, deserializer);
+    }
+
+    let variant_ident = &variant.ident;
+
+    match effective_style(variant) {
+        Style::Unit => {
+            let this_value = &params.this_value;
+            let type_name = params.type_name();
+            let variant_name = variant.ident.to_string();
+            let default = variant.fields.get(0).map(|field| {
+                let default = Expr(expr_is_missing(field, cattrs));
+                quote!((#default))
+            });
+            quote_block! {
+                try!(_serde::Deserializer::deserialize_any(#deserializer, _serde::__private::de::InternallyTaggedUnitVisitor::new(#type_name, #variant_name)));
+                _serde::__private::Ok(#this_value::#variant_ident #default)
+            }
+        }
+        Style::Newtype => deserialize_untagged_newtype_variant(
+            variant_ident,
+            params,
+            &variant.fields[0],
+            &deserializer,
+        ),
+        Style::Struct => deserialize_struct(
+            Some(variant_ident),
+            params,
+            &variant.fields,
+            cattrs,
+            Some(deserializer),
+            &Untagged::No,
+        ),
+        Style::Tuple => unreachable!("checked in serde_derive_internals"),
+    }
+}
+
+fn deserialize_untagged_variant(
+    params: &Parameters,
+    variant: &Variant,
+    cattrs: &attr::Container,
+    deserializer: TokenStream,
+) -> Fragment {
+    if let Some(path) = variant.attrs.deserialize_with() {
+        let unwrap_fn = unwrap_to_variant_closure(params, variant, false);
+        return quote_block! {
+            _serde::__private::Result::map(#path(#deserializer), #unwrap_fn)
+        };
+    }
+
+    let variant_ident = &variant.ident;
+
+    match effective_style(variant) {
+        Style::Unit => {
+            let this_value = &params.this_value;
+            let type_name = params.type_name();
+            let variant_name = variant.ident.to_string();
+            let default = variant.fields.get(0).map(|field| {
+                let default = Expr(expr_is_missing(field, cattrs));
+                quote!((#default))
+            });
+            quote_expr! {
+                match _serde::Deserializer::deserialize_any(
+                    #deserializer,
+                    _serde::__private::de::UntaggedUnitVisitor::new(#type_name, #variant_name)
+                ) {
+                    _serde::__private::Ok(()) => _serde::__private::Ok(#this_value::#variant_ident #default),
+                    _serde::__private::Err(__err) => _serde::__private::Err(__err),
+                }
+            }
+        }
+        Style::Newtype => deserialize_untagged_newtype_variant(
+            variant_ident,
+            params,
+            &variant.fields[0],
+            &deserializer,
+        ),
+        Style::Tuple => deserialize_tuple(
+            Some(variant_ident),
+            params,
+            &variant.fields,
+            cattrs,
+            Some(deserializer),
+        ),
+        Style::Struct => deserialize_struct(
+            Some(variant_ident),
+            params,
+            &variant.fields,
+            cattrs,
+            Some(deserializer),
+            &Untagged::Yes,
+        ),
+    }
+}
+
+fn deserialize_externally_tagged_newtype_variant(
+    variant_ident: &syn::Ident,
+    params: &Parameters,
+    field: &Field,
+    cattrs: &attr::Container,
+) -> Fragment {
+    let this_value = &params.this_value;
+
+    if field.attrs.skip_deserializing() {
+        let default = Expr(expr_is_missing(field, cattrs));
+        return quote_block! {
+            try!(_serde::de::VariantAccess::unit_variant(__variant));
+            _serde::__private::Ok(#this_value::#variant_ident(#default))
+        };
+    }
+
+    match field.attrs.deserialize_with() {
+        None => {
+            let field_ty = field.ty;
+            let span = field.original.span();
+            let func =
+                quote_spanned!(span=> _serde::de::VariantAccess::newtype_variant::<#field_ty>);
+            quote_expr! {
+                _serde::__private::Result::map(#func(__variant), #this_value::#variant_ident)
+            }
+        }
+        Some(path) => {
+            let (wrapper, wrapper_ty) = wrap_deserialize_field_with(params, field.ty, path);
+            quote_block! {
+                #wrapper
+                _serde::__private::Result::map(
+                    _serde::de::VariantAccess::newtype_variant::<#wrapper_ty>(__variant),
+                    |__wrapper| #this_value::#variant_ident(__wrapper.value))
+            }
+        }
+    }
+}
+
+fn deserialize_untagged_newtype_variant(
+    variant_ident: &syn::Ident,
+    params: &Parameters,
+    field: &Field,
+    deserializer: &TokenStream,
+) -> Fragment {
+    let this_value = &params.this_value;
+    let field_ty = field.ty;
+    match field.attrs.deserialize_with() {
+        None => {
+            let span = field.original.span();
+            let func = quote_spanned!(span=> <#field_ty as _serde::Deserialize>::deserialize);
+            quote_expr! {
+                _serde::__private::Result::map(#func(#deserializer), #this_value::#variant_ident)
+            }
+        }
+        Some(path) => {
+            quote_block! {
+                let __value: _serde::__private::Result<#field_ty, _> = #path(#deserializer);
+                _serde::__private::Result::map(__value, #this_value::#variant_ident)
+            }
+        }
+    }
+}
+
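+// Generates the hidden `__Field` enum together with its visitor and
+// `Deserialize` impl, used to recognize field or variant names; unknown names
+// either hit an `__ignore`/`__other` arm, a `#[serde(other)]` variant, or fall
+// through to an unknown field/variant error.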
+fn deserialize_generated_identifier(
+    fields: &[(String, Ident, Vec<String>)],
+    cattrs: &attr::Container,
+    is_variant: bool,
+    other_idx: Option<usize>,
+) -> Fragment {
+    let this_value = quote!(__Field);
+    let field_idents: &Vec<_> = &fields.iter().map(|(_, ident, _)| ident).collect();
+
+    let (ignore_variant, fallthrough) = if !is_variant && cattrs.has_flatten() {
+        let ignore_variant = quote!(__other(_serde::__private::de::Content<'de>),);
+        let fallthrough = quote!(_serde::__private::Ok(__Field::__other(__value)));
+        (Some(ignore_variant), Some(fallthrough))
+    } else if let Some(other_idx) = other_idx {
+        let ignore_variant = fields[other_idx].1.clone();
+        let fallthrough = quote!(_serde::__private::Ok(__Field::#ignore_variant));
+        (None, Some(fallthrough))
+    } else if is_variant || cattrs.deny_unknown_fields() {
+        (None, None)
+    } else {
+        let ignore_variant = quote!(__ignore,);
+        let fallthrough = quote!(_serde::__private::Ok(__Field::__ignore));
+        (Some(ignore_variant), Some(fallthrough))
+    };
+
+    let visitor_impl = Stmts(deserialize_identifier(
+        &this_value,
+        fields,
+        is_variant,
+        fallthrough,
+        None,
+        !is_variant && cattrs.has_flatten(),
+        None,
+    ));
+
+    let lifetime = if !is_variant && cattrs.has_flatten() {
+        Some(quote!(<'de>))
+    } else {
+        None
+    };
+
+    quote_block! {
+        #[allow(non_camel_case_types)]
+        enum __Field #lifetime {
+            #(#field_idents,)*
+            #ignore_variant
+        }
+
+        struct __FieldVisitor;
+
+        impl<'de> _serde::de::Visitor<'de> for __FieldVisitor {
+            type Value = __Field #lifetime;
+
+            #visitor_impl
+        }
+
+        impl<'de> _serde::Deserialize<'de> for __Field #lifetime {
+            #[inline]
+            fn deserialize<__D>(__deserializer: __D) -> _serde::__private::Result<Self, __D::Error>
+            where
+                __D: _serde::Deserializer<'de>,
+            {
+                _serde::Deserializer::deserialize_identifier(__deserializer, __FieldVisitor)
+            }
+        }
+    }
+}
+
+// Generates the `Deserialize::deserialize` body for an enum with the
+// `serde(field_identifier)` or `serde(variant_identifier)` attribute.
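+//
+// For example (illustrative):
+//
+//     #[derive(Deserialize)]
+//     #[serde(field_identifier, rename_all = "lowercase")]
+//     enum Field { Secs, Nanos }
+//
+// lets map keys such as "secs" deserialize directly into `Field::Secs`.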
+fn deserialize_custom_identifier(
+    params: &Parameters,
+    variants: &[Variant],
+    cattrs: &attr::Container,
+) -> Fragment {
+    let is_variant = match cattrs.identifier() {
+        attr::Identifier::Variant => true,
+        attr::Identifier::Field => false,
+        attr::Identifier::No => unreachable!(),
+    };
+
+    let this_type = params.this_type.to_token_stream();
+    let this_value = params.this_value.to_token_stream();
+
+    let (ordinary, fallthrough, fallthrough_borrowed) = if let Some(last) = variants.last() {
+        let last_ident = &last.ident;
+        if last.attrs.other() {
+            // Process the `serde(other)` attribute. It is always on the last
+            // variant (checked in `check_identifier`), so all preceding
+            // variants are ordinary ones.
+            let ordinary = &variants[..variants.len() - 1];
+            let fallthrough = quote!(_serde::__private::Ok(#this_value::#last_ident));
+            (ordinary, Some(fallthrough), None)
+        } else if let Style::Newtype = last.style {
+            let ordinary = &variants[..variants.len() - 1];
+            let fallthrough = |value| {
+                quote! {
+                    _serde::__private::Result::map(
+                        _serde::Deserialize::deserialize(
+                            _serde::__private::de::IdentifierDeserializer::from(#value)
+                        ),
+                        #this_value::#last_ident)
+                }
+            };
+            (
+                ordinary,
+                Some(fallthrough(quote!(__value))),
+                Some(fallthrough(quote!(_serde::__private::de::Borrowed(
+                    __value
+                )))),
+            )
+        } else {
+            (variants, None, None)
+        }
+    } else {
+        (variants, None, None)
+    };
+
+    let names_idents: Vec<_> = ordinary
+        .iter()
+        .map(|variant| {
+            (
+                variant.attrs.name().deserialize_name(),
+                variant.ident.clone(),
+                variant.attrs.aliases(),
+            )
+        })
+        .collect();
+
+    let names = names_idents.iter().map(|(name, _, _)| name);
+
+    let names_const = if fallthrough.is_some() {
+        None
+    } else if is_variant {
+        let variants = quote! {
+            const VARIANTS: &'static [&'static str] = &[ #(#names),* ];
+        };
+        Some(variants)
+    } else {
+        let fields = quote! {
+            const FIELDS: &'static [&'static str] = &[ #(#names),* ];
+        };
+        Some(fields)
+    };
+
+    let (de_impl_generics, de_ty_generics, ty_generics, where_clause) =
+        split_with_de_lifetime(params);
+    let delife = params.borrowed.de_lifetime();
+    let visitor_impl = Stmts(deserialize_identifier(
+        &this_value,
+        &names_idents,
+        is_variant,
+        fallthrough,
+        fallthrough_borrowed,
+        false,
+        cattrs.expecting(),
+    ));
+
+    quote_block! {
+        #names_const
+
+        struct __FieldVisitor #de_impl_generics #where_clause {
+            marker: _serde::__private::PhantomData<#this_type #ty_generics>,
+            lifetime: _serde::__private::PhantomData<&#delife ()>,
+        }
+
+        impl #de_impl_generics _serde::de::Visitor<#delife> for __FieldVisitor #de_ty_generics #where_clause {
+            type Value = #this_type #ty_generics;
+
+            #visitor_impl
+        }
+
+        let __visitor = __FieldVisitor {
+            marker: _serde::__private::PhantomData::<#this_type #ty_generics>,
+            lifetime: _serde::__private::PhantomData,
+        };
+        _serde::Deserializer::deserialize_identifier(__deserializer, __visitor)
+    }
+}
+
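+// Generates the visitor methods that map an identifier (string, bytes, or
+// integer index) to one of the given `fields` constructors, with optional
+// fallthrough arms for unknown names and optional collection of unknown keys
+// into `Content` for `#[serde(flatten)]` support.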
+fn deserialize_identifier(
+    this_value: &TokenStream,
+    fields: &[(String, Ident, Vec<String>)],
+    is_variant: bool,
+    fallthrough: Option<TokenStream>,
+    fallthrough_borrowed: Option<TokenStream>,
+    collect_other_fields: bool,
+    expecting: Option<&str>,
+) -> Fragment {
+    let mut flat_fields = Vec::new();
+    for (_, ident, aliases) in fields {
+        flat_fields.extend(aliases.iter().map(|alias| (alias, ident)));
+    }
+
+    let field_strs: &Vec<_> = &flat_fields.iter().map(|(name, _)| name).collect();
+    let field_bytes: &Vec<_> = &flat_fields
+        .iter()
+        .map(|(name, _)| Literal::byte_string(name.as_bytes()))
+        .collect();
+
+    let constructors: &Vec<_> = &flat_fields
+        .iter()
+        .map(|(_, ident)| quote!(#this_value::#ident))
+        .collect();
+    let main_constructors: &Vec<_> = &fields
+        .iter()
+        .map(|(_, ident, _)| quote!(#this_value::#ident))
+        .collect();
+
+    let expecting = expecting.unwrap_or(if is_variant {
+        "variant identifier"
+    } else {
+        "field identifier"
+    });
+
+    let index_expecting = if is_variant { "variant" } else { "field" };
+
+    let bytes_to_str = if fallthrough.is_some() || collect_other_fields {
+        None
+    } else {
+        Some(quote! {
+            let __value = &_serde::__private::from_utf8_lossy(__value);
+        })
+    };
+
+    let (
+        value_as_str_content,
+        value_as_borrowed_str_content,
+        value_as_bytes_content,
+        value_as_borrowed_bytes_content,
+    ) = if collect_other_fields {
+        (
+            Some(quote! {
+                let __value = _serde::__private::de::Content::String(_serde::__private::ToString::to_string(__value));
+            }),
+            Some(quote! {
+                let __value = _serde::__private::de::Content::Str(__value);
+            }),
+            Some(quote! {
+                let __value = _serde::__private::de::Content::ByteBuf(__value.to_vec());
+            }),
+            Some(quote! {
+                let __value = _serde::__private::de::Content::Bytes(__value);
+            }),
+        )
+    } else {
+        (None, None, None, None)
+    };
+
+    let fallthrough_arm_tokens;
+    let fallthrough_arm = if let Some(fallthrough) = &fallthrough {
+        fallthrough
+    } else if is_variant {
+        fallthrough_arm_tokens = quote! {
+            _serde::__private::Err(_serde::de::Error::unknown_variant(__value, VARIANTS))
+        };
+        &fallthrough_arm_tokens
+    } else {
+        fallthrough_arm_tokens = quote! {
+            _serde::__private::Err(_serde::de::Error::unknown_field(__value, FIELDS))
+        };
+        &fallthrough_arm_tokens
+    };
+
+    let u64_fallthrough_arm_tokens;
+    let u64_fallthrough_arm = if let Some(fallthrough) = &fallthrough {
+        fallthrough
+    } else {
+        let fallthrough_msg = format!("{} index 0 <= i < {}", index_expecting, fields.len());
+        u64_fallthrough_arm_tokens = quote! {
+            _serde::__private::Err(_serde::de::Error::invalid_value(
+                _serde::de::Unexpected::Unsigned(__value),
+                &#fallthrough_msg,
+            ))
+        };
+        &u64_fallthrough_arm_tokens
+    };
+
+    let variant_indices = 0_u64..;
+    let visit_other = if collect_other_fields {
+        quote! {
+            fn visit_bool<__E>(self, __value: bool) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::Bool(__value)))
+            }
+
+            fn visit_i8<__E>(self, __value: i8) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::I8(__value)))
+            }
+
+            fn visit_i16<__E>(self, __value: i16) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::I16(__value)))
+            }
+
+            fn visit_i32<__E>(self, __value: i32) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::I32(__value)))
+            }
+
+            fn visit_i64<__E>(self, __value: i64) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::I64(__value)))
+            }
+
+            fn visit_u8<__E>(self, __value: u8) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::U8(__value)))
+            }
+
+            fn visit_u16<__E>(self, __value: u16) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::U16(__value)))
+            }
+
+            fn visit_u32<__E>(self, __value: u32) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::U32(__value)))
+            }
+
+            fn visit_u64<__E>(self, __value: u64) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::U64(__value)))
+            }
+
+            fn visit_f32<__E>(self, __value: f32) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::F32(__value)))
+            }
+
+            fn visit_f64<__E>(self, __value: f64) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::F64(__value)))
+            }
+
+            fn visit_char<__E>(self, __value: char) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::Char(__value)))
+            }
+
+            fn visit_unit<__E>(self) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                _serde::__private::Ok(__Field::__other(_serde::__private::de::Content::Unit))
+            }
+        }
+    } else {
+        quote! {
+            fn visit_u64<__E>(self, __value: u64) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                match __value {
+                    #(
+                        #variant_indices => _serde::__private::Ok(#main_constructors),
+                    )*
+                    _ => #u64_fallthrough_arm,
+                }
+            }
+        }
+    };
+
+    let visit_borrowed = if fallthrough_borrowed.is_some() || collect_other_fields {
+        let fallthrough_borrowed_arm = fallthrough_borrowed.as_ref().unwrap_or(fallthrough_arm);
+        Some(quote! {
+            fn visit_borrowed_str<__E>(self, __value: &'de str) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                match __value {
+                    #(
+                        #field_strs => _serde::__private::Ok(#constructors),
+                    )*
+                    _ => {
+                        #value_as_borrowed_str_content
+                        #fallthrough_borrowed_arm
+                    }
+                }
+            }
+
+            fn visit_borrowed_bytes<__E>(self, __value: &'de [u8]) -> _serde::__private::Result<Self::Value, __E>
+            where
+                __E: _serde::de::Error,
+            {
+                match __value {
+                    #(
+                        #field_bytes => _serde::__private::Ok(#constructors),
+                    )*
+                    _ => {
+                        #bytes_to_str
+                        #value_as_borrowed_bytes_content
+                        #fallthrough_borrowed_arm
+                    }
+                }
+            }
+        })
+    } else {
+        None
+    };
+
+    quote_block! {
+        fn expecting(&self, __formatter: &mut _serde::__private::Formatter) -> _serde::__private::fmt::Result {
+            _serde::__private::Formatter::write_str(__formatter, #expecting)
+        }
+
+        #visit_other
+
+        fn visit_str<__E>(self, __value: &str) -> _serde::__private::Result<Self::Value, __E>
+        where
+            __E: _serde::de::Error,
+        {
+            match __value {
+                #(
+                    #field_strs => _serde::__private::Ok(#constructors),
+                )*
+                _ => {
+                    #value_as_str_content
+                    #fallthrough_arm
+                }
+            }
+        }
+
+        fn visit_bytes<__E>(self, __value: &[u8]) -> _serde::__private::Result<Self::Value, __E>
+        where
+            __E: _serde::de::Error,
+        {
+            match __value {
+                #(
+                    #field_bytes => _serde::__private::Ok(#constructors),
+                )*
+                _ => {
+                    #bytes_to_str
+                    #value_as_bytes_content
+                    #fallthrough_arm
+                }
+            }
+        }
+
+        #visit_borrowed
+    }
+}
+
+fn deserialize_struct_as_struct_visitor(
+    struct_path: &TokenStream,
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+) -> (Fragment, Option<Fragment>, Fragment) {
+    assert!(!cattrs.has_flatten());
+
+    let field_names_idents: Vec<_> = fields
+        .iter()
+        .enumerate()
+        .filter(|&(_, field)| !field.attrs.skip_deserializing())
+        .map(|(i, field)| {
+            (
+                field.attrs.name().deserialize_name(),
+                field_i(i),
+                field.attrs.aliases(),
+            )
+        })
+        .collect();
+
+    let fields_stmt = {
+        let field_names = field_names_idents
+            .iter()
+            .flat_map(|(_, _, aliases)| aliases);
+
+        quote_block! {
+            const FIELDS: &'static [&'static str] = &[ #(#field_names),* ];
+        }
+    };
+
+    let field_visitor = deserialize_generated_identifier(&field_names_idents, cattrs, false, None);
+
+    let visit_map = deserialize_map(struct_path, params, fields, cattrs);
+
+    (field_visitor, Some(fields_stmt), visit_map)
+}
+
+fn deserialize_struct_as_map_visitor(
+    struct_path: &TokenStream,
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+) -> (Fragment, Option<Fragment>, Fragment) {
+    let field_names_idents: Vec<_> = fields
+        .iter()
+        .enumerate()
+        .filter(|&(_, field)| !field.attrs.skip_deserializing() && !field.attrs.flatten())
+        .map(|(i, field)| {
+            (
+                field.attrs.name().deserialize_name(),
+                field_i(i),
+                field.attrs.aliases(),
+            )
+        })
+        .collect();
+
+    let field_visitor = deserialize_generated_identifier(&field_names_idents, cattrs, false, None);
+
+    let visit_map = deserialize_map(struct_path, params, fields, cattrs);
+
+    (field_visitor, None, visit_map)
+}
+
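+// Generates the `visit_map` body for a struct: one `Option` local per field, a
+// key-matching loop that fills them in, buffering of unmatched entries when the
+// struct has flattened fields, and defaults or errors for missing fields.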
+fn deserialize_map(
+    struct_path: &TokenStream,
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+) -> Fragment {
+    // Pair each field with its generated identifier (__field0, __field1, ...).
+    let fields_names: Vec<_> = fields
+        .iter()
+        .enumerate()
+        .map(|(i, field)| (field, field_i(i)))
+        .collect();
+
+    // Declare each field that will be deserialized.
+    let let_values = fields_names
+        .iter()
+        .filter(|&&(field, _)| !field.attrs.skip_deserializing() && !field.attrs.flatten())
+        .map(|(field, name)| {
+            let field_ty = field.ty;
+            quote! {
+                let mut #name: _serde::__private::Option<#field_ty> = _serde::__private::None;
+            }
+        });
+
+    // Collect contents for flatten fields into a buffer
+    let let_collect = if cattrs.has_flatten() {
+        Some(quote! {
+            let mut __collect = _serde::__private::Vec::<_serde::__private::Option<(
+                _serde::__private::de::Content,
+                _serde::__private::de::Content
+            )>>::new();
+        })
+    } else {
+        None
+    };
+
+    // Match arms to extract a value for a field.
+    let value_arms = fields_names
+        .iter()
+        .filter(|&&(field, _)| !field.attrs.skip_deserializing() && !field.attrs.flatten())
+        .map(|(field, name)| {
+            let deser_name = field.attrs.name().deserialize_name();
+
+            let visit = match field.attrs.deserialize_with() {
+                None => {
+                    let field_ty = field.ty;
+                    let span = field.original.span();
+                    let func =
+                        quote_spanned!(span=> _serde::de::MapAccess::next_value::<#field_ty>);
+                    quote! {
+                        try!(#func(&mut __map))
+                    }
+                }
+                Some(path) => {
+                    let (wrapper, wrapper_ty) = wrap_deserialize_field_with(params, field.ty, path);
+                    quote!({
+                        #wrapper
+                        match _serde::de::MapAccess::next_value::<#wrapper_ty>(&mut __map) {
+                            _serde::__private::Ok(__wrapper) => __wrapper.value,
+                            _serde::__private::Err(__err) => {
+                                return _serde::__private::Err(__err);
+                            }
+                        }
+                    })
+                }
+            };
+            quote! {
+                __Field::#name => {
+                    if _serde::__private::Option::is_some(&#name) {
+                        return _serde::__private::Err(<__A::Error as _serde::de::Error>::duplicate_field(#deser_name));
+                    }
+                    #name = _serde::__private::Some(#visit);
+                }
+            }
+        });
+
+    // Visit ignored values to consume them
+    let ignored_arm = if cattrs.has_flatten() {
+        Some(quote! {
+            __Field::__other(__name) => {
+                __collect.push(_serde::__private::Some((
+                    __name,
+                    try!(_serde::de::MapAccess::next_value(&mut __map)))));
+            }
+        })
+    } else if cattrs.deny_unknown_fields() {
+        None
+    } else {
+        Some(quote! {
+            _ => { let _ = try!(_serde::de::MapAccess::next_value::<_serde::de::IgnoredAny>(&mut __map)); }
+        })
+    };
+
+    let all_skipped = fields.iter().all(|field| field.attrs.skip_deserializing());
+    let match_keys = if cattrs.deny_unknown_fields() && all_skipped {
+        quote! {
+            // FIXME: Once feature(exhaustive_patterns) is stable:
+            // let _serde::__private::None::<__Field> = try!(_serde::de::MapAccess::next_key(&mut __map));
+            _serde::__private::Option::map(
+                try!(_serde::de::MapAccess::next_key::<__Field>(&mut __map)),
+                |__impossible| match __impossible {});
+        }
+    } else {
+        quote! {
+            while let _serde::__private::Some(__key) = try!(_serde::de::MapAccess::next_key::<__Field>(&mut __map)) {
+                match __key {
+                    #(#value_arms)*
+                    #ignored_arm
+                }
+            }
+        }
+    };
+
+    let extract_values = fields_names
+        .iter()
+        .filter(|&&(field, _)| !field.attrs.skip_deserializing() && !field.attrs.flatten())
+        .map(|(field, name)| {
+            let missing_expr = Match(expr_is_missing(field, cattrs));
+
+            quote! {
+                let #name = match #name {
+                    _serde::__private::Some(#name) => #name,
+                    _serde::__private::None => #missing_expr
+                };
+            }
+        });
+
+    let extract_collected = fields_names
+        .iter()
+        .filter(|&&(field, _)| field.attrs.flatten() && !field.attrs.skip_deserializing())
+        .map(|(field, name)| {
+            let field_ty = field.ty;
+            let func = match field.attrs.deserialize_with() {
+                None => {
+                    let span = field.original.span();
+                    quote_spanned!(span=> _serde::de::Deserialize::deserialize)
+                }
+                Some(path) => quote!(#path),
+            };
+            quote! {
+                let #name: #field_ty = try!(#func(
+                    _serde::__private::de::FlatMapDeserializer(
+                        &mut __collect,
+                        _serde::__private::PhantomData)));
+            }
+        });
+
+    let collected_deny_unknown_fields = if cattrs.has_flatten() && cattrs.deny_unknown_fields() {
+        Some(quote! {
+            if let _serde::__private::Some(_serde::__private::Some((__key, _))) =
+                __collect.into_iter().filter(_serde::__private::Option::is_some).next()
+            {
+                if let _serde::__private::Some(__key) = __key.as_str() {
+                    return _serde::__private::Err(
+                        _serde::de::Error::custom(format_args!("unknown field `{}`", &__key)));
+                } else {
+                    return _serde::__private::Err(
+                        _serde::de::Error::custom(format_args!("unexpected map key")));
+                }
+            }
+        })
+    } else {
+        None
+    };
+
+    let result = fields_names.iter().map(|(field, name)| {
+        let member = &field.member;
+        if field.attrs.skip_deserializing() {
+            let value = Expr(expr_is_missing(field, cattrs));
+            quote!(#member: #value)
+        } else {
+            quote!(#member: #name)
+        }
+    });
+
+    let let_default = match cattrs.default() {
+        attr::Default::Default => Some(quote!(
+            let __default: Self::Value = _serde::__private::Default::default();
+        )),
+        attr::Default::Path(path) => Some(quote!(
+            let __default: Self::Value = #path();
+        )),
+        attr::Default::None => {
+            // We don't need the default value; omitting the `let __default`
+            // line avoids an unused variable warning.
+            None
+        }
+    };
+
+    let mut result = quote!(#struct_path { #(#result),* });
+    if params.has_getter {
+        let this_type = &params.this_type;
+        let (_, ty_generics, _) = params.generics.split_for_impl();
+        result = quote! {
+            _serde::__private::Into::<#this_type #ty_generics>::into(#result)
+        };
+    }
+
+    quote_block! {
+        #(#let_values)*
+
+        #let_collect
+
+        #match_keys
+
+        #let_default
+
+        #(#extract_values)*
+
+        #(#extract_collected)*
+
+        #collected_deny_unknown_fields
+
+        _serde::__private::Ok(#result)
+    }
+}
+
+#[cfg(feature = "deserialize_in_place")]
+fn deserialize_struct_as_struct_in_place_visitor(
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+) -> (Fragment, Fragment, Fragment) {
+    assert!(!cattrs.has_flatten());
+
+    let field_names_idents: Vec<_> = fields
+        .iter()
+        .enumerate()
+        .filter(|&(_, field)| !field.attrs.skip_deserializing())
+        .map(|(i, field)| {
+            (
+                field.attrs.name().deserialize_name(),
+                field_i(i),
+                field.attrs.aliases(),
+            )
+        })
+        .collect();
+
+    let fields_stmt = {
+        let field_names = field_names_idents.iter().map(|(name, _, _)| name);
+        quote_block! {
+            const FIELDS: &'static [&'static str] = &[ #(#field_names),* ];
+        }
+    };
+
+    let field_visitor = deserialize_generated_identifier(&field_names_idents, cattrs, false, None);
+
+    let visit_map = deserialize_map_in_place(params, fields, cattrs);
+
+    (field_visitor, fields_stmt, visit_map)
+}
+
+#[cfg(feature = "deserialize_in_place")]
+fn deserialize_map_in_place(
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+) -> Fragment {
+    assert!(!cattrs.has_flatten());
+
+    // Pair each field with its generated identifier (__field0, __field1, ...).
+    let fields_names: Vec<_> = fields
+        .iter()
+        .enumerate()
+        .map(|(i, field)| (field, field_i(i)))
+        .collect();
+
+    // For deserialize_in_place, declare booleans for each field that will be
+    // deserialized.
+    let let_flags = fields_names
+        .iter()
+        .filter(|&&(field, _)| !field.attrs.skip_deserializing())
+        .map(|(_, name)| {
+            quote! {
+                let mut #name: bool = false;
+            }
+        });
+
+    // Match arms to extract a value for a field.
+    let value_arms_from = fields_names
+        .iter()
+        .filter(|&&(field, _)| !field.attrs.skip_deserializing())
+        .map(|(field, name)| {
+            let deser_name = field.attrs.name().deserialize_name();
+            let member = &field.member;
+
+            let visit = match field.attrs.deserialize_with() {
+                None => {
+                    quote! {
+                        try!(_serde::de::MapAccess::next_value_seed(&mut __map, _serde::__private::de::InPlaceSeed(&mut self.place.#member)))
+                    }
+                }
+                Some(path) => {
+                    let (wrapper, wrapper_ty) = wrap_deserialize_field_with(params, field.ty, path);
+                    quote!({
+                        #wrapper
+                        self.place.#member = match _serde::de::MapAccess::next_value::<#wrapper_ty>(&mut __map) {
+                            _serde::__private::Ok(__wrapper) => __wrapper.value,
+                            _serde::__private::Err(__err) => {
+                                return _serde::__private::Err(__err);
+                            }
+                        };
+                    })
+                }
+            };
+            quote! {
+                __Field::#name => {
+                    if #name {
+                        return _serde::__private::Err(<__A::Error as _serde::de::Error>::duplicate_field(#deser_name));
+                    }
+                    #visit;
+                    #name = true;
+                }
+            }
+        });
+
+    // Visit ignored values to consume them
+    let ignored_arm = if cattrs.deny_unknown_fields() {
+        None
+    } else {
+        Some(quote! {
+            _ => { let _ = try!(_serde::de::MapAccess::next_value::<_serde::de::IgnoredAny>(&mut __map)); }
+        })
+    };
+
+    let all_skipped = fields.iter().all(|field| field.attrs.skip_deserializing());
+
+    let match_keys = if cattrs.deny_unknown_fields() && all_skipped {
+        quote! {
+            // FIXME: Once feature(exhaustive_patterns) is stable:
+            // let _serde::__private::None::<__Field> = try!(_serde::de::MapAccess::next_key(&mut __map));
+            _serde::__private::Option::map(
+                try!(_serde::de::MapAccess::next_key::<__Field>(&mut __map)),
+                |__impossible| match __impossible {});
+        }
+    } else {
+        quote! {
+            while let _serde::__private::Some(__key) = try!(_serde::de::MapAccess::next_key::<__Field>(&mut __map)) {
+                match __key {
+                    #(#value_arms_from)*
+                    #ignored_arm
+                }
+            }
+        }
+    };
+
+    let check_flags = fields_names
+        .iter()
+        .filter(|&&(field, _)| !field.attrs.skip_deserializing())
+        .map(|(field, name)| {
+            let missing_expr = expr_is_missing(field, cattrs);
+            // If missing_expr unconditionally returns an error, don't try
+            // to assign its value to self.place.
+            if field.attrs.default().is_none()
+                && cattrs.default().is_none()
+                && field.attrs.deserialize_with().is_some()
+            {
+                let missing_expr = Stmts(missing_expr);
+                quote! {
+                    if !#name {
+                        #missing_expr;
+                    }
+                }
+            } else {
+                let member = &field.member;
+                let missing_expr = Expr(missing_expr);
+                quote! {
+                    if !#name {
+                        self.place.#member = #missing_expr;
+                    };
+                }
+            }
+        });
+
+    let this_type = &params.this_type;
+    let (_, _, ty_generics, _) = split_with_de_lifetime(params);
+
+    let let_default = match cattrs.default() {
+        attr::Default::Default => Some(quote!(
+            let __default: #this_type #ty_generics = _serde::__private::Default::default();
+        )),
+        attr::Default::Path(path) => Some(quote!(
+            let __default: #this_type #ty_generics = #path();
+        )),
+        attr::Default::None => {
+            // We don't need the default value; omitting the `let __default`
+            // line avoids an unused variable warning.
+            None
+        }
+    };
+
+    quote_block! {
+        #(#let_flags)*
+
+        #match_keys
+
+        #let_default
+
+        #(#check_flags)*
+
+        _serde::__private::Ok(())
+    }
+}
+
+fn field_i(i: usize) -> Ident {
+    Ident::new(&format!("__field{}", i), Span::call_site())
+}
+
+/// This function wraps the expression in `#[serde(deserialize_with = "...")]`
+/// in a private wrapper struct with its own `Deserialize` impl, to prevent it
+/// from accessing the internal `Deserialize` state.
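+///
+/// The function named by `deserialize_with` is expected to be callable as
+/// something like `fn<'de, D>(D) -> Result<T, D::Error> where D: Deserializer<'de>`.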
+fn wrap_deserialize_with(
+    params: &Parameters,
+    value_ty: &TokenStream,
+    deserialize_with: &syn::ExprPath,
+) -> (TokenStream, TokenStream) {
+    let this_type = &params.this_type;
+    let (de_impl_generics, de_ty_generics, ty_generics, where_clause) =
+        split_with_de_lifetime(params);
+    let delife = params.borrowed.de_lifetime();
+
+    let wrapper = quote! {
+        struct __DeserializeWith #de_impl_generics #where_clause {
+            value: #value_ty,
+            phantom: _serde::__private::PhantomData<#this_type #ty_generics>,
+            lifetime: _serde::__private::PhantomData<&#delife ()>,
+        }
+
+        impl #de_impl_generics _serde::Deserialize<#delife> for __DeserializeWith #de_ty_generics #where_clause {
+            fn deserialize<__D>(__deserializer: __D) -> _serde::__private::Result<Self, __D::Error>
+            where
+                __D: _serde::Deserializer<#delife>,
+            {
+                _serde::__private::Ok(__DeserializeWith {
+                    value: try!(#deserialize_with(__deserializer)),
+                    phantom: _serde::__private::PhantomData,
+                    lifetime: _serde::__private::PhantomData,
+                })
+            }
+        }
+    };
+
+    let wrapper_ty = quote!(__DeserializeWith #de_ty_generics);
+
+    (wrapper, wrapper_ty)
+}
+
+fn wrap_deserialize_field_with(
+    params: &Parameters,
+    field_ty: &syn::Type,
+    deserialize_with: &syn::ExprPath,
+) -> (TokenStream, TokenStream) {
+    wrap_deserialize_with(params, &quote!(#field_ty), deserialize_with)
+}
+
+fn wrap_deserialize_variant_with(
+    params: &Parameters,
+    variant: &Variant,
+    deserialize_with: &syn::ExprPath,
+) -> (TokenStream, TokenStream, TokenStream) {
+    let field_tys = variant.fields.iter().map(|field| field.ty);
+    let (wrapper, wrapper_ty) =
+        wrap_deserialize_with(params, &quote!((#(#field_tys),*)), deserialize_with);
+
+    let unwrap_fn = unwrap_to_variant_closure(params, variant, true);
+
+    (wrapper, wrapper_ty, unwrap_fn)
+}
+
+// Generates a closure that converts a single input parameter to the final value.
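+// For example, for a tuple variant `E::V(A, B)` behind a wrapper, the closure
+// is roughly `|__wrap| E::V(__wrap.value.0, __wrap.value.1)`.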
+fn unwrap_to_variant_closure(
+    params: &Parameters,
+    variant: &Variant,
+    with_wrapper: bool,
+) -> TokenStream {
+    let this_value = &params.this_value;
+    let variant_ident = &variant.ident;
+
+    let (arg, wrapper) = if with_wrapper {
+        (quote! { __wrap }, quote! { __wrap.value })
+    } else {
+        let field_tys = variant.fields.iter().map(|field| field.ty);
+        (quote! { __wrap: (#(#field_tys),*) }, quote! { __wrap })
+    };
+
+    let field_access = (0..variant.fields.len()).map(|n| {
+        Member::Unnamed(Index {
+            index: n as u32,
+            span: Span::call_site(),
+        })
+    });
+
+    match variant.style {
+        Style::Struct if variant.fields.len() == 1 => {
+            let member = &variant.fields[0].member;
+            quote! {
+                |#arg| #this_value::#variant_ident { #member: #wrapper }
+            }
+        }
+        Style::Struct => {
+            let members = variant.fields.iter().map(|field| &field.member);
+            quote! {
+                |#arg| #this_value::#variant_ident { #(#members: #wrapper.#field_access),* }
+            }
+        }
+        Style::Tuple => quote! {
+            |#arg| #this_value::#variant_ident(#(#wrapper.#field_access),*)
+        },
+        Style::Newtype => quote! {
+            |#arg| #this_value::#variant_ident(#wrapper)
+        },
+        Style::Unit => quote! {
+            |#arg| #this_value::#variant_ident
+        },
+    }
+}
+
+fn expr_is_missing(field: &Field, cattrs: &attr::Container) -> Fragment {
+    match field.attrs.default() {
+        attr::Default::Default => {
+            let span = field.original.span();
+            let func = quote_spanned!(span=> _serde::__private::Default::default);
+            return quote_expr!(#func());
+        }
+        attr::Default::Path(path) => {
+            return quote_expr!(#path());
+        }
+        attr::Default::None => { /* below */ }
+    }
+
+    match *cattrs.default() {
+        attr::Default::Default | attr::Default::Path(_) => {
+            let member = &field.member;
+            return quote_expr!(__default.#member);
+        }
+        attr::Default::None => { /* below */ }
+    }
+
+    let name = field.attrs.name().deserialize_name();
+    match field.attrs.deserialize_with() {
+        None => {
+            let span = field.original.span();
+            let func = quote_spanned!(span=> _serde::__private::de::missing_field);
+            quote_expr! {
+                try!(#func(#name))
+            }
+        }
+        Some(_) => {
+            quote_expr! {
+                return _serde::__private::Err(<__A::Error as _serde::de::Error>::missing_field(#name))
+            }
+        }
+    }
+}
+
+fn effective_style(variant: &Variant) -> Style {
+    match variant.style {
+        Style::Newtype if variant.fields[0].attrs.skip_deserializing() => Style::Unit,
+        other => other,
+    }
+}
+
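+// Helper newtypes over `Parameters` that print the impl/type generics with the
+// `'de` lifetime (and, for in-place deserialization, the `'place` lifetime)
+// prepended to the type's own generic parameters.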
+struct DeImplGenerics<'a>(&'a Parameters);
+#[cfg(feature = "deserialize_in_place")]
+struct InPlaceImplGenerics<'a>(&'a Parameters);
+
+impl<'a> ToTokens for DeImplGenerics<'a> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        let mut generics = self.0.generics.clone();
+        if let Some(de_lifetime) = self.0.borrowed.de_lifetime_param() {
+            generics.params = Some(syn::GenericParam::Lifetime(de_lifetime))
+                .into_iter()
+                .chain(generics.params)
+                .collect();
+        }
+        let (impl_generics, _, _) = generics.split_for_impl();
+        impl_generics.to_tokens(tokens);
+    }
+}
+
+#[cfg(feature = "deserialize_in_place")]
+impl<'a> ToTokens for InPlaceImplGenerics<'a> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        let place_lifetime = place_lifetime();
+        let mut generics = self.0.generics.clone();
+
+        // Add a lifetime for `&'place mut Self`, and `'a: 'place`.
+        for param in &mut generics.params {
+            match param {
+                syn::GenericParam::Lifetime(param) => {
+                    param.bounds.push(place_lifetime.lifetime.clone());
+                }
+                syn::GenericParam::Type(param) => {
+                    param.bounds.push(syn::TypeParamBound::Lifetime(
+                        place_lifetime.lifetime.clone(),
+                    ));
+                }
+                syn::GenericParam::Const(_) => {}
+            }
+        }
+        generics.params = Some(syn::GenericParam::Lifetime(place_lifetime))
+            .into_iter()
+            .chain(generics.params)
+            .collect();
+        if let Some(de_lifetime) = self.0.borrowed.de_lifetime_param() {
+            generics.params = Some(syn::GenericParam::Lifetime(de_lifetime))
+                .into_iter()
+                .chain(generics.params)
+                .collect();
+        }
+        let (impl_generics, _, _) = generics.split_for_impl();
+        impl_generics.to_tokens(tokens);
+    }
+}
+
+#[cfg(feature = "deserialize_in_place")]
+impl<'a> DeImplGenerics<'a> {
+    fn in_place(self) -> InPlaceImplGenerics<'a> {
+        InPlaceImplGenerics(self.0)
+    }
+}
+
+struct DeTypeGenerics<'a>(&'a Parameters);
+#[cfg(feature = "deserialize_in_place")]
+struct InPlaceTypeGenerics<'a>(&'a Parameters);
+
+impl<'a> ToTokens for DeTypeGenerics<'a> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        let mut generics = self.0.generics.clone();
+        if self.0.borrowed.de_lifetime_param().is_some() {
+            let def = syn::LifetimeParam {
+                attrs: Vec::new(),
+                lifetime: syn::Lifetime::new("'de", Span::call_site()),
+                colon_token: None,
+                bounds: Punctuated::new(),
+            };
+            generics.params = Some(syn::GenericParam::Lifetime(def))
+                .into_iter()
+                .chain(generics.params)
+                .collect();
+        }
+        let (_, ty_generics, _) = generics.split_for_impl();
+        ty_generics.to_tokens(tokens);
+    }
+}
+
+#[cfg(feature = "deserialize_in_place")]
+impl<'a> ToTokens for InPlaceTypeGenerics<'a> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        let mut generics = self.0.generics.clone();
+        generics.params = Some(syn::GenericParam::Lifetime(place_lifetime()))
+            .into_iter()
+            .chain(generics.params)
+            .collect();
+
+        if self.0.borrowed.de_lifetime_param().is_some() {
+            let def = syn::LifetimeParam {
+                attrs: Vec::new(),
+                lifetime: syn::Lifetime::new("'de", Span::call_site()),
+                colon_token: None,
+                bounds: Punctuated::new(),
+            };
+            generics.params = Some(syn::GenericParam::Lifetime(def))
+                .into_iter()
+                .chain(generics.params)
+                .collect();
+        }
+        let (_, ty_generics, _) = generics.split_for_impl();
+        ty_generics.to_tokens(tokens);
+    }
+}
+
+#[cfg(feature = "deserialize_in_place")]
+impl<'a> DeTypeGenerics<'a> {
+    fn in_place(self) -> InPlaceTypeGenerics<'a> {
+        InPlaceTypeGenerics(self.0)
+    }
+}
+
+#[cfg(feature = "deserialize_in_place")]
+fn place_lifetime() -> syn::LifetimeParam {
+    syn::LifetimeParam {
+        attrs: Vec::new(),
+        lifetime: syn::Lifetime::new("'place", Span::call_site()),
+        colon_token: None,
+        bounds: Punctuated::new(),
+    }
+}
+
+fn split_with_de_lifetime(
+    params: &Parameters,
+) -> (
+    DeImplGenerics,
+    DeTypeGenerics,
+    syn::TypeGenerics,
+    Option<&syn::WhereClause>,
+) {
+    let de_impl_generics = DeImplGenerics(params);
+    let de_ty_generics = DeTypeGenerics(params);
+    let (_, ty_generics, where_clause) = params.generics.split_for_impl();
+    (de_impl_generics, de_ty_generics, ty_generics, where_clause)
+}
diff --git a/crates/serde_derive/src/dummy.rs b/crates/serde_derive/src/dummy.rs
new file mode 100644
index 0000000..2be5027
--- /dev/null
+++ b/crates/serde_derive/src/dummy.rs
@@ -0,0 +1,44 @@
+use proc_macro2::{Ident, TokenStream};
+use quote::format_ident;
+
+use syn;
+use try;
+
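+// Wraps the generated impl in an unnamed `const _: () = { ... }` (or a named
+// `_IMPL_*` const on compilers without underscore consts) so that the local
+// `_serde` import does not leak into the caller's namespace.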
+pub fn wrap_in_const(
+    serde_path: Option<&syn::Path>,
+    trait_: &str,
+    ty: &Ident,
+    code: TokenStream,
+) -> TokenStream {
+    let try_replacement = try::replacement();
+
+    let dummy_const = if cfg!(no_underscore_consts) {
+        format_ident!("_IMPL_{}_FOR_{}", trait_, unraw(ty))
+    } else {
+        format_ident!("_")
+    };
+
+    let use_serde = match serde_path {
+        Some(path) => quote! {
+            use #path as _serde;
+        },
+        None => quote! {
+            #[allow(unused_extern_crates, clippy::useless_attribute)]
+            extern crate serde as _serde;
+        },
+    };
+
+    quote! {
+        #[doc(hidden)]
+        #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)]
+        const #dummy_const: () = {
+            #use_serde
+            #try_replacement
+            #code
+        };
+    }
+}
+
+fn unraw(ident: &Ident) -> String {
+    ident.to_string().trim_start_matches("r#").to_owned()
+}
diff --git a/crates/serde_derive/src/fragment.rs b/crates/serde_derive/src/fragment.rs
new file mode 100644
index 0000000..324504a
--- /dev/null
+++ b/crates/serde_derive/src/fragment.rs
@@ -0,0 +1,74 @@
+use proc_macro2::TokenStream;
+use quote::ToTokens;
+use syn::token;
+
+pub enum Fragment {
+    /// Tokens that can be used as an expression.
+    Expr(TokenStream),
+    /// Tokens that can be used inside a block. The surrounding curly braces are
+    /// not part of these tokens.
+    Block(TokenStream),
+}
+
+macro_rules! quote_expr {
+    ($($tt:tt)*) => {
+        $crate::fragment::Fragment::Expr(quote!($($tt)*))
+    }
+}
+
+macro_rules! quote_block {
+    ($($tt:tt)*) => {
+        $crate::fragment::Fragment::Block(quote!($($tt)*))
+    }
+}
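+
+// For example (illustrative), `quote_expr!(Ok(0))` produces a `Fragment::Expr`
+// that interpolates directly, while `quote_block!(let __x = f(); Ok(__x))`
+// produces a `Fragment::Block` that `Expr` and `Match` below wrap in braces.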
+
+/// Interpolate a fragment in place of an expression. This involves surrounding
+/// Block fragments in curly braces.
+pub struct Expr(pub Fragment);
+impl ToTokens for Expr {
+    fn to_tokens(&self, out: &mut TokenStream) {
+        match &self.0 {
+            Fragment::Expr(expr) => expr.to_tokens(out),
+            Fragment::Block(block) => {
+                token::Brace::default().surround(out, |out| block.to_tokens(out));
+            }
+        }
+    }
+}
+
+/// Interpolate a fragment as the statements of a block.
+pub struct Stmts(pub Fragment);
+impl ToTokens for Stmts {
+    fn to_tokens(&self, out: &mut TokenStream) {
+        match &self.0 {
+            Fragment::Expr(expr) => expr.to_tokens(out),
+            Fragment::Block(block) => block.to_tokens(out),
+        }
+    }
+}
+
+/// Interpolate a fragment as the value part of a `match` expression. This
+/// involves putting a comma after expressions and curly braces around blocks.
+pub struct Match(pub Fragment);
+impl ToTokens for Match {
+    fn to_tokens(&self, out: &mut TokenStream) {
+        match &self.0 {
+            Fragment::Expr(expr) => {
+                expr.to_tokens(out);
+                <Token![,]>::default().to_tokens(out);
+            }
+            Fragment::Block(block) => {
+                token::Brace::default().surround(out, |out| block.to_tokens(out));
+            }
+        }
+    }
+}
+
+impl AsRef<TokenStream> for Fragment {
+    fn as_ref(&self) -> &TokenStream {
+        match self {
+            Fragment::Expr(expr) => expr,
+            Fragment::Block(block) => block,
+        }
+    }
+}
diff --git a/crates/serde_derive/src/internals/ast.rs b/crates/serde_derive/src/internals/ast.rs
new file mode 100644
index 0000000..2a6950b
--- /dev/null
+++ b/crates/serde_derive/src/internals/ast.rs
@@ -0,0 +1,202 @@
+//! A Serde AST, parsed from the Syn AST and ready to generate Rust code.
+
+use internals::attr;
+use internals::check;
+use internals::{Ctxt, Derive};
+use syn;
+use syn::punctuated::Punctuated;
+
+/// A source data structure annotated with `#[derive(Serialize)]` and/or `#[derive(Deserialize)]`,
+/// parsed into an internal representation.
+pub struct Container<'a> {
+    /// The struct or enum name (without generics).
+    pub ident: syn::Ident,
+    /// Attributes on the structure, parsed for Serde.
+    pub attrs: attr::Container,
+    /// The contents of the struct or enum.
+    pub data: Data<'a>,
+    /// Any generics on the struct or enum.
+    pub generics: &'a syn::Generics,
+    /// Original input.
+    pub original: &'a syn::DeriveInput,
+}
+
+/// The fields of a struct or enum.
+///
+/// Analogous to `syn::Data`.
+pub enum Data<'a> {
+    Enum(Vec<Variant<'a>>),
+    Struct(Style, Vec<Field<'a>>),
+}
+
+/// A variant of an enum.
+pub struct Variant<'a> {
+    pub ident: syn::Ident,
+    pub attrs: attr::Variant,
+    pub style: Style,
+    pub fields: Vec<Field<'a>>,
+    pub original: &'a syn::Variant,
+}
+
+/// A field of a struct.
+pub struct Field<'a> {
+    pub member: syn::Member,
+    pub attrs: attr::Field,
+    pub ty: &'a syn::Type,
+    pub original: &'a syn::Field,
+}
+
+#[derive(Copy, Clone)]
+pub enum Style {
+    /// Named fields.
+    Struct,
+    /// Many unnamed fields.
+    Tuple,
+    /// One unnamed field.
+    Newtype,
+    /// No fields.
+    Unit,
+}
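+
+// Illustratively, `struct S { a: u8 }` parses as `Style::Struct`,
+// `struct S(u8, u8)` as `Style::Tuple`, `struct S(u8)` as `Style::Newtype`, and
+// `struct S;` as `Style::Unit`; enum variant fields follow the same scheme.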
+
+impl<'a> Container<'a> {
+    /// Convert the raw Syn ast into a parsed container object, collecting errors in `cx`.
+    pub fn from_ast(
+        cx: &Ctxt,
+        item: &'a syn::DeriveInput,
+        derive: Derive,
+    ) -> Option<Container<'a>> {
+        let mut attrs = attr::Container::from_ast(cx, item);
+
+        let mut data = match &item.data {
+            syn::Data::Enum(data) => Data::Enum(enum_from_ast(cx, &data.variants, attrs.default())),
+            syn::Data::Struct(data) => {
+                let (style, fields) = struct_from_ast(cx, &data.fields, None, attrs.default());
+                Data::Struct(style, fields)
+            }
+            syn::Data::Union(_) => {
+                cx.error_spanned_by(item, "Serde does not support derive for unions");
+                return None;
+            }
+        };
+
+        let mut has_flatten = false;
+        match &mut data {
+            Data::Enum(variants) => {
+                for variant in variants {
+                    variant.attrs.rename_by_rules(attrs.rename_all_rules());
+                    for field in &mut variant.fields {
+                        if field.attrs.flatten() {
+                            has_flatten = true;
+                        }
+                        field
+                            .attrs
+                            .rename_by_rules(variant.attrs.rename_all_rules());
+                    }
+                }
+            }
+            Data::Struct(_, fields) => {
+                for field in fields {
+                    if field.attrs.flatten() {
+                        has_flatten = true;
+                    }
+                    field.attrs.rename_by_rules(attrs.rename_all_rules());
+                }
+            }
+        }
+
+        if has_flatten {
+            attrs.mark_has_flatten();
+        }
+
+        let mut item = Container {
+            ident: item.ident.clone(),
+            attrs,
+            data,
+            generics: &item.generics,
+            original: item,
+        };
+        check::check(cx, &mut item, derive);
+        Some(item)
+    }
+}
+
+impl<'a> Data<'a> {
+    pub fn all_fields(&'a self) -> Box<Iterator<Item = &'a Field<'a>> + 'a> {
+        match self {
+            Data::Enum(variants) => {
+                Box::new(variants.iter().flat_map(|variant| variant.fields.iter()))
+            }
+            Data::Struct(_, fields) => Box::new(fields.iter()),
+        }
+    }
+
+    pub fn has_getter(&self) -> bool {
+        self.all_fields().any(|f| f.attrs.getter().is_some())
+    }
+}
+
+fn enum_from_ast<'a>(
+    cx: &Ctxt,
+    variants: &'a Punctuated<syn::Variant, Token![,]>,
+    container_default: &attr::Default,
+) -> Vec<Variant<'a>> {
+    variants
+        .iter()
+        .map(|variant| {
+            let attrs = attr::Variant::from_ast(cx, variant);
+            let (style, fields) =
+                struct_from_ast(cx, &variant.fields, Some(&attrs), container_default);
+            Variant {
+                ident: variant.ident.clone(),
+                attrs,
+                style,
+                fields,
+                original: variant,
+            }
+        })
+        .collect()
+}
+
+fn struct_from_ast<'a>(
+    cx: &Ctxt,
+    fields: &'a syn::Fields,
+    attrs: Option<&attr::Variant>,
+    container_default: &attr::Default,
+) -> (Style, Vec<Field<'a>>) {
+    match fields {
+        syn::Fields::Named(fields) => (
+            Style::Struct,
+            fields_from_ast(cx, &fields.named, attrs, container_default),
+        ),
+        syn::Fields::Unnamed(fields) if fields.unnamed.len() == 1 => (
+            Style::Newtype,
+            fields_from_ast(cx, &fields.unnamed, attrs, container_default),
+        ),
+        syn::Fields::Unnamed(fields) => (
+            Style::Tuple,
+            fields_from_ast(cx, &fields.unnamed, attrs, container_default),
+        ),
+        syn::Fields::Unit => (Style::Unit, Vec::new()),
+    }
+}
+
+fn fields_from_ast<'a>(
+    cx: &Ctxt,
+    fields: &'a Punctuated<syn::Field, Token![,]>,
+    attrs: Option<&attr::Variant>,
+    container_default: &attr::Default,
+) -> Vec<Field<'a>> {
+    fields
+        .iter()
+        .enumerate()
+        .map(|(i, field)| Field {
+            member: match &field.ident {
+                Some(ident) => syn::Member::Named(ident.clone()),
+                None => syn::Member::Unnamed(i.into()),
+            },
+            attrs: attr::Field::from_ast(cx, i, field, attrs, container_default),
+            ty: &field.ty,
+            original: field,
+        })
+        .collect()
+}
diff --git a/crates/serde_derive/src/internals/attr.rs b/crates/serde_derive/src/internals/attr.rs
new file mode 100644
index 0000000..9875b66
--- /dev/null
+++ b/crates/serde_derive/src/internals/attr.rs
@@ -0,0 +1,1778 @@
+use internals::symbol::*;
+use internals::{ungroup, Ctxt};
+use proc_macro2::{Spacing, Span, TokenStream, TokenTree};
+use quote::ToTokens;
+use std::borrow::Cow;
+use std::collections::BTreeSet;
+use std::iter::FromIterator;
+use syn;
+use syn::meta::ParseNestedMeta;
+use syn::parse::ParseStream;
+use syn::punctuated::Punctuated;
+use syn::{token, Ident, Lifetime};
+
+// This module handles parsing of `#[serde(...)]` attributes. The entrypoints
+// are `attr::Container::from_ast`, `attr::Variant::from_ast`, and
+// `attr::Field::from_ast`. Each returns an instance of the corresponding
+// struct. Note that none of them return a Result. Unrecognized, malformed, or
+// duplicated attributes result in a span_err but otherwise are ignored. The
+// user will see errors simultaneously for all bad attributes in the crate
+// rather than just the first.
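+//
+// For example (illustrative input only), an item such as
+//
+//     #[derive(Serialize)]
+//     #[serde(rename = "A", rename = "B", unknown_option)]
+//     struct S;
+//
+// reports both a "duplicate serde attribute `rename`" error and an
+// "unknown serde container attribute `unknown_option`" error in the same
+// compilation instead of stopping at the first problem.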
+
+pub use internals::case::RenameRule;
+
+struct Attr<'c, T> {
+    cx: &'c Ctxt,
+    name: Symbol,
+    tokens: TokenStream,
+    value: Option<T>,
+}
+
+impl<'c, T> Attr<'c, T> {
+    fn none(cx: &'c Ctxt, name: Symbol) -> Self {
+        Attr {
+            cx,
+            name,
+            tokens: TokenStream::new(),
+            value: None,
+        }
+    }
+
+    fn set<A: ToTokens>(&mut self, obj: A, value: T) {
+        let tokens = obj.into_token_stream();
+
+        if self.value.is_some() {
+            let msg = format!("duplicate serde attribute `{}`", self.name);
+            self.cx.error_spanned_by(tokens, msg);
+        } else {
+            self.tokens = tokens;
+            self.value = Some(value);
+        }
+    }
+
+    fn set_opt<A: ToTokens>(&mut self, obj: A, value: Option<T>) {
+        if let Some(value) = value {
+            self.set(obj, value);
+        }
+    }
+
+    fn set_if_none(&mut self, value: T) {
+        if self.value.is_none() {
+            self.value = Some(value);
+        }
+    }
+
+    fn get(self) -> Option<T> {
+        self.value
+    }
+
+    fn get_with_tokens(self) -> Option<(TokenStream, T)> {
+        match self.value {
+            Some(v) => Some((self.tokens, v)),
+            None => None,
+        }
+    }
+}
+
+struct BoolAttr<'c>(Attr<'c, ()>);
+
+impl<'c> BoolAttr<'c> {
+    fn none(cx: &'c Ctxt, name: Symbol) -> Self {
+        BoolAttr(Attr::none(cx, name))
+    }
+
+    fn set_true<A: ToTokens>(&mut self, obj: A) {
+        self.0.set(obj, ());
+    }
+
+    fn get(&self) -> bool {
+        self.0.value.is_some()
+    }
+}
+
+struct VecAttr<'c, T> {
+    cx: &'c Ctxt,
+    name: Symbol,
+    first_dup_tokens: TokenStream,
+    values: Vec<T>,
+}
+
+impl<'c, T> VecAttr<'c, T> {
+    fn none(cx: &'c Ctxt, name: Symbol) -> Self {
+        VecAttr {
+            cx,
+            name,
+            first_dup_tokens: TokenStream::new(),
+            values: Vec::new(),
+        }
+    }
+
+    fn insert<A: ToTokens>(&mut self, obj: A, value: T) {
+        if self.values.len() == 1 {
+            self.first_dup_tokens = obj.into_token_stream();
+        }
+        self.values.push(value);
+    }
+
+    fn at_most_one(mut self) -> Option<T> {
+        if self.values.len() > 1 {
+            let dup_token = self.first_dup_tokens;
+            let msg = format!("duplicate serde attribute `{}`", self.name);
+            self.cx.error_spanned_by(dup_token, msg);
+            None
+        } else {
+            self.values.pop()
+        }
+    }
+
+    fn get(self) -> Vec<T> {
+        self.values
+    }
+}
+
+pub struct Name {
+    serialize: String,
+    serialize_renamed: bool,
+    deserialize: String,
+    deserialize_renamed: bool,
+    deserialize_aliases: Vec<String>,
+}
+
+fn unraw(ident: &Ident) -> String {
+    ident.to_string().trim_start_matches("r#").to_owned()
+}
+
+impl Name {
+    fn from_attrs(
+        source_name: String,
+        ser_name: Attr<String>,
+        de_name: Attr<String>,
+        de_aliases: Option<VecAttr<String>>,
+    ) -> Name {
+        let deserialize_aliases = match de_aliases {
+            Some(de_aliases) => {
+                let mut alias_list = BTreeSet::new();
+                for alias_name in de_aliases.get() {
+                    alias_list.insert(alias_name);
+                }
+                alias_list.into_iter().collect()
+            }
+            None => Vec::new(),
+        };
+
+        let ser_name = ser_name.get();
+        let ser_renamed = ser_name.is_some();
+        let de_name = de_name.get();
+        let de_renamed = de_name.is_some();
+        Name {
+            serialize: ser_name.unwrap_or_else(|| source_name.clone()),
+            serialize_renamed: ser_renamed,
+            deserialize: de_name.unwrap_or(source_name),
+            deserialize_renamed: de_renamed,
+            deserialize_aliases,
+        }
+    }
+
+    /// Return the name to use when serializing.
+    pub fn serialize_name(&self) -> String {
+        self.serialize.clone()
+    }
+
+    /// Return the name to use when deserializing.
+    pub fn deserialize_name(&self) -> String {
+        self.deserialize.clone()
+    }
+
+    fn deserialize_aliases(&self) -> Vec<String> {
+        let mut aliases = self.deserialize_aliases.clone();
+        let main_name = self.deserialize_name();
+        if !aliases.contains(&main_name) {
+            aliases.push(main_name);
+        }
+        aliases
+    }
+}
+
+pub struct RenameAllRules {
+    serialize: RenameRule,
+    deserialize: RenameRule,
+}
+
+/// Represents struct or enum attribute information.
+pub struct Container {
+    name: Name,
+    transparent: bool,
+    deny_unknown_fields: bool,
+    default: Default,
+    rename_all_rules: RenameAllRules,
+    ser_bound: Option<Vec<syn::WherePredicate>>,
+    de_bound: Option<Vec<syn::WherePredicate>>,
+    tag: TagType,
+    type_from: Option<syn::Type>,
+    type_try_from: Option<syn::Type>,
+    type_into: Option<syn::Type>,
+    remote: Option<syn::Path>,
+    identifier: Identifier,
+    has_flatten: bool,
+    serde_path: Option<syn::Path>,
+    is_packed: bool,
+    /// Error message generated when the type can't be deserialized.
+    expecting: Option<String>,
+}
+
+/// Styles of representing an enum.
+pub enum TagType {
+    /// The default.
+    ///
+    /// ```json
+    /// {"variant1": {"key1": "value1", "key2": "value2"}}
+    /// ```
+    External,
+
+    /// `#[serde(tag = "type")]`
+    ///
+    /// ```json
+    /// {"type": "variant1", "key1": "value1", "key2": "value2"}
+    /// ```
+    Internal { tag: String },
+
+    /// `#[serde(tag = "t", content = "c")]`
+    ///
+    /// ```json
+    /// {"t": "variant1", "c": {"key1": "value1", "key2": "value2"}}
+    /// ```
+    Adjacent { tag: String, content: String },
+
+    /// `#[serde(untagged)]`
+    ///
+    /// ```json
+    /// {"key1": "value1", "key2": "value2"}
+    /// ```
+    None,
+}
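+
+// Illustrative sketch (hypothetical enum, not part of the upstream source):
+// the adjacently tagged representation above is selected by
+//
+//     #[derive(Serialize, Deserialize)]
+//     #[serde(tag = "t", content = "c")]
+//     enum Message {
+//         Variant1 { key1: String, key2: String },
+//     }
+//
+// which a JSON serializer renders roughly as
+// `{"t": "Variant1", "c": {"key1": "...", "key2": "..."}}`.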
+
+/// Whether this enum represents the fields of a struct or the variants of an
+/// enum.
+#[derive(Copy, Clone)]
+pub enum Identifier {
+    /// It does not.
+    No,
+
+    /// This enum represents the fields of a struct. All of the variants must be
+    /// unit variants, except possibly one which is annotated with
+    /// `#[serde(other)]` and is a newtype variant.
+    Field,
+
+    /// This enum represents the variants of an enum. All of the variants must
+    /// be unit variants.
+    Variant,
+}
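+
+// Illustrative sketch (hypothetical enum, not part of the upstream source):
+// a field identifier as described above typically looks like
+//
+//     #[derive(Deserialize)]
+//     #[serde(field_identifier, rename_all = "lowercase")]
+//     enum DurationField { Secs, Nanos }
+//
+// so that the field keys "secs" and "nanos" deserialize to the matching unit
+// variants; `#[serde(variant_identifier)]` plays the same role for the variant
+// names of another enum.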
+
+impl Identifier {
+    #[cfg(feature = "deserialize_in_place")]
+    pub fn is_some(self) -> bool {
+        match self {
+            Identifier::No => false,
+            Identifier::Field | Identifier::Variant => true,
+        }
+    }
+}
+
+impl Container {
+    /// Extract out the `#[serde(...)]` attributes from an item.
+    pub fn from_ast(cx: &Ctxt, item: &syn::DeriveInput) -> Self {
+        let mut ser_name = Attr::none(cx, RENAME);
+        let mut de_name = Attr::none(cx, RENAME);
+        let mut transparent = BoolAttr::none(cx, TRANSPARENT);
+        let mut deny_unknown_fields = BoolAttr::none(cx, DENY_UNKNOWN_FIELDS);
+        let mut default = Attr::none(cx, DEFAULT);
+        let mut rename_all_ser_rule = Attr::none(cx, RENAME_ALL);
+        let mut rename_all_de_rule = Attr::none(cx, RENAME_ALL);
+        let mut ser_bound = Attr::none(cx, BOUND);
+        let mut de_bound = Attr::none(cx, BOUND);
+        let mut untagged = BoolAttr::none(cx, UNTAGGED);
+        let mut internal_tag = Attr::none(cx, TAG);
+        let mut content = Attr::none(cx, CONTENT);
+        let mut type_from = Attr::none(cx, FROM);
+        let mut type_try_from = Attr::none(cx, TRY_FROM);
+        let mut type_into = Attr::none(cx, INTO);
+        let mut remote = Attr::none(cx, REMOTE);
+        let mut field_identifier = BoolAttr::none(cx, FIELD_IDENTIFIER);
+        let mut variant_identifier = BoolAttr::none(cx, VARIANT_IDENTIFIER);
+        let mut serde_path = Attr::none(cx, CRATE);
+        let mut expecting = Attr::none(cx, EXPECTING);
+
+        for attr in &item.attrs {
+            if attr.path() != SERDE {
+                continue;
+            }
+
+            if let Err(err) = attr.parse_nested_meta(|meta| {
+                if meta.path == RENAME {
+                    // #[serde(rename = "foo")]
+                    // #[serde(rename(serialize = "foo", deserialize = "bar"))]
+                    let (ser, de) = get_renames(cx, RENAME, &meta)?;
+                    ser_name.set_opt(&meta.path, ser.as_ref().map(syn::LitStr::value));
+                    de_name.set_opt(&meta.path, de.as_ref().map(syn::LitStr::value));
+                } else if meta.path == RENAME_ALL {
+                    // #[serde(rename_all = "foo")]
+                    // #[serde(rename_all(serialize = "foo", deserialize = "bar"))]
+                    let one_name = meta.input.peek(Token![=]);
+                    let (ser, de) = get_renames(cx, RENAME_ALL, &meta)?;
+                    if let Some(ser) = ser {
+                        match RenameRule::from_str(&ser.value()) {
+                            Ok(rename_rule) => rename_all_ser_rule.set(&meta.path, rename_rule),
+                            Err(err) => cx.error_spanned_by(ser, err),
+                        }
+                    }
+                    if let Some(de) = de {
+                        match RenameRule::from_str(&de.value()) {
+                            Ok(rename_rule) => rename_all_de_rule.set(&meta.path, rename_rule),
+                            Err(err) => {
+                                if !one_name {
+                                    cx.error_spanned_by(de, err);
+                                }
+                            }
+                        }
+                    }
+                } else if meta.path == TRANSPARENT {
+                    // #[serde(transparent)]
+                    transparent.set_true(meta.path);
+                } else if meta.path == DENY_UNKNOWN_FIELDS {
+                    // #[serde(deny_unknown_fields)]
+                    deny_unknown_fields.set_true(meta.path);
+                } else if meta.path == DEFAULT {
+                    if meta.input.peek(Token![=]) {
+                        // #[serde(default = "...")]
+                        if let Some(path) = parse_lit_into_expr_path(cx, DEFAULT, &meta)? {
+                            match &item.data {
+                                syn::Data::Struct(syn::DataStruct { fields, .. }) => match fields {
+                                    syn::Fields::Named(_) => {
+                                        default.set(&meta.path, Default::Path(path));
+                                    }
+                                    syn::Fields::Unnamed(_) | syn::Fields::Unit => {
+                                        let msg = "#[serde(default = \"...\")] can only be used on structs with named fields";
+                                        cx.error_spanned_by(fields, msg);
+                                    }
+                                },
+                                syn::Data::Enum(syn::DataEnum { enum_token, .. }) => {
+                                    let msg = "#[serde(default = \"...\")] can only be used on structs with named fields";
+                                    cx.error_spanned_by(enum_token, msg);
+                                }
+                                syn::Data::Union(syn::DataUnion { union_token, .. }) => {
+                                    let msg = "#[serde(default = \"...\")] can only be used on structs with named fields";
+                                    cx.error_spanned_by(union_token, msg);
+                                }
+                            }
+                        }
+                    } else {
+                        // #[serde(default)]
+                        match &item.data {
+                            syn::Data::Struct(syn::DataStruct { fields, .. }) => match fields {
+                                syn::Fields::Named(_) => {
+                                    default.set(meta.path, Default::Default);
+                                }
+                                syn::Fields::Unnamed(_) | syn::Fields::Unit => {
+                                    let msg = "#[serde(default)] can only be used on structs with named fields";
+                                    cx.error_spanned_by(fields, msg);
+                                }
+                            },
+                            syn::Data::Enum(syn::DataEnum { enum_token, .. }) => {
+                                let msg = "#[serde(default)] can only be used on structs with named fields";
+                                cx.error_spanned_by(enum_token, msg);
+                            }
+                            syn::Data::Union(syn::DataUnion { union_token, .. }) => {
+                                let msg = "#[serde(default)] can only be used on structs with named fields";
+                                cx.error_spanned_by(union_token, msg);
+                            }
+                        }
+                    }
+                } else if meta.path == BOUND {
+                    // #[serde(bound = "T: SomeBound")]
+                    // #[serde(bound(serialize = "...", deserialize = "..."))]
+                    let (ser, de) = get_where_predicates(cx, &meta)?;
+                    ser_bound.set_opt(&meta.path, ser);
+                    de_bound.set_opt(&meta.path, de);
+                } else if meta.path == UNTAGGED {
+                    // #[serde(untagged)]
+                    match item.data {
+                        syn::Data::Enum(_) => {
+                            untagged.set_true(&meta.path);
+                        }
+                        syn::Data::Struct(syn::DataStruct { struct_token, .. }) => {
+                            let msg = "#[serde(untagged)] can only be used on enums";
+                            cx.error_spanned_by(struct_token, msg);
+                        }
+                        syn::Data::Union(syn::DataUnion { union_token, .. }) => {
+                            let msg = "#[serde(untagged)] can only be used on enums";
+                            cx.error_spanned_by(union_token, msg);
+                        }
+                    }
+                } else if meta.path == TAG {
+                    // #[serde(tag = "type")]
+                    if let Some(s) = get_lit_str(cx, TAG, &meta)? {
+                        match &item.data {
+                            syn::Data::Enum(_) => {
+                                internal_tag.set(&meta.path, s.value());
+                            }
+                            syn::Data::Struct(syn::DataStruct { fields, .. }) => match fields {
+                                syn::Fields::Named(_) => {
+                                    internal_tag.set(&meta.path, s.value());
+                                }
+                                syn::Fields::Unnamed(_) | syn::Fields::Unit => {
+                                    let msg = "#[serde(tag = \"...\")] can only be used on enums and structs with named fields";
+                                    cx.error_spanned_by(fields, msg);
+                                }
+                            },
+                            syn::Data::Union(syn::DataUnion { union_token, .. }) => {
+                                let msg = "#[serde(tag = \"...\")] can only be used on enums and structs with named fields";
+                                cx.error_spanned_by(union_token, msg);
+                            }
+                        }
+                    }
+                } else if meta.path == CONTENT {
+                    // #[serde(content = "c")]
+                    if let Some(s) = get_lit_str(cx, CONTENT, &meta)? {
+                        match &item.data {
+                            syn::Data::Enum(_) => {
+                                content.set(&meta.path, s.value());
+                            }
+                            syn::Data::Struct(syn::DataStruct { struct_token, .. }) => {
+                                let msg = "#[serde(content = \"...\")] can only be used on enums";
+                                cx.error_spanned_by(struct_token, msg);
+                            }
+                            syn::Data::Union(syn::DataUnion { union_token, .. }) => {
+                                let msg = "#[serde(content = \"...\")] can only be used on enums";
+                                cx.error_spanned_by(union_token, msg);
+                            }
+                        }
+                    }
+                } else if meta.path == FROM {
+                    // #[serde(from = "Type")]
+                    if let Some(from_ty) = parse_lit_into_ty(cx, FROM, &meta)? {
+                        type_from.set_opt(&meta.path, Some(from_ty));
+                    }
+                } else if meta.path == TRY_FROM {
+                    // #[serde(try_from = "Type")]
+                    if let Some(try_from_ty) = parse_lit_into_ty(cx, TRY_FROM, &meta)? {
+                        type_try_from.set_opt(&meta.path, Some(try_from_ty));
+                    }
+                } else if meta.path == INTO {
+                    // #[serde(into = "Type")]
+                    if let Some(into_ty) = parse_lit_into_ty(cx, INTO, &meta)? {
+                        type_into.set_opt(&meta.path, Some(into_ty));
+                    }
+                } else if meta.path == REMOTE {
+                    // #[serde(remote = "...")]
+                    if let Some(path) = parse_lit_into_path(cx, REMOTE, &meta)? {
+                        if is_primitive_path(&path, "Self") {
+                            remote.set(&meta.path, item.ident.clone().into());
+                        } else {
+                            remote.set(&meta.path, path);
+                        }
+                    }
+                } else if meta.path == FIELD_IDENTIFIER {
+                    // #[serde(field_identifier)]
+                    field_identifier.set_true(&meta.path);
+                } else if meta.path == VARIANT_IDENTIFIER {
+                    // #[serde(variant_identifier)]
+                    variant_identifier.set_true(&meta.path);
+                } else if meta.path == CRATE {
+                    // #[serde(crate = "foo")]
+                    if let Some(path) = parse_lit_into_path(cx, CRATE, &meta)? {
+                        serde_path.set(&meta.path, path);
+                    }
+                } else if meta.path == EXPECTING {
+                    // #[serde(expecting = "a message")]
+                    if let Some(s) = get_lit_str(cx, EXPECTING, &meta)? {
+                        expecting.set(&meta.path, s.value());
+                    }
+                } else {
+                    let path = meta.path.to_token_stream().to_string().replace(' ', "");
+                    return Err(
+                        meta.error(format_args!("unknown serde container attribute `{}`", path))
+                    );
+                }
+                Ok(())
+            }) {
+                cx.syn_error(err);
+            }
+        }
+
+        let mut is_packed = false;
+        for attr in &item.attrs {
+            if attr.path() == REPR {
+                let _ = attr.parse_args_with(|input: ParseStream| {
+                    while let Some(token) = input.parse()? {
+                        if let TokenTree::Ident(ident) = token {
+                            is_packed |= ident == "packed";
+                        }
+                    }
+                    Ok(())
+                });
+            }
+        }
+
+        Container {
+            name: Name::from_attrs(unraw(&item.ident), ser_name, de_name, None),
+            transparent: transparent.get(),
+            deny_unknown_fields: deny_unknown_fields.get(),
+            default: default.get().unwrap_or(Default::None),
+            rename_all_rules: RenameAllRules {
+                serialize: rename_all_ser_rule.get().unwrap_or(RenameRule::None),
+                deserialize: rename_all_de_rule.get().unwrap_or(RenameRule::None),
+            },
+            ser_bound: ser_bound.get(),
+            de_bound: de_bound.get(),
+            tag: decide_tag(cx, item, untagged, internal_tag, content),
+            type_from: type_from.get(),
+            type_try_from: type_try_from.get(),
+            type_into: type_into.get(),
+            remote: remote.get(),
+            identifier: decide_identifier(cx, item, field_identifier, variant_identifier),
+            has_flatten: false,
+            serde_path: serde_path.get(),
+            is_packed,
+            expecting: expecting.get(),
+        }
+    }
+
+    pub fn name(&self) -> &Name {
+        &self.name
+    }
+
+    pub fn rename_all_rules(&self) -> &RenameAllRules {
+        &self.rename_all_rules
+    }
+
+    pub fn transparent(&self) -> bool {
+        self.transparent
+    }
+
+    pub fn deny_unknown_fields(&self) -> bool {
+        self.deny_unknown_fields
+    }
+
+    pub fn default(&self) -> &Default {
+        &self.default
+    }
+
+    pub fn ser_bound(&self) -> Option<&[syn::WherePredicate]> {
+        self.ser_bound.as_ref().map(|vec| &vec[..])
+    }
+
+    pub fn de_bound(&self) -> Option<&[syn::WherePredicate]> {
+        self.de_bound.as_ref().map(|vec| &vec[..])
+    }
+
+    pub fn tag(&self) -> &TagType {
+        &self.tag
+    }
+
+    pub fn type_from(&self) -> Option<&syn::Type> {
+        self.type_from.as_ref()
+    }
+
+    pub fn type_try_from(&self) -> Option<&syn::Type> {
+        self.type_try_from.as_ref()
+    }
+
+    pub fn type_into(&self) -> Option<&syn::Type> {
+        self.type_into.as_ref()
+    }
+
+    pub fn remote(&self) -> Option<&syn::Path> {
+        self.remote.as_ref()
+    }
+
+    pub fn is_packed(&self) -> bool {
+        self.is_packed
+    }
+
+    pub fn identifier(&self) -> Identifier {
+        self.identifier
+    }
+
+    pub fn has_flatten(&self) -> bool {
+        self.has_flatten
+    }
+
+    pub fn mark_has_flatten(&mut self) {
+        self.has_flatten = true;
+    }
+
+    pub fn custom_serde_path(&self) -> Option<&syn::Path> {
+        self.serde_path.as_ref()
+    }
+
+    pub fn serde_path(&self) -> Cow<syn::Path> {
+        self.custom_serde_path()
+            .map_or_else(|| Cow::Owned(parse_quote!(_serde)), Cow::Borrowed)
+    }
+
+    /// Error message generated when the type can't be deserialized.
+    /// If `None`, the default message will be used.
+    pub fn expecting(&self) -> Option<&str> {
+        self.expecting.as_ref().map(String::as_ref)
+    }
+}
+
+fn decide_tag(
+    cx: &Ctxt,
+    item: &syn::DeriveInput,
+    untagged: BoolAttr,
+    internal_tag: Attr<String>,
+    content: Attr<String>,
+) -> TagType {
+    match (
+        untagged.0.get_with_tokens(),
+        internal_tag.get_with_tokens(),
+        content.get_with_tokens(),
+    ) {
+        (None, None, None) => TagType::External,
+        (Some(_), None, None) => TagType::None,
+        (None, Some((_, tag)), None) => {
+            // Check that there are no tuple variants.
+            if let syn::Data::Enum(data) = &item.data {
+                for variant in &data.variants {
+                    match &variant.fields {
+                        syn::Fields::Named(_) | syn::Fields::Unit => {}
+                        syn::Fields::Unnamed(fields) => {
+                            if fields.unnamed.len() != 1 {
+                                let msg =
+                                    "#[serde(tag = \"...\")] cannot be used with tuple variants";
+                                cx.error_spanned_by(variant, msg);
+                                break;
+                            }
+                        }
+                    }
+                }
+            }
+            TagType::Internal { tag }
+        }
+        (Some((untagged_tokens, _)), Some((tag_tokens, _)), None) => {
+            let msg = "enum cannot be both untagged and internally tagged";
+            cx.error_spanned_by(untagged_tokens, msg);
+            cx.error_spanned_by(tag_tokens, msg);
+            TagType::External // doesn't matter, will error
+        }
+        (None, None, Some((content_tokens, _))) => {
+            let msg = "#[serde(tag = \"...\", content = \"...\")] must be used together";
+            cx.error_spanned_by(content_tokens, msg);
+            TagType::External
+        }
+        (Some((untagged_tokens, _)), None, Some((content_tokens, _))) => {
+            let msg = "untagged enum cannot have #[serde(content = \"...\")]";
+            cx.error_spanned_by(untagged_tokens, msg);
+            cx.error_spanned_by(content_tokens, msg);
+            TagType::External
+        }
+        (None, Some((_, tag)), Some((_, content))) => TagType::Adjacent { tag, content },
+        (Some((untagged_tokens, _)), Some((tag_tokens, _)), Some((content_tokens, _))) => {
+            let msg = "untagged enum cannot have #[serde(tag = \"...\", content = \"...\")]";
+            cx.error_spanned_by(untagged_tokens, msg);
+            cx.error_spanned_by(tag_tokens, msg);
+            cx.error_spanned_by(content_tokens, msg);
+            TagType::External
+        }
+    }
+}
+
+fn decide_identifier(
+    cx: &Ctxt,
+    item: &syn::DeriveInput,
+    field_identifier: BoolAttr,
+    variant_identifier: BoolAttr,
+) -> Identifier {
+    match (
+        &item.data,
+        field_identifier.0.get_with_tokens(),
+        variant_identifier.0.get_with_tokens(),
+    ) {
+        (_, None, None) => Identifier::No,
+        (_, Some((field_identifier_tokens, _)), Some((variant_identifier_tokens, _))) => {
+            let msg =
+                "#[serde(field_identifier)] and #[serde(variant_identifier)] cannot both be set";
+            cx.error_spanned_by(field_identifier_tokens, msg);
+            cx.error_spanned_by(variant_identifier_tokens, msg);
+            Identifier::No
+        }
+        (syn::Data::Enum(_), Some(_), None) => Identifier::Field,
+        (syn::Data::Enum(_), None, Some(_)) => Identifier::Variant,
+        (syn::Data::Struct(syn::DataStruct { struct_token, .. }), Some(_), None) => {
+            let msg = "#[serde(field_identifier)] can only be used on an enum";
+            cx.error_spanned_by(struct_token, msg);
+            Identifier::No
+        }
+        (syn::Data::Union(syn::DataUnion { union_token, .. }), Some(_), None) => {
+            let msg = "#[serde(field_identifier)] can only be used on an enum";
+            cx.error_spanned_by(union_token, msg);
+            Identifier::No
+        }
+        (syn::Data::Struct(syn::DataStruct { struct_token, .. }), None, Some(_)) => {
+            let msg = "#[serde(variant_identifier)] can only be used on an enum";
+            cx.error_spanned_by(struct_token, msg);
+            Identifier::No
+        }
+        (syn::Data::Union(syn::DataUnion { union_token, .. }), None, Some(_)) => {
+            let msg = "#[serde(variant_identifier)] can only be used on an enum";
+            cx.error_spanned_by(union_token, msg);
+            Identifier::No
+        }
+    }
+}
+
+/// Represents variant attribute information
+pub struct Variant {
+    name: Name,
+    rename_all_rules: RenameAllRules,
+    ser_bound: Option<Vec<syn::WherePredicate>>,
+    de_bound: Option<Vec<syn::WherePredicate>>,
+    skip_deserializing: bool,
+    skip_serializing: bool,
+    other: bool,
+    serialize_with: Option<syn::ExprPath>,
+    deserialize_with: Option<syn::ExprPath>,
+    borrow: Option<BorrowAttribute>,
+}
+
+struct BorrowAttribute {
+    path: syn::Path,
+    lifetimes: Option<BTreeSet<syn::Lifetime>>,
+}
+
+impl Variant {
+    pub fn from_ast(cx: &Ctxt, variant: &syn::Variant) -> Self {
+        let mut ser_name = Attr::none(cx, RENAME);
+        let mut de_name = Attr::none(cx, RENAME);
+        let mut de_aliases = VecAttr::none(cx, RENAME);
+        let mut skip_deserializing = BoolAttr::none(cx, SKIP_DESERIALIZING);
+        let mut skip_serializing = BoolAttr::none(cx, SKIP_SERIALIZING);
+        let mut rename_all_ser_rule = Attr::none(cx, RENAME_ALL);
+        let mut rename_all_de_rule = Attr::none(cx, RENAME_ALL);
+        let mut ser_bound = Attr::none(cx, BOUND);
+        let mut de_bound = Attr::none(cx, BOUND);
+        let mut other = BoolAttr::none(cx, OTHER);
+        let mut serialize_with = Attr::none(cx, SERIALIZE_WITH);
+        let mut deserialize_with = Attr::none(cx, DESERIALIZE_WITH);
+        let mut borrow = Attr::none(cx, BORROW);
+
+        for attr in &variant.attrs {
+            if attr.path() != SERDE {
+                continue;
+            }
+
+            if let Err(err) = attr.parse_nested_meta(|meta| {
+                if meta.path == RENAME {
+                    // #[serde(rename = "foo")]
+                    // #[serde(rename(serialize = "foo", deserialize = "bar"))]
+                    let (ser, de) = get_multiple_renames(cx, &meta)?;
+                    ser_name.set_opt(&meta.path, ser.as_ref().map(syn::LitStr::value));
+                    for de_value in de {
+                        de_name.set_if_none(de_value.value());
+                        de_aliases.insert(&meta.path, de_value.value());
+                    }
+                } else if meta.path == ALIAS {
+                    // #[serde(alias = "foo")]
+                    if let Some(s) = get_lit_str(cx, ALIAS, &meta)? {
+                        de_aliases.insert(&meta.path, s.value());
+                    }
+                } else if meta.path == RENAME_ALL {
+                    // #[serde(rename_all = "foo")]
+                    // #[serde(rename_all(serialize = "foo", deserialize = "bar"))]
+                    let one_name = meta.input.peek(Token![=]);
+                    let (ser, de) = get_renames(cx, RENAME_ALL, &meta)?;
+                    if let Some(ser) = ser {
+                        match RenameRule::from_str(&ser.value()) {
+                            Ok(rename_rule) => rename_all_ser_rule.set(&meta.path, rename_rule),
+                            Err(err) => cx.error_spanned_by(ser, err),
+                        }
+                    }
+                    if let Some(de) = de {
+                        match RenameRule::from_str(&de.value()) {
+                            Ok(rename_rule) => rename_all_de_rule.set(&meta.path, rename_rule),
+                            Err(err) => {
+                                if !one_name {
+                                    cx.error_spanned_by(de, err);
+                                }
+                            }
+                        }
+                    }
+                } else if meta.path == SKIP {
+                    // #[serde(skip)]
+                    skip_serializing.set_true(&meta.path);
+                    skip_deserializing.set_true(&meta.path);
+                } else if meta.path == SKIP_DESERIALIZING {
+                    // #[serde(skip_deserializing)]
+                    skip_deserializing.set_true(&meta.path);
+                } else if meta.path == SKIP_SERIALIZING {
+                    // #[serde(skip_serializing)]
+                    skip_serializing.set_true(&meta.path);
+                } else if meta.path == OTHER {
+                    // #[serde(other)]
+                    other.set_true(&meta.path);
+                } else if meta.path == BOUND {
+                    // #[serde(bound = "T: SomeBound")]
+                    // #[serde(bound(serialize = "...", deserialize = "..."))]
+                    let (ser, de) = get_where_predicates(cx, &meta)?;
+                    ser_bound.set_opt(&meta.path, ser);
+                    de_bound.set_opt(&meta.path, de);
+                } else if meta.path == WITH {
+                    // #[serde(with = "...")]
+                    if let Some(path) = parse_lit_into_expr_path(cx, WITH, &meta)? {
+                        let mut ser_path = path.clone();
+                        ser_path
+                            .path
+                            .segments
+                            .push(Ident::new("serialize", Span::call_site()).into());
+                        serialize_with.set(&meta.path, ser_path);
+                        let mut de_path = path;
+                        de_path
+                            .path
+                            .segments
+                            .push(Ident::new("deserialize", Span::call_site()).into());
+                        deserialize_with.set(&meta.path, de_path);
+                    }
+                } else if meta.path == SERIALIZE_WITH {
+                    // #[serde(serialize_with = "...")]
+                    if let Some(path) = parse_lit_into_expr_path(cx, SERIALIZE_WITH, &meta)? {
+                        serialize_with.set(&meta.path, path);
+                    }
+                } else if meta.path == DESERIALIZE_WITH {
+                    // #[serde(deserialize_with = "...")]
+                    if let Some(path) = parse_lit_into_expr_path(cx, DESERIALIZE_WITH, &meta)? {
+                        deserialize_with.set(&meta.path, path);
+                    }
+                } else if meta.path == BORROW {
+                    let borrow_attribute = if meta.input.peek(Token![=]) {
+                        // #[serde(borrow = "'a + 'b")]
+                        let lifetimes = parse_lit_into_lifetimes(cx, &meta)?;
+                        BorrowAttribute {
+                            path: meta.path.clone(),
+                            lifetimes: Some(lifetimes),
+                        }
+                    } else {
+                        // #[serde(borrow)]
+                        BorrowAttribute {
+                            path: meta.path.clone(),
+                            lifetimes: None,
+                        }
+                    };
+                    match &variant.fields {
+                        syn::Fields::Unnamed(fields) if fields.unnamed.len() == 1 => {
+                            borrow.set(&meta.path, borrow_attribute);
+                        }
+                        _ => {
+                            let msg = "#[serde(borrow)] may only be used on newtype variants";
+                            cx.error_spanned_by(variant, msg);
+                        }
+                    }
+                } else {
+                    let path = meta.path.to_token_stream().to_string().replace(' ', "");
+                    return Err(
+                        meta.error(format_args!("unknown serde variant attribute `{}`", path))
+                    );
+                }
+                Ok(())
+            }) {
+                cx.syn_error(err);
+            }
+        }
+
+        Variant {
+            name: Name::from_attrs(unraw(&variant.ident), ser_name, de_name, Some(de_aliases)),
+            rename_all_rules: RenameAllRules {
+                serialize: rename_all_ser_rule.get().unwrap_or(RenameRule::None),
+                deserialize: rename_all_de_rule.get().unwrap_or(RenameRule::None),
+            },
+            ser_bound: ser_bound.get(),
+            de_bound: de_bound.get(),
+            skip_deserializing: skip_deserializing.get(),
+            skip_serializing: skip_serializing.get(),
+            other: other.get(),
+            serialize_with: serialize_with.get(),
+            deserialize_with: deserialize_with.get(),
+            borrow: borrow.get(),
+        }
+    }
+
+    pub fn name(&self) -> &Name {
+        &self.name
+    }
+
+    pub fn aliases(&self) -> Vec<String> {
+        self.name.deserialize_aliases()
+    }
+
+    pub fn rename_by_rules(&mut self, rules: &RenameAllRules) {
+        if !self.name.serialize_renamed {
+            self.name.serialize = rules.serialize.apply_to_variant(&self.name.serialize);
+        }
+        if !self.name.deserialize_renamed {
+            self.name.deserialize = rules.deserialize.apply_to_variant(&self.name.deserialize);
+        }
+    }
+
+    pub fn rename_all_rules(&self) -> &RenameAllRules {
+        &self.rename_all_rules
+    }
+
+    pub fn ser_bound(&self) -> Option<&[syn::WherePredicate]> {
+        self.ser_bound.as_ref().map(|vec| &vec[..])
+    }
+
+    pub fn de_bound(&self) -> Option<&[syn::WherePredicate]> {
+        self.de_bound.as_ref().map(|vec| &vec[..])
+    }
+
+    pub fn skip_deserializing(&self) -> bool {
+        self.skip_deserializing
+    }
+
+    pub fn skip_serializing(&self) -> bool {
+        self.skip_serializing
+    }
+
+    pub fn other(&self) -> bool {
+        self.other
+    }
+
+    pub fn serialize_with(&self) -> Option<&syn::ExprPath> {
+        self.serialize_with.as_ref()
+    }
+
+    pub fn deserialize_with(&self) -> Option<&syn::ExprPath> {
+        self.deserialize_with.as_ref()
+    }
+}
+
+/// Represents field attribute information
+pub struct Field {
+    name: Name,
+    skip_serializing: bool,
+    skip_deserializing: bool,
+    skip_serializing_if: Option<syn::ExprPath>,
+    default: Default,
+    serialize_with: Option<syn::ExprPath>,
+    deserialize_with: Option<syn::ExprPath>,
+    ser_bound: Option<Vec<syn::WherePredicate>>,
+    de_bound: Option<Vec<syn::WherePredicate>>,
+    borrowed_lifetimes: BTreeSet<syn::Lifetime>,
+    getter: Option<syn::ExprPath>,
+    flatten: bool,
+    transparent: bool,
+}
+
+/// Represents the default to use for a field when deserializing.
+pub enum Default {
+    /// Field must always be specified because it does not have a default.
+    None,
+    /// The default is given by `std::default::Default::default()`.
+    Default,
+    /// The default is given by this function.
+    Path(syn::ExprPath),
+}
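+
+// Illustrative sketch (hypothetical struct and function, not part of the
+// upstream source): the three `Default` cases above correspond to
+//
+//     #[derive(Deserialize)]
+//     struct Config {
+//         required: String,                 // Default::None
+//         #[serde(default)]
+//         retries: u32,                     // Default::Default
+//         #[serde(default = "default_timeout")]
+//         timeout_ms: u64,                  // Default::Path(default_timeout)
+//     }
+//
+//     fn default_timeout() -> u64 { 30_000 }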
+
+impl Default {
+    pub fn is_none(&self) -> bool {
+        match self {
+            Default::None => true,
+            Default::Default | Default::Path(_) => false,
+        }
+    }
+}
+
+impl Field {
+    /// Extract out the `#[serde(...)]` attributes from a struct field.
+    pub fn from_ast(
+        cx: &Ctxt,
+        index: usize,
+        field: &syn::Field,
+        attrs: Option<&Variant>,
+        container_default: &Default,
+    ) -> Self {
+        let mut ser_name = Attr::none(cx, RENAME);
+        let mut de_name = Attr::none(cx, RENAME);
+        let mut de_aliases = VecAttr::none(cx, RENAME);
+        let mut skip_serializing = BoolAttr::none(cx, SKIP_SERIALIZING);
+        let mut skip_deserializing = BoolAttr::none(cx, SKIP_DESERIALIZING);
+        let mut skip_serializing_if = Attr::none(cx, SKIP_SERIALIZING_IF);
+        let mut default = Attr::none(cx, DEFAULT);
+        let mut serialize_with = Attr::none(cx, SERIALIZE_WITH);
+        let mut deserialize_with = Attr::none(cx, DESERIALIZE_WITH);
+        let mut ser_bound = Attr::none(cx, BOUND);
+        let mut de_bound = Attr::none(cx, BOUND);
+        let mut borrowed_lifetimes = Attr::none(cx, BORROW);
+        let mut getter = Attr::none(cx, GETTER);
+        let mut flatten = BoolAttr::none(cx, FLATTEN);
+
+        let ident = match &field.ident {
+            Some(ident) => unraw(ident),
+            None => index.to_string(),
+        };
+
+        if let Some(borrow_attribute) = attrs.and_then(|variant| variant.borrow.as_ref()) {
+            if let Ok(borrowable) = borrowable_lifetimes(cx, &ident, field) {
+                if let Some(lifetimes) = &borrow_attribute.lifetimes {
+                    for lifetime in lifetimes {
+                        if !borrowable.contains(lifetime) {
+                            let msg =
+                                format!("field `{}` does not have lifetime {}", ident, lifetime);
+                            cx.error_spanned_by(field, msg);
+                        }
+                    }
+                    borrowed_lifetimes.set(&borrow_attribute.path, lifetimes.clone());
+                } else {
+                    borrowed_lifetimes.set(&borrow_attribute.path, borrowable);
+                }
+            }
+        }
+
+        for attr in &field.attrs {
+            if attr.path() != SERDE {
+                continue;
+            }
+
+            if let Err(err) = attr.parse_nested_meta(|meta| {
+                if meta.path == RENAME {
+                    // #[serde(rename = "foo")]
+                    // #[serde(rename(serialize = "foo", deserialize = "bar"))]
+                    let (ser, de) = get_multiple_renames(cx, &meta)?;
+                    ser_name.set_opt(&meta.path, ser.as_ref().map(syn::LitStr::value));
+                    for de_value in de {
+                        de_name.set_if_none(de_value.value());
+                        de_aliases.insert(&meta.path, de_value.value());
+                    }
+                } else if meta.path == ALIAS {
+                    // #[serde(alias = "foo")]
+                    if let Some(s) = get_lit_str(cx, ALIAS, &meta)? {
+                        de_aliases.insert(&meta.path, s.value());
+                    }
+                } else if meta.path == DEFAULT {
+                    if meta.input.peek(Token![=]) {
+                        // #[serde(default = "...")]
+                        if let Some(path) = parse_lit_into_expr_path(cx, DEFAULT, &meta)? {
+                            default.set(&meta.path, Default::Path(path));
+                        }
+                    } else {
+                        // #[serde(default)]
+                        default.set(&meta.path, Default::Default);
+                    }
+                } else if meta.path == SKIP_SERIALIZING {
+                    // #[serde(skip_serializing)]
+                    skip_serializing.set_true(&meta.path);
+                } else if meta.path == SKIP_DESERIALIZING {
+                    // #[serde(skip_deserializing)]
+                    skip_deserializing.set_true(&meta.path);
+                } else if meta.path == SKIP {
+                    // #[serde(skip)]
+                    skip_serializing.set_true(&meta.path);
+                    skip_deserializing.set_true(&meta.path);
+                } else if meta.path == SKIP_SERIALIZING_IF {
+                    // #[serde(skip_serializing_if = "...")]
+                    if let Some(path) = parse_lit_into_expr_path(cx, SKIP_SERIALIZING_IF, &meta)? {
+                        skip_serializing_if.set(&meta.path, path);
+                    }
+                } else if meta.path == SERIALIZE_WITH {
+                    // #[serde(serialize_with = "...")]
+                    if let Some(path) = parse_lit_into_expr_path(cx, SERIALIZE_WITH, &meta)? {
+                        serialize_with.set(&meta.path, path);
+                    }
+                } else if meta.path == DESERIALIZE_WITH {
+                    // #[serde(deserialize_with = "...")]
+                    if let Some(path) = parse_lit_into_expr_path(cx, DESERIALIZE_WITH, &meta)? {
+                        deserialize_with.set(&meta.path, path);
+                    }
+                } else if meta.path == WITH {
+                    // #[serde(with = "...")]
+                    if let Some(path) = parse_lit_into_expr_path(cx, WITH, &meta)? {
+                        let mut ser_path = path.clone();
+                        ser_path
+                            .path
+                            .segments
+                            .push(Ident::new("serialize", Span::call_site()).into());
+                        serialize_with.set(&meta.path, ser_path);
+                        let mut de_path = path;
+                        de_path
+                            .path
+                            .segments
+                            .push(Ident::new("deserialize", Span::call_site()).into());
+                        deserialize_with.set(&meta.path, de_path);
+                    }
+                } else if meta.path == BOUND {
+                    // #[serde(bound = "T: SomeBound")]
+                    // #[serde(bound(serialize = "...", deserialize = "..."))]
+                    let (ser, de) = get_where_predicates(cx, &meta)?;
+                    ser_bound.set_opt(&meta.path, ser);
+                    de_bound.set_opt(&meta.path, de);
+                } else if meta.path == BORROW {
+                    if meta.input.peek(Token![=]) {
+                        // #[serde(borrow = "'a + 'b")]
+                        let lifetimes = parse_lit_into_lifetimes(cx, &meta)?;
+                        if let Ok(borrowable) = borrowable_lifetimes(cx, &ident, field) {
+                            for lifetime in &lifetimes {
+                                if !borrowable.contains(lifetime) {
+                                    let msg = format!(
+                                        "field `{}` does not have lifetime {}",
+                                        ident, lifetime,
+                                    );
+                                    cx.error_spanned_by(field, msg);
+                                }
+                            }
+                            borrowed_lifetimes.set(&meta.path, lifetimes);
+                        }
+                    } else {
+                        // #[serde(borrow)]
+                        if let Ok(borrowable) = borrowable_lifetimes(cx, &ident, field) {
+                            borrowed_lifetimes.set(&meta.path, borrowable);
+                        }
+                    }
+                } else if meta.path == GETTER {
+                    // #[serde(getter = "...")]
+                    if let Some(path) = parse_lit_into_expr_path(cx, GETTER, &meta)? {
+                        getter.set(&meta.path, path);
+                    }
+                } else if meta.path == FLATTEN {
+                    // #[serde(flatten)]
+                    flatten.set_true(&meta.path);
+                } else {
+                    let path = meta.path.to_token_stream().to_string().replace(' ', "");
+                    return Err(
+                        meta.error(format_args!("unknown serde field attribute `{}`", path))
+                    );
+                }
+                Ok(())
+            }) {
+                cx.syn_error(err);
+            }
+        }
+
+        // If skip_deserializing is set, initialize the field to Default::default()
+        // unless a different default is specified by `#[serde(default = "...")]` on
+        // ourselves or our container (e.g. the struct we are in).
+        if let Default::None = *container_default {
+            if skip_deserializing.0.value.is_some() {
+                default.set_if_none(Default::Default);
+            }
+        }
+
+        let mut borrowed_lifetimes = borrowed_lifetimes.get().unwrap_or_default();
+        if !borrowed_lifetimes.is_empty() {
+            // Cow<str> and Cow<[u8]> never borrow by default:
+            //
+            //     impl<'de, 'a, T: ?Sized> Deserialize<'de> for Cow<'a, T>
+            //
+            // A #[serde(borrow)] attribute enables borrowing that corresponds
+            // roughly to these impls:
+            //
+            //     impl<'de: 'a, 'a> Deserialize<'de> for Cow<'a, str>
+            //     impl<'de: 'a, 'a> Deserialize<'de> for Cow<'a, [u8]>
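+            //
+            // Illustrative usage (hypothetical struct, not from the upstream
+            // source):
+            //
+            //     #[derive(Deserialize)]
+            //     struct Record<'a> {
+            //         #[serde(borrow)]
+            //         text: Cow<'a, str>,
+            //     }
+            //
+            // Here the generated impl routes `text` through `borrow_cow_str`,
+            // so the deserializer may hand back a borrowed `&'de str` rather
+            // than always allocating an owned `String`.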
+            if is_cow(&field.ty, is_str) {
+                let mut path = syn::Path {
+                    leading_colon: None,
+                    segments: Punctuated::new(),
+                };
+                let span = Span::call_site();
+                path.segments.push(Ident::new("_serde", span).into());
+                path.segments.push(Ident::new("__private", span).into());
+                path.segments.push(Ident::new("de", span).into());
+                path.segments
+                    .push(Ident::new("borrow_cow_str", span).into());
+                let expr = syn::ExprPath {
+                    attrs: Vec::new(),
+                    qself: None,
+                    path,
+                };
+                deserialize_with.set_if_none(expr);
+            } else if is_cow(&field.ty, is_slice_u8) {
+                let mut path = syn::Path {
+                    leading_colon: None,
+                    segments: Punctuated::new(),
+                };
+                let span = Span::call_site();
+                path.segments.push(Ident::new("_serde", span).into());
+                path.segments.push(Ident::new("__private", span).into());
+                path.segments.push(Ident::new("de", span).into());
+                path.segments
+                    .push(Ident::new("borrow_cow_bytes", span).into());
+                let expr = syn::ExprPath {
+                    attrs: Vec::new(),
+                    qself: None,
+                    path,
+                };
+                deserialize_with.set_if_none(expr);
+            }
+        } else if is_implicitly_borrowed(&field.ty) {
+            // Types &str and &[u8] are always implicitly borrowed. No need for
+            // a #[serde(borrow)].
+            collect_lifetimes(&field.ty, &mut borrowed_lifetimes);
+        }
+
+        Field {
+            name: Name::from_attrs(ident, ser_name, de_name, Some(de_aliases)),
+            skip_serializing: skip_serializing.get(),
+            skip_deserializing: skip_deserializing.get(),
+            skip_serializing_if: skip_serializing_if.get(),
+            default: default.get().unwrap_or(Default::None),
+            serialize_with: serialize_with.get(),
+            deserialize_with: deserialize_with.get(),
+            ser_bound: ser_bound.get(),
+            de_bound: de_bound.get(),
+            borrowed_lifetimes,
+            getter: getter.get(),
+            flatten: flatten.get(),
+            transparent: false,
+        }
+    }
+
+    pub fn name(&self) -> &Name {
+        &self.name
+    }
+
+    pub fn aliases(&self) -> Vec<String> {
+        self.name.deserialize_aliases()
+    }
+
+    pub fn rename_by_rules(&mut self, rules: &RenameAllRules) {
+        if !self.name.serialize_renamed {
+            self.name.serialize = rules.serialize.apply_to_field(&self.name.serialize);
+        }
+        if !self.name.deserialize_renamed {
+            self.name.deserialize = rules.deserialize.apply_to_field(&self.name.deserialize);
+        }
+    }
+
+    pub fn skip_serializing(&self) -> bool {
+        self.skip_serializing
+    }
+
+    pub fn skip_deserializing(&self) -> bool {
+        self.skip_deserializing
+    }
+
+    pub fn skip_serializing_if(&self) -> Option<&syn::ExprPath> {
+        self.skip_serializing_if.as_ref()
+    }
+
+    pub fn default(&self) -> &Default {
+        &self.default
+    }
+
+    pub fn serialize_with(&self) -> Option<&syn::ExprPath> {
+        self.serialize_with.as_ref()
+    }
+
+    pub fn deserialize_with(&self) -> Option<&syn::ExprPath> {
+        self.deserialize_with.as_ref()
+    }
+
+    pub fn ser_bound(&self) -> Option<&[syn::WherePredicate]> {
+        self.ser_bound.as_ref().map(|vec| &vec[..])
+    }
+
+    pub fn de_bound(&self) -> Option<&[syn::WherePredicate]> {
+        self.de_bound.as_ref().map(|vec| &vec[..])
+    }
+
+    pub fn borrowed_lifetimes(&self) -> &BTreeSet<syn::Lifetime> {
+        &self.borrowed_lifetimes
+    }
+
+    pub fn getter(&self) -> Option<&syn::ExprPath> {
+        self.getter.as_ref()
+    }
+
+    pub fn flatten(&self) -> bool {
+        self.flatten
+    }
+
+    pub fn transparent(&self) -> bool {
+        self.transparent
+    }
+
+    pub fn mark_transparent(&mut self) {
+        self.transparent = true;
+    }
+}
+
+type SerAndDe<T> = (Option<T>, Option<T>);
+
+fn get_ser_and_de<'c, T, F, R>(
+    cx: &'c Ctxt,
+    attr_name: Symbol,
+    meta: &ParseNestedMeta,
+    f: F,
+) -> syn::Result<(VecAttr<'c, T>, VecAttr<'c, T>)>
+where
+    T: Clone,
+    F: Fn(&Ctxt, Symbol, Symbol, &ParseNestedMeta) -> syn::Result<R>,
+    R: Into<Option<T>>,
+{
+    let mut ser_meta = VecAttr::none(cx, attr_name);
+    let mut de_meta = VecAttr::none(cx, attr_name);
+
+    let lookahead = meta.input.lookahead1();
+    if lookahead.peek(Token![=]) {
+        if let Some(both) = f(cx, attr_name, attr_name, meta)?.into() {
+            ser_meta.insert(&meta.path, both.clone());
+            de_meta.insert(&meta.path, both);
+        }
+    } else if lookahead.peek(token::Paren) {
+        meta.parse_nested_meta(|meta| {
+            if meta.path == SERIALIZE {
+                if let Some(v) = f(cx, attr_name, SERIALIZE, &meta)?.into() {
+                    ser_meta.insert(&meta.path, v);
+                }
+            } else if meta.path == DESERIALIZE {
+                if let Some(v) = f(cx, attr_name, DESERIALIZE, &meta)?.into() {
+                    de_meta.insert(&meta.path, v);
+                }
+            } else {
+                return Err(meta.error(format_args!(
+                    "malformed {0} attribute, expected `{0}(serialize = ..., deserialize = ...)`",
+                    attr_name,
+                )));
+            }
+            Ok(())
+        })?;
+    } else {
+        return Err(lookahead.error());
+    }
+
+    Ok((ser_meta, de_meta))
+}
+
+fn get_renames(
+    cx: &Ctxt,
+    attr_name: Symbol,
+    meta: &ParseNestedMeta,
+) -> syn::Result<SerAndDe<syn::LitStr>> {
+    let (ser, de) = get_ser_and_de(cx, attr_name, meta, get_lit_str2)?;
+    Ok((ser.at_most_one(), de.at_most_one()))
+}
+
+fn get_multiple_renames(
+    cx: &Ctxt,
+    meta: &ParseNestedMeta,
+) -> syn::Result<(Option<syn::LitStr>, Vec<syn::LitStr>)> {
+    let (ser, de) = get_ser_and_de(cx, RENAME, meta, get_lit_str2)?;
+    Ok((ser.at_most_one(), de.get()))
+}
+
+fn get_where_predicates(
+    cx: &Ctxt,
+    meta: &ParseNestedMeta,
+) -> syn::Result<SerAndDe<Vec<syn::WherePredicate>>> {
+    let (ser, de) = get_ser_and_de(cx, BOUND, meta, parse_lit_into_where)?;
+    Ok((ser.at_most_one(), de.at_most_one()))
+}
+
+fn get_lit_str(
+    cx: &Ctxt,
+    attr_name: Symbol,
+    meta: &ParseNestedMeta,
+) -> syn::Result<Option<syn::LitStr>> {
+    get_lit_str2(cx, attr_name, attr_name, meta)
+}
+
+fn get_lit_str2(
+    cx: &Ctxt,
+    attr_name: Symbol,
+    meta_item_name: Symbol,
+    meta: &ParseNestedMeta,
+) -> syn::Result<Option<syn::LitStr>> {
+    let expr: syn::Expr = meta.value()?.parse()?;
+    let mut value = &expr;
+    while let syn::Expr::Group(e) = value {
+        value = &e.expr;
+    }
+    if let syn::Expr::Lit(syn::ExprLit {
+        lit: syn::Lit::Str(lit),
+        ..
+    }) = value
+    {
+        Ok(Some(lit.clone()))
+    } else {
+        cx.error_spanned_by(
+            expr,
+            format!(
+                "expected serde {} attribute to be a string: `{} = \"...\"`",
+                attr_name, meta_item_name
+            ),
+        );
+        Ok(None)
+    }
+}
+
+fn parse_lit_into_path(
+    cx: &Ctxt,
+    attr_name: Symbol,
+    meta: &ParseNestedMeta,
+) -> syn::Result<Option<syn::Path>> {
+    let string = match get_lit_str(cx, attr_name, meta)? {
+        Some(string) => string,
+        None => return Ok(None),
+    };
+
+    Ok(match string.parse() {
+        Ok(path) => Some(path),
+        Err(_) => {
+            cx.error_spanned_by(
+                &string,
+                format!("failed to parse path: {:?}", string.value()),
+            );
+            None
+        }
+    })
+}
+
+fn parse_lit_into_expr_path(
+    cx: &Ctxt,
+    attr_name: Symbol,
+    meta: &ParseNestedMeta,
+) -> syn::Result<Option<syn::ExprPath>> {
+    let string = match get_lit_str(cx, attr_name, meta)? {
+        Some(string) => string,
+        None => return Ok(None),
+    };
+
+    Ok(match string.parse() {
+        Ok(expr) => Some(expr),
+        Err(_) => {
+            cx.error_spanned_by(
+                &string,
+                format!("failed to parse path: {:?}", string.value()),
+            );
+            None
+        }
+    })
+}
+
+fn parse_lit_into_where(
+    cx: &Ctxt,
+    attr_name: Symbol,
+    meta_item_name: Symbol,
+    meta: &ParseNestedMeta,
+) -> syn::Result<Vec<syn::WherePredicate>> {
+    let string = match get_lit_str2(cx, attr_name, meta_item_name, meta)? {
+        Some(string) => string,
+        None => return Ok(Vec::new()),
+    };
+
+    Ok(
+        match string.parse_with(Punctuated::<syn::WherePredicate, Token![,]>::parse_terminated) {
+            Ok(predicates) => Vec::from_iter(predicates),
+            Err(err) => {
+                cx.error_spanned_by(string, err);
+                Vec::new()
+            }
+        },
+    )
+}
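+
+// For illustration only (hypothetical trait name `MyTrait`), the string parsed
+// here comes from attributes such as:
+//
+//     #[serde(bound = "T: MyTrait")]
+//     #[serde(bound(deserialize = "T: Default, U: MyTrait"))]
+//
+// i.e. a comma-separated list of where-predicates inside a string literal.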
+
+fn parse_lit_into_ty(
+    cx: &Ctxt,
+    attr_name: Symbol,
+    meta: &ParseNestedMeta,
+) -> syn::Result<Option<syn::Type>> {
+    let string = match get_lit_str(cx, attr_name, meta)? {
+        Some(string) => string,
+        None => return Ok(None),
+    };
+
+    Ok(match string.parse() {
+        Ok(ty) => Some(ty),
+        Err(_) => {
+            cx.error_spanned_by(
+                &string,
+                format!("failed to parse type: {} = {:?}", attr_name, string.value()),
+            );
+            None
+        }
+    })
+}
+
+// Parses a string literal like "'a + 'b + 'c" containing a nonempty list of
+// lifetimes separated by `+`.
+fn parse_lit_into_lifetimes(
+    cx: &Ctxt,
+    meta: &ParseNestedMeta,
+) -> syn::Result<BTreeSet<syn::Lifetime>> {
+    let string = match get_lit_str(cx, BORROW, meta)? {
+        Some(string) => string,
+        None => return Ok(BTreeSet::new()),
+    };
+
+    if let Ok(lifetimes) = string.parse_with(|input: ParseStream| {
+        let mut set = BTreeSet::new();
+        while !input.is_empty() {
+            let lifetime: Lifetime = input.parse()?;
+            if !set.insert(lifetime.clone()) {
+                cx.error_spanned_by(
+                    &string,
+                    format!("duplicate borrowed lifetime `{}`", lifetime),
+                );
+            }
+            if input.is_empty() {
+                break;
+            }
+            input.parse::<Token![+]>()?;
+        }
+        Ok(set)
+    }) {
+        if lifetimes.is_empty() {
+            cx.error_spanned_by(string, "at least one lifetime must be borrowed");
+        }
+        return Ok(lifetimes);
+    }
+
+    cx.error_spanned_by(
+        &string,
+        format!("failed to parse borrowed lifetimes: {:?}", string.value()),
+    );
+    Ok(BTreeSet::new())
+}
+
+fn is_implicitly_borrowed(ty: &syn::Type) -> bool {
+    is_implicitly_borrowed_reference(ty) || is_option(ty, is_implicitly_borrowed_reference)
+}
+
+fn is_implicitly_borrowed_reference(ty: &syn::Type) -> bool {
+    is_reference(ty, is_str) || is_reference(ty, is_slice_u8)
+}
+
+// Whether the type looks like it might be `std::borrow::Cow<T>` where elem="T".
+// This can have false negatives and false positives.
+//
+// False negative:
+//
+//     use std::borrow::Cow as Pig;
+//
+//     #[derive(Deserialize)]
+//     struct S<'a> {
+//         #[serde(borrow)]
+//         pig: Pig<'a, str>,
+//     }
+//
+// False positive:
+//
+//     type str = [i16];
+//
+//     #[derive(Deserialize)]
+//     struct S<'a> {
+//         #[serde(borrow)]
+//         cow: Cow<'a, str>,
+//     }
+fn is_cow(ty: &syn::Type, elem: fn(&syn::Type) -> bool) -> bool {
+    let path = match ungroup(ty) {
+        syn::Type::Path(ty) => &ty.path,
+        _ => {
+            return false;
+        }
+    };
+    let seg = match path.segments.last() {
+        Some(seg) => seg,
+        None => {
+            return false;
+        }
+    };
+    let args = match &seg.arguments {
+        syn::PathArguments::AngleBracketed(bracketed) => &bracketed.args,
+        _ => {
+            return false;
+        }
+    };
+    seg.ident == "Cow"
+        && args.len() == 2
+        && match (&args[0], &args[1]) {
+            (syn::GenericArgument::Lifetime(_), syn::GenericArgument::Type(arg)) => elem(arg),
+            _ => false,
+        }
+}
+
+fn is_option(ty: &syn::Type, elem: fn(&syn::Type) -> bool) -> bool {
+    let path = match ungroup(ty) {
+        syn::Type::Path(ty) => &ty.path,
+        _ => {
+            return false;
+        }
+    };
+    let seg = match path.segments.last() {
+        Some(seg) => seg,
+        None => {
+            return false;
+        }
+    };
+    let args = match &seg.arguments {
+        syn::PathArguments::AngleBracketed(bracketed) => &bracketed.args,
+        _ => {
+            return false;
+        }
+    };
+    seg.ident == "Option"
+        && args.len() == 1
+        && match &args[0] {
+            syn::GenericArgument::Type(arg) => elem(arg),
+            _ => false,
+        }
+}
+
+// Whether the type looks like it might be `&T` where elem="T". This can have
+// false negatives and false positives.
+//
+// False negative:
+//
+//     type Yarn = str;
+//
+//     #[derive(Deserialize)]
+//     struct S<'a> {
+//         r: &'a Yarn,
+//     }
+//
+// False positive:
+//
+//     type str = [i16];
+//
+//     #[derive(Deserialize)]
+//     struct S<'a> {
+//         r: &'a str,
+//     }
+fn is_reference(ty: &syn::Type, elem: fn(&syn::Type) -> bool) -> bool {
+    match ungroup(ty) {
+        syn::Type::Reference(ty) => ty.mutability.is_none() && elem(&ty.elem),
+        _ => false,
+    }
+}
+
+fn is_str(ty: &syn::Type) -> bool {
+    is_primitive_type(ty, "str")
+}
+
+fn is_slice_u8(ty: &syn::Type) -> bool {
+    match ungroup(ty) {
+        syn::Type::Slice(ty) => is_primitive_type(&ty.elem, "u8"),
+        _ => false,
+    }
+}
+
+fn is_primitive_type(ty: &syn::Type, primitive: &str) -> bool {
+    match ungroup(ty) {
+        syn::Type::Path(ty) => ty.qself.is_none() && is_primitive_path(&ty.path, primitive),
+        _ => false,
+    }
+}
+
+fn is_primitive_path(path: &syn::Path, primitive: &str) -> bool {
+    path.leading_colon.is_none()
+        && path.segments.len() == 1
+        && path.segments[0].ident == primitive
+        && path.segments[0].arguments.is_empty()
+}
+
+// All lifetimes that this type could borrow from a Deserializer.
+//
+// For example a type `S<'a, 'b>` could borrow `'a` and `'b`. On the other hand
+// a type `for<'a> fn(&'a str)` could not borrow `'a` from the Deserializer.
+//
+// This is used when there is an explicit or implicit `#[serde(borrow)]`
+// attribute on the field so there must be at least one borrowable lifetime.
+fn borrowable_lifetimes(
+    cx: &Ctxt,
+    name: &str,
+    field: &syn::Field,
+) -> Result<BTreeSet<syn::Lifetime>, ()> {
+    let mut lifetimes = BTreeSet::new();
+    collect_lifetimes(&field.ty, &mut lifetimes);
+    if lifetimes.is_empty() {
+        let msg = format!("field `{}` has no lifetimes to borrow", name);
+        cx.error_spanned_by(field, msg);
+        Err(())
+    } else {
+        Ok(lifetimes)
+    }
+}
+
+fn collect_lifetimes(ty: &syn::Type, out: &mut BTreeSet<syn::Lifetime>) {
+    match ty {
+        syn::Type::Slice(ty) => {
+            collect_lifetimes(&ty.elem, out);
+        }
+        syn::Type::Array(ty) => {
+            collect_lifetimes(&ty.elem, out);
+        }
+        syn::Type::Ptr(ty) => {
+            collect_lifetimes(&ty.elem, out);
+        }
+        syn::Type::Reference(ty) => {
+            out.extend(ty.lifetime.iter().cloned());
+            collect_lifetimes(&ty.elem, out);
+        }
+        syn::Type::Tuple(ty) => {
+            for elem in &ty.elems {
+                collect_lifetimes(elem, out);
+            }
+        }
+        syn::Type::Path(ty) => {
+            if let Some(qself) = &ty.qself {
+                collect_lifetimes(&qself.ty, out);
+            }
+            for seg in &ty.path.segments {
+                if let syn::PathArguments::AngleBracketed(bracketed) = &seg.arguments {
+                    for arg in &bracketed.args {
+                        match arg {
+                            syn::GenericArgument::Lifetime(lifetime) => {
+                                out.insert(lifetime.clone());
+                            }
+                            syn::GenericArgument::Type(ty) => {
+                                collect_lifetimes(ty, out);
+                            }
+                            syn::GenericArgument::AssocType(binding) => {
+                                collect_lifetimes(&binding.ty, out);
+                            }
+                            _ => {}
+                        }
+                    }
+                }
+            }
+        }
+        syn::Type::Paren(ty) => {
+            collect_lifetimes(&ty.elem, out);
+        }
+        syn::Type::Group(ty) => {
+            collect_lifetimes(&ty.elem, out);
+        }
+        syn::Type::Macro(ty) => {
+            collect_lifetimes_from_tokens(ty.mac.tokens.clone(), out);
+        }
+        syn::Type::BareFn(_)
+        | syn::Type::Never(_)
+        | syn::Type::TraitObject(_)
+        | syn::Type::ImplTrait(_)
+        | syn::Type::Infer(_)
+        | syn::Type::Verbatim(_) => {}
+
+        #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))]
+        _ => {}
+    }
+}
+
+fn collect_lifetimes_from_tokens(tokens: TokenStream, out: &mut BTreeSet<syn::Lifetime>) {
+    let mut iter = tokens.into_iter();
+    while let Some(tt) = iter.next() {
+        match &tt {
+            TokenTree::Punct(op) if op.as_char() == '\'' && op.spacing() == Spacing::Joint => {
+                if let Some(TokenTree::Ident(ident)) = iter.next() {
+                    out.insert(syn::Lifetime {
+                        apostrophe: op.span(),
+                        ident,
+                    });
+                }
+            }
+            TokenTree::Group(group) => {
+                let tokens = group.stream();
+                collect_lifetimes_from_tokens(tokens, out);
+            }
+            _ => {}
+        }
+    }
+}
diff --git a/crates/serde_derive/src/internals/case.rs b/crates/serde_derive/src/internals/case.rs
new file mode 100644
index 0000000..5545051
--- /dev/null
+++ b/crates/serde_derive/src/internals/case.rs
@@ -0,0 +1,197 @@
+//! Code to convert the Rust-styled field/variant (e.g. `my_field`, `MyType`) to the
+//! case of the source (e.g. `my-field`, `MY_FIELD`).
+
+// See https://users.rust-lang.org/t/psa-dealing-with-warning-unused-import-std-ascii-asciiext-in-today-s-nightly/13726
+#[allow(deprecated, unused_imports)]
+use std::ascii::AsciiExt;
+
+use std::fmt::{self, Debug, Display};
+
+use self::RenameRule::*;
+
+/// The different possible ways to change case of fields in a struct, or variants in an enum.
+#[derive(Copy, Clone, PartialEq)]
+pub enum RenameRule {
+    /// Don't apply a default rename rule.
+    None,
+    /// Rename direct children to "lowercase" style.
+    LowerCase,
+    /// Rename direct children to "UPPERCASE" style.
+    UpperCase,
+    /// Rename direct children to "PascalCase" style, as typically used for
+    /// enum variants.
+    PascalCase,
+    /// Rename direct children to "camelCase" style.
+    CamelCase,
+    /// Rename direct children to "snake_case" style, as commonly used for
+    /// fields.
+    SnakeCase,
+    /// Rename direct children to "SCREAMING_SNAKE_CASE" style, as commonly
+    /// used for constants.
+    ScreamingSnakeCase,
+    /// Rename direct children to "kebab-case" style.
+    KebabCase,
+    /// Rename direct children to "SCREAMING-KEBAB-CASE" style.
+    ScreamingKebabCase,
+}
+
+static RENAME_RULES: &[(&str, RenameRule)] = &[
+    ("lowercase", LowerCase),
+    ("UPPERCASE", UpperCase),
+    ("PascalCase", PascalCase),
+    ("camelCase", CamelCase),
+    ("snake_case", SnakeCase),
+    ("SCREAMING_SNAKE_CASE", ScreamingSnakeCase),
+    ("kebab-case", KebabCase),
+    ("SCREAMING-KEBAB-CASE", ScreamingKebabCase),
+];
+
+impl RenameRule {
+    pub fn from_str(rename_all_str: &str) -> Result<Self, ParseError> {
+        for (name, rule) in RENAME_RULES {
+            if rename_all_str == *name {
+                return Ok(*rule);
+            }
+        }
+        Err(ParseError {
+            unknown: rename_all_str,
+        })
+    }
+
+    /// Apply a renaming rule to an enum variant, returning the version expected in the source.
+    pub fn apply_to_variant(&self, variant: &str) -> String {
+        match *self {
+            None | PascalCase => variant.to_owned(),
+            LowerCase => variant.to_ascii_lowercase(),
+            UpperCase => variant.to_ascii_uppercase(),
+            CamelCase => variant[..1].to_ascii_lowercase() + &variant[1..],
+            SnakeCase => {
+                let mut snake = String::new();
+                for (i, ch) in variant.char_indices() {
+                    if i > 0 && ch.is_uppercase() {
+                        snake.push('_');
+                    }
+                    snake.push(ch.to_ascii_lowercase());
+                }
+                snake
+            }
+            ScreamingSnakeCase => SnakeCase.apply_to_variant(variant).to_ascii_uppercase(),
+            KebabCase => SnakeCase.apply_to_variant(variant).replace('_', "-"),
+            ScreamingKebabCase => ScreamingSnakeCase
+                .apply_to_variant(variant)
+                .replace('_', "-"),
+        }
+    }
+
+    /// Apply a renaming rule to a struct field, returning the version expected in the source.
+    pub fn apply_to_field(&self, field: &str) -> String {
+        match *self {
+            None | LowerCase | SnakeCase => field.to_owned(),
+            UpperCase => field.to_ascii_uppercase(),
+            PascalCase => {
+                let mut pascal = String::new();
+                let mut capitalize = true;
+                for ch in field.chars() {
+                    if ch == '_' {
+                        capitalize = true;
+                    } else if capitalize {
+                        pascal.push(ch.to_ascii_uppercase());
+                        capitalize = false;
+                    } else {
+                        pascal.push(ch);
+                    }
+                }
+                pascal
+            }
+            CamelCase => {
+                let pascal = PascalCase.apply_to_field(field);
+                pascal[..1].to_ascii_lowercase() + &pascal[1..]
+            }
+            ScreamingSnakeCase => field.to_ascii_uppercase(),
+            KebabCase => field.replace('_', "-"),
+            ScreamingKebabCase => ScreamingSnakeCase.apply_to_field(field).replace('_', "-"),
+        }
+    }
+}
+
+pub struct ParseError<'a> {
+    unknown: &'a str,
+}
+
+impl<'a> Display for ParseError<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.write_str("unknown rename rule `rename_all = ")?;
+        Debug::fmt(self.unknown, f)?;
+        f.write_str("`, expected one of ")?;
+        for (i, (name, _rule)) in RENAME_RULES.iter().enumerate() {
+            if i > 0 {
+                f.write_str(", ")?;
+            }
+            Debug::fmt(name, f)?;
+        }
+        Ok(())
+    }
+}
+
+#[test]
+fn rename_variants() {
+    for &(original, lower, upper, camel, snake, screaming, kebab, screaming_kebab) in &[
+        (
+            "Outcome", "outcome", "OUTCOME", "outcome", "outcome", "OUTCOME", "outcome", "OUTCOME",
+        ),
+        (
+            "VeryTasty",
+            "verytasty",
+            "VERYTASTY",
+            "veryTasty",
+            "very_tasty",
+            "VERY_TASTY",
+            "very-tasty",
+            "VERY-TASTY",
+        ),
+        ("A", "a", "A", "a", "a", "A", "a", "A"),
+        ("Z42", "z42", "Z42", "z42", "z42", "Z42", "z42", "Z42"),
+    ] {
+        assert_eq!(None.apply_to_variant(original), original);
+        assert_eq!(LowerCase.apply_to_variant(original), lower);
+        assert_eq!(UpperCase.apply_to_variant(original), upper);
+        assert_eq!(PascalCase.apply_to_variant(original), original);
+        assert_eq!(CamelCase.apply_to_variant(original), camel);
+        assert_eq!(SnakeCase.apply_to_variant(original), snake);
+        assert_eq!(ScreamingSnakeCase.apply_to_variant(original), screaming);
+        assert_eq!(KebabCase.apply_to_variant(original), kebab);
+        assert_eq!(
+            ScreamingKebabCase.apply_to_variant(original),
+            screaming_kebab
+        );
+    }
+}
+
+#[test]
+fn rename_fields() {
+    for &(original, upper, pascal, camel, screaming, kebab, screaming_kebab) in &[
+        (
+            "outcome", "OUTCOME", "Outcome", "outcome", "OUTCOME", "outcome", "OUTCOME",
+        ),
+        (
+            "very_tasty",
+            "VERY_TASTY",
+            "VeryTasty",
+            "veryTasty",
+            "VERY_TASTY",
+            "very-tasty",
+            "VERY-TASTY",
+        ),
+        ("a", "A", "A", "a", "A", "a", "A"),
+        ("z42", "Z42", "Z42", "z42", "Z42", "z42", "Z42"),
+    ] {
+        assert_eq!(None.apply_to_field(original), original);
+        assert_eq!(UpperCase.apply_to_field(original), upper);
+        assert_eq!(PascalCase.apply_to_field(original), pascal);
+        assert_eq!(CamelCase.apply_to_field(original), camel);
+        assert_eq!(SnakeCase.apply_to_field(original), original);
+        assert_eq!(ScreamingSnakeCase.apply_to_field(original), screaming);
+        assert_eq!(KebabCase.apply_to_field(original), kebab);
+        assert_eq!(ScreamingKebabCase.apply_to_field(original), screaming_kebab);
+    }
+}
diff --git a/crates/serde_derive/src/internals/check.rs b/crates/serde_derive/src/internals/check.rs
new file mode 100644
index 0000000..05b4b8f
--- /dev/null
+++ b/crates/serde_derive/src/internals/check.rs
@@ -0,0 +1,442 @@
+use internals::ast::{Container, Data, Field, Style};
+use internals::attr::{Identifier, TagType};
+use internals::{ungroup, Ctxt, Derive};
+use syn::{Member, Type};
+
+// Cross-cutting checks that require looking at more than a single attrs object.
+// Simpler checks should happen when parsing and building the attrs.
+pub fn check(cx: &Ctxt, cont: &mut Container, derive: Derive) {
+    check_remote_generic(cx, cont);
+    check_getter(cx, cont);
+    check_flatten(cx, cont);
+    check_identifier(cx, cont);
+    check_variant_skip_attrs(cx, cont);
+    check_internal_tag_field_name_conflict(cx, cont);
+    check_adjacent_tag_conflict(cx, cont);
+    check_transparent(cx, cont, derive);
+    check_from_and_try_from(cx, cont);
+}
+
+// Remote derive definition type must have either all of the generics of the
+// remote type:
+//
+//     #[serde(remote = "Generic")]
+//     struct Generic<T> {…}
+//
+// or none of them, i.e. defining impls for one concrete instantiation of the
+// remote type only:
+//
+//     #[serde(remote = "Generic<T>")]
+//     struct ConcreteDef {…}
+//
+fn check_remote_generic(cx: &Ctxt, cont: &Container) {
+    if let Some(remote) = cont.attrs.remote() {
+        let local_has_generic = !cont.generics.params.is_empty();
+        let remote_has_generic = !remote.segments.last().unwrap().arguments.is_none();
+        if local_has_generic && remote_has_generic {
+            cx.error_spanned_by(remote, "remove generic parameters from this path");
+        }
+    }
+}
+
+// Getters are only allowed inside structs (not enums) with the `remote`
+// attribute.
+fn check_getter(cx: &Ctxt, cont: &Container) {
+    match cont.data {
+        Data::Enum(_) => {
+            if cont.data.has_getter() {
+                cx.error_spanned_by(
+                    cont.original,
+                    "#[serde(getter = \"...\")] is not allowed in an enum",
+                );
+            }
+        }
+        Data::Struct(_, _) => {
+            if cont.data.has_getter() && cont.attrs.remote().is_none() {
+                cx.error_spanned_by(
+                    cont.original,
+                    "#[serde(getter = \"...\")] can only be used in structs that have #[serde(remote = \"...\")]",
+                );
+            }
+        }
+    }
+}
+
+// Flattening has some restrictions we can test.
+fn check_flatten(cx: &Ctxt, cont: &Container) {
+    match &cont.data {
+        Data::Enum(variants) => {
+            for variant in variants {
+                for field in &variant.fields {
+                    check_flatten_field(cx, variant.style, field);
+                }
+            }
+        }
+        Data::Struct(style, fields) => {
+            for field in fields {
+                check_flatten_field(cx, *style, field);
+            }
+        }
+    }
+}
+
+fn check_flatten_field(cx: &Ctxt, style: Style, field: &Field) {
+    if !field.attrs.flatten() {
+        return;
+    }
+    match style {
+        Style::Tuple => {
+            cx.error_spanned_by(
+                field.original,
+                "#[serde(flatten)] cannot be used on tuple structs",
+            );
+        }
+        Style::Newtype => {
+            cx.error_spanned_by(
+                field.original,
+                "#[serde(flatten)] cannot be used on newtype structs",
+            );
+        }
+        _ => {}
+    }
+}
+
+// The `other` attribute must be used at most once and it must be the last
+// variant of an enum.
+//
+// Inside a `variant_identifier` all variants must be unit variants. Inside a
+// `field_identifier` all but possibly one variant must be unit variants. The
+// last variant may be a newtype variant which is an implicit "other" case.
+fn check_identifier(cx: &Ctxt, cont: &Container) {
+    let variants = match &cont.data {
+        Data::Enum(variants) => variants,
+        Data::Struct(_, _) => {
+            return;
+        }
+    };
+
+    for (i, variant) in variants.iter().enumerate() {
+        match (
+            variant.style,
+            cont.attrs.identifier(),
+            variant.attrs.other(),
+            cont.attrs.tag(),
+        ) {
+            // The `other` attribute may not be used in a variant_identifier.
+            (_, Identifier::Variant, true, _) => {
+                cx.error_spanned_by(
+                    variant.original,
+                    "#[serde(other)] may not be used on a variant identifier",
+                );
+            }
+
+            // Variant with `other` attribute cannot appear in untagged enum
+            (_, Identifier::No, true, &TagType::None) => {
+                cx.error_spanned_by(
+                    variant.original,
+                    "#[serde(other)] cannot appear on untagged enum",
+                );
+            }
+
+            // Variant with `other` attribute must be the last one.
+            (Style::Unit, Identifier::Field, true, _) | (Style::Unit, Identifier::No, true, _) => {
+                if i < variants.len() - 1 {
+                    cx.error_spanned_by(
+                        variant.original,
+                        "#[serde(other)] must be on the last variant",
+                    );
+                }
+            }
+
+            // Variant with `other` attribute must be a unit variant.
+            (_, Identifier::Field, true, _) | (_, Identifier::No, true, _) => {
+                cx.error_spanned_by(
+                    variant.original,
+                    "#[serde(other)] must be on a unit variant",
+                );
+            }
+
+            // Any sort of variant is allowed if this is not an identifier.
+            (_, Identifier::No, false, _) => {}
+
+            // Unit variant without `other` attribute is always fine.
+            (Style::Unit, _, false, _) => {}
+
+            // The last field is allowed to be a newtype catch-all.
+            (Style::Newtype, Identifier::Field, false, _) => {
+                if i < variants.len() - 1 {
+                    cx.error_spanned_by(
+                        variant.original,
+                        format!("`{}` must be the last variant", variant.ident),
+                    );
+                }
+            }
+
+            (_, Identifier::Field, false, _) => {
+                cx.error_spanned_by(
+                    variant.original,
+                    "#[serde(field_identifier)] may only contain unit variants",
+                );
+            }
+
+            (_, Identifier::Variant, false, _) => {
+                cx.error_spanned_by(
+                    variant.original,
+                    "#[serde(variant_identifier)] may only contain unit variants",
+                );
+            }
+        }
+    }
+}
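+
+// For illustration only (hypothetical variant names), an enum that satisfies
+// the rules checked above:
+//
+//     #[derive(Deserialize)]
+//     #[serde(field_identifier, rename_all = "lowercase")]
+//     enum Field {
+//         Secs,
+//         Nanos,
+//         Other(String), // optional newtype catch-all; must come last
+//     }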
+
+// Skip-(de)serializing attributes are not allowed on variants marked
+// (de)serialize_with.
+fn check_variant_skip_attrs(cx: &Ctxt, cont: &Container) {
+    let variants = match &cont.data {
+        Data::Enum(variants) => variants,
+        Data::Struct(_, _) => {
+            return;
+        }
+    };
+
+    for variant in variants.iter() {
+        if variant.attrs.serialize_with().is_some() {
+            if variant.attrs.skip_serializing() {
+                cx.error_spanned_by(
+                    variant.original,
+                    format!(
+                        "variant `{}` cannot have both #[serde(serialize_with)] and #[serde(skip_serializing)]",
+                        variant.ident
+                    ),
+                );
+            }
+
+            for field in &variant.fields {
+                let member = member_message(&field.member);
+
+                if field.attrs.skip_serializing() {
+                    cx.error_spanned_by(
+                        variant.original,
+                        format!(
+                            "variant `{}` cannot have both #[serde(serialize_with)] and a field {} marked with #[serde(skip_serializing)]",
+                            variant.ident, member
+                        ),
+                    );
+                }
+
+                if field.attrs.skip_serializing_if().is_some() {
+                    cx.error_spanned_by(
+                        variant.original,
+                        format!(
+                            "variant `{}` cannot have both #[serde(serialize_with)] and a field {} marked with #[serde(skip_serializing_if)]",
+                            variant.ident, member
+                        ),
+                    );
+                }
+            }
+        }
+
+        if variant.attrs.deserialize_with().is_some() {
+            if variant.attrs.skip_deserializing() {
+                cx.error_spanned_by(
+                    variant.original,
+                    format!(
+                        "variant `{}` cannot have both #[serde(deserialize_with)] and #[serde(skip_deserializing)]",
+                        variant.ident
+                    ),
+                );
+            }
+
+            for field in &variant.fields {
+                if field.attrs.skip_deserializing() {
+                    let member = member_message(&field.member);
+
+                    cx.error_spanned_by(
+                        variant.original,
+                        format!(
+                            "variant `{}` cannot have both #[serde(deserialize_with)] and a field {} marked with #[serde(skip_deserializing)]",
+                            variant.ident, member
+                        ),
+                    );
+                }
+            }
+        }
+    }
+}
+
+// The tag of an internally-tagged struct variant must not be the same as either
+// one of its fields, as this would result in duplicate keys in the serialized
+// output and/or ambiguity in the to-be-deserialized input.
+fn check_internal_tag_field_name_conflict(cx: &Ctxt, cont: &Container) {
+    let variants = match &cont.data {
+        Data::Enum(variants) => variants,
+        Data::Struct(_, _) => return,
+    };
+
+    let tag = match cont.attrs.tag() {
+        TagType::Internal { tag } => tag.as_str(),
+        TagType::External | TagType::Adjacent { .. } | TagType::None => return,
+    };
+
+    let diagnose_conflict = || {
+        cx.error_spanned_by(
+            cont.original,
+            format!("variant field name `{}` conflicts with internal tag", tag),
+        );
+    };
+
+    for variant in variants {
+        match variant.style {
+            Style::Struct => {
+                for field in &variant.fields {
+                    let check_ser = !field.attrs.skip_serializing();
+                    let check_de = !field.attrs.skip_deserializing();
+                    let name = field.attrs.name();
+                    let ser_name = name.serialize_name();
+
+                    if check_ser && ser_name == tag {
+                        diagnose_conflict();
+                        return;
+                    }
+
+                    for de_name in field.attrs.aliases() {
+                        if check_de && de_name == tag {
+                            diagnose_conflict();
+                            return;
+                        }
+                    }
+                }
+            }
+            Style::Unit | Style::Newtype | Style::Tuple => {}
+        }
+    }
+}
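+
+// For illustration only (hypothetical enum), the conflict diagnosed above:
+//
+//     #[derive(Serialize)]
+//     #[serde(tag = "kind")]
+//     enum Message {
+//         Request { kind: String }, // field `kind` collides with the `kind` tag
+//     }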
+
+// In the case of adjacently-tagged enums, the type and the contents tag must
+// differ, for the same reason.
+fn check_adjacent_tag_conflict(cx: &Ctxt, cont: &Container) {
+    let (type_tag, content_tag) = match cont.attrs.tag() {
+        TagType::Adjacent { tag, content } => (tag, content),
+        TagType::Internal { .. } | TagType::External | TagType::None => return,
+    };
+
+    if type_tag == content_tag {
+        cx.error_spanned_by(
+            cont.original,
+            format!(
+                "enum tags `{}` for type and content conflict with each other",
+                type_tag
+            ),
+        );
+    }
+}
+
+// Enums and unit structs cannot be transparent.
+fn check_transparent(cx: &Ctxt, cont: &mut Container, derive: Derive) {
+    if !cont.attrs.transparent() {
+        return;
+    }
+
+    if cont.attrs.type_from().is_some() {
+        cx.error_spanned_by(
+            cont.original,
+            "#[serde(transparent)] is not allowed with #[serde(from = \"...\")]",
+        );
+    }
+
+    if cont.attrs.type_try_from().is_some() {
+        cx.error_spanned_by(
+            cont.original,
+            "#[serde(transparent)] is not allowed with #[serde(try_from = \"...\")]",
+        );
+    }
+
+    if cont.attrs.type_into().is_some() {
+        cx.error_spanned_by(
+            cont.original,
+            "#[serde(transparent)] is not allowed with #[serde(into = \"...\")]",
+        );
+    }
+
+    let fields = match &mut cont.data {
+        Data::Enum(_) => {
+            cx.error_spanned_by(
+                cont.original,
+                "#[serde(transparent)] is not allowed on an enum",
+            );
+            return;
+        }
+        Data::Struct(Style::Unit, _) => {
+            cx.error_spanned_by(
+                cont.original,
+                "#[serde(transparent)] is not allowed on a unit struct",
+            );
+            return;
+        }
+        Data::Struct(_, fields) => fields,
+    };
+
+    let mut transparent_field = None;
+
+    for field in fields {
+        if allow_transparent(field, derive) {
+            if transparent_field.is_some() {
+                cx.error_spanned_by(
+                    cont.original,
+                    "#[serde(transparent)] requires struct to have at most one transparent field",
+                );
+                return;
+            }
+            transparent_field = Some(field);
+        }
+    }
+
+    match transparent_field {
+        Some(transparent_field) => transparent_field.attrs.mark_transparent(),
+        None => match derive {
+            Derive::Serialize => {
+                cx.error_spanned_by(
+                    cont.original,
+                    "#[serde(transparent)] requires at least one field that is not skipped",
+                );
+            }
+            Derive::Deserialize => {
+                cx.error_spanned_by(
+                    cont.original,
+                    "#[serde(transparent)] requires at least one field that is neither skipped nor has a default",
+                );
+            }
+        },
+    }
+}
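+
+// For illustration only (hypothetical struct): `allow_transparent` below ignores
+// PhantomData and skipped fields, so this still has exactly one transparent field:
+//
+//     #[derive(Serialize)]
+//     #[serde(transparent)]
+//     struct Wrapper {
+//         value: u64,
+//         marker: std::marker::PhantomData<()>,
+//     }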
+
+fn member_message(member: &Member) -> String {
+    match member {
+        Member::Named(ident) => format!("`{}`", ident),
+        Member::Unnamed(i) => format!("#{}", i.index),
+    }
+}
+
+fn allow_transparent(field: &Field, derive: Derive) -> bool {
+    if let Type::Path(ty) = ungroup(field.ty) {
+        if let Some(seg) = ty.path.segments.last() {
+            if seg.ident == "PhantomData" {
+                return false;
+            }
+        }
+    }
+
+    match derive {
+        Derive::Serialize => !field.attrs.skip_serializing(),
+        Derive::Deserialize => !field.attrs.skip_deserializing() && field.attrs.default().is_none(),
+    }
+}
+
+fn check_from_and_try_from(cx: &Ctxt, cont: &mut Container) {
+    if cont.attrs.type_from().is_some() && cont.attrs.type_try_from().is_some() {
+        cx.error_spanned_by(
+            cont.original,
+            "#[serde(from = \"...\")] and #[serde(try_from = \"...\")] conflict with each other",
+        );
+    }
+}
diff --git a/crates/serde_derive/src/internals/ctxt.rs b/crates/serde_derive/src/internals/ctxt.rs
new file mode 100644
index 0000000..d692c2a
--- /dev/null
+++ b/crates/serde_derive/src/internals/ctxt.rs
@@ -0,0 +1,62 @@
+use quote::ToTokens;
+use std::cell::RefCell;
+use std::fmt::Display;
+use std::thread;
+use syn;
+
+/// A type to collect errors together and format them.
+///
+/// Dropping this object will cause a panic. It must be consumed using `check`.
+///
+/// References can be shared since this type uses run-time exclusive mut checking.
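+///
+/// A minimal usage sketch (illustrative only; `tokens` stands in for any
+/// `ToTokens` value):
+///
+/// ```ignore
+/// let cx = Ctxt::new();
+/// cx.error_spanned_by(&tokens, "unsupported attribute");
+/// let result = cx.check(); // Err(errors) if anything was recorded
+/// ```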
+#[derive(Default)]
+pub struct Ctxt {
+    // The contents will be set to `None` during checking. This is so that checking can be
+    // enforced.
+    errors: RefCell<Option<Vec<syn::Error>>>,
+}
+
+impl Ctxt {
+    /// Create a new context object.
+    ///
+    /// This object contains no errors, but will still trigger a panic if it is not `check`ed.
+    pub fn new() -> Self {
+        Ctxt {
+            errors: RefCell::new(Some(Vec::new())),
+        }
+    }
+
+    /// Add an error to the context object with a tokenizable object.
+    ///
+    /// The object is used for spanning in error messages.
+    pub fn error_spanned_by<A: ToTokens, T: Display>(&self, obj: A, msg: T) {
+        self.errors
+            .borrow_mut()
+            .as_mut()
+            .unwrap()
+            // Curb monomorphization from generating too many identical methods.
+            .push(syn::Error::new_spanned(obj.into_token_stream(), msg));
+    }
+
+    /// Add one of Syn's parse errors.
+    pub fn syn_error(&self, err: syn::Error) {
+        self.errors.borrow_mut().as_mut().unwrap().push(err);
+    }
+
+    /// Consume this object, producing a formatted error string if there are errors.
+    pub fn check(self) -> Result<(), Vec<syn::Error>> {
+        let errors = self.errors.borrow_mut().take().unwrap();
+        match errors.len() {
+            0 => Ok(()),
+            _ => Err(errors),
+        }
+    }
+}
+
+impl Drop for Ctxt {
+    fn drop(&mut self) {
+        if !thread::panicking() && self.errors.borrow().is_some() {
+            panic!("forgot to check for errors");
+        }
+    }
+}
diff --git a/crates/serde_derive/src/internals/mod.rs b/crates/serde_derive/src/internals/mod.rs
new file mode 100644
index 0000000..5e9f416
--- /dev/null
+++ b/crates/serde_derive/src/internals/mod.rs
@@ -0,0 +1,28 @@
+pub mod ast;
+pub mod attr;
+
+mod ctxt;
+pub use self::ctxt::Ctxt;
+
+mod receiver;
+pub use self::receiver::replace_receiver;
+
+mod case;
+mod check;
+mod respan;
+mod symbol;
+
+use syn::Type;
+
+#[derive(Copy, Clone)]
+pub enum Derive {
+    Serialize,
+    Deserialize,
+}
+
+pub fn ungroup(mut ty: &Type) -> &Type {
+    while let Type::Group(group) = ty {
+        ty = &group.elem;
+    }
+    ty
+}
diff --git a/crates/serde_derive/src/internals/receiver.rs b/crates/serde_derive/src/internals/receiver.rs
new file mode 100644
index 0000000..5dc01db
--- /dev/null
+++ b/crates/serde_derive/src/internals/receiver.rs
@@ -0,0 +1,292 @@
+use internals::respan::respan;
+use proc_macro2::Span;
+use quote::ToTokens;
+use std::mem;
+use syn::punctuated::Punctuated;
+use syn::{
+    parse_quote, Data, DeriveInput, Expr, ExprPath, GenericArgument, GenericParam, Generics, Macro,
+    Path, PathArguments, QSelf, ReturnType, Type, TypeParamBound, TypePath, WherePredicate,
+};
+
+pub fn replace_receiver(input: &mut DeriveInput) {
+    let self_ty = {
+        let ident = &input.ident;
+        let ty_generics = input.generics.split_for_impl().1;
+        parse_quote!(#ident #ty_generics)
+    };
+    let mut visitor = ReplaceReceiver(&self_ty);
+    visitor.visit_generics_mut(&mut input.generics);
+    visitor.visit_data_mut(&mut input.data);
+}
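+
+// For illustration only (hypothetical type): given a remote definition
+//
+//     struct Node<T> {
+//         next: Option<Box<Self>>,
+//         value: T,
+//     }
+//
+// the visitor rewrites `Self` in field types and bounds to `Node<T>`, so the
+// generated impls can name the type without relying on a `Self` context.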
+
+struct ReplaceReceiver<'a>(&'a TypePath);
+
+impl ReplaceReceiver<'_> {
+    fn self_ty(&self, span: Span) -> TypePath {
+        let tokens = self.0.to_token_stream();
+        let respanned = respan(tokens, span);
+        syn::parse2(respanned).unwrap()
+    }
+
+    fn self_to_qself(&self, qself: &mut Option<QSelf>, path: &mut Path) {
+        if path.leading_colon.is_some() || path.segments[0].ident != "Self" {
+            return;
+        }
+
+        if path.segments.len() == 1 {
+            self.self_to_expr_path(path);
+            return;
+        }
+
+        let span = path.segments[0].ident.span();
+        *qself = Some(QSelf {
+            lt_token: Token![<](span),
+            ty: Box::new(Type::Path(self.self_ty(span))),
+            position: 0,
+            as_token: None,
+            gt_token: Token![>](span),
+        });
+
+        path.leading_colon = Some(**path.segments.pairs().next().unwrap().punct().unwrap());
+
+        let segments = mem::replace(&mut path.segments, Punctuated::new());
+        path.segments = segments.into_pairs().skip(1).collect();
+    }
+
+    fn self_to_expr_path(&self, path: &mut Path) {
+        let self_ty = self.self_ty(path.segments[0].ident.span());
+        let variant = mem::replace(path, self_ty.path);
+        for segment in &mut path.segments {
+            if let PathArguments::AngleBracketed(bracketed) = &mut segment.arguments {
+                if bracketed.colon2_token.is_none() && !bracketed.args.is_empty() {
+                    bracketed.colon2_token = Some(<Token![::]>::default());
+                }
+            }
+        }
+        if variant.segments.len() > 1 {
+            path.segments.push_punct(<Token![::]>::default());
+            path.segments.extend(variant.segments.into_pairs().skip(1));
+        }
+    }
+}
+
+impl ReplaceReceiver<'_> {
+    // `Self` -> `Receiver`
+    fn visit_type_mut(&mut self, ty: &mut Type) {
+        let span = if let Type::Path(node) = ty {
+            if node.qself.is_none() && node.path.is_ident("Self") {
+                node.path.segments[0].ident.span()
+            } else {
+                self.visit_type_path_mut(node);
+                return;
+            }
+        } else {
+            self.visit_type_mut_impl(ty);
+            return;
+        };
+        *ty = self.self_ty(span).into();
+    }
+
+    // `Self::Assoc` -> `<Receiver>::Assoc`
+    fn visit_type_path_mut(&mut self, ty: &mut TypePath) {
+        if ty.qself.is_none() {
+            self.self_to_qself(&mut ty.qself, &mut ty.path);
+        }
+        self.visit_type_path_mut_impl(ty);
+    }
+
+    // `Self::method` -> `<Receiver>::method`
+    fn visit_expr_path_mut(&mut self, expr: &mut ExprPath) {
+        if expr.qself.is_none() {
+            self.self_to_qself(&mut expr.qself, &mut expr.path);
+        }
+        self.visit_expr_path_mut_impl(expr);
+    }
+
+    // Everything below is simply traversing the syntax tree.
+
+    fn visit_type_mut_impl(&mut self, ty: &mut Type) {
+        match ty {
+            Type::Array(ty) => {
+                self.visit_type_mut(&mut ty.elem);
+                self.visit_expr_mut(&mut ty.len);
+            }
+            Type::BareFn(ty) => {
+                for arg in &mut ty.inputs {
+                    self.visit_type_mut(&mut arg.ty);
+                }
+                self.visit_return_type_mut(&mut ty.output);
+            }
+            Type::Group(ty) => self.visit_type_mut(&mut ty.elem),
+            Type::ImplTrait(ty) => {
+                for bound in &mut ty.bounds {
+                    self.visit_type_param_bound_mut(bound);
+                }
+            }
+            Type::Macro(ty) => self.visit_macro_mut(&mut ty.mac),
+            Type::Paren(ty) => self.visit_type_mut(&mut ty.elem),
+            Type::Path(ty) => {
+                if let Some(qself) = &mut ty.qself {
+                    self.visit_type_mut(&mut qself.ty);
+                }
+                self.visit_path_mut(&mut ty.path);
+            }
+            Type::Ptr(ty) => self.visit_type_mut(&mut ty.elem),
+            Type::Reference(ty) => self.visit_type_mut(&mut ty.elem),
+            Type::Slice(ty) => self.visit_type_mut(&mut ty.elem),
+            Type::TraitObject(ty) => {
+                for bound in &mut ty.bounds {
+                    self.visit_type_param_bound_mut(bound);
+                }
+            }
+            Type::Tuple(ty) => {
+                for elem in &mut ty.elems {
+                    self.visit_type_mut(elem);
+                }
+            }
+
+            Type::Infer(_) | Type::Never(_) | Type::Verbatim(_) => {}
+
+            #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))]
+            _ => {}
+        }
+    }
+
+    fn visit_type_path_mut_impl(&mut self, ty: &mut TypePath) {
+        if let Some(qself) = &mut ty.qself {
+            self.visit_type_mut(&mut qself.ty);
+        }
+        self.visit_path_mut(&mut ty.path);
+    }
+
+    fn visit_expr_path_mut_impl(&mut self, expr: &mut ExprPath) {
+        if let Some(qself) = &mut expr.qself {
+            self.visit_type_mut(&mut qself.ty);
+        }
+        self.visit_path_mut(&mut expr.path);
+    }
+
+    fn visit_path_mut(&mut self, path: &mut Path) {
+        for segment in &mut path.segments {
+            self.visit_path_arguments_mut(&mut segment.arguments);
+        }
+    }
+
+    fn visit_path_arguments_mut(&mut self, arguments: &mut PathArguments) {
+        match arguments {
+            PathArguments::None => {}
+            PathArguments::AngleBracketed(arguments) => {
+                for arg in &mut arguments.args {
+                    match arg {
+                        GenericArgument::Type(arg) => self.visit_type_mut(arg),
+                        GenericArgument::AssocType(arg) => self.visit_type_mut(&mut arg.ty),
+                        GenericArgument::Lifetime(_)
+                        | GenericArgument::Const(_)
+                        | GenericArgument::AssocConst(_)
+                        | GenericArgument::Constraint(_) => {}
+                        #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))]
+                        _ => {}
+                    }
+                }
+            }
+            PathArguments::Parenthesized(arguments) => {
+                for argument in &mut arguments.inputs {
+                    self.visit_type_mut(argument);
+                }
+                self.visit_return_type_mut(&mut arguments.output);
+            }
+        }
+    }
+
+    fn visit_return_type_mut(&mut self, return_type: &mut ReturnType) {
+        match return_type {
+            ReturnType::Default => {}
+            ReturnType::Type(_, output) => self.visit_type_mut(output),
+        }
+    }
+
+    fn visit_type_param_bound_mut(&mut self, bound: &mut TypeParamBound) {
+        match bound {
+            TypeParamBound::Trait(bound) => self.visit_path_mut(&mut bound.path),
+            TypeParamBound::Lifetime(_) | TypeParamBound::Verbatim(_) => {}
+            #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))]
+            _ => {}
+        }
+    }
+
+    fn visit_generics_mut(&mut self, generics: &mut Generics) {
+        for param in &mut generics.params {
+            match param {
+                GenericParam::Type(param) => {
+                    for bound in &mut param.bounds {
+                        self.visit_type_param_bound_mut(bound);
+                    }
+                }
+                GenericParam::Lifetime(_) | GenericParam::Const(_) => {}
+            }
+        }
+        if let Some(where_clause) = &mut generics.where_clause {
+            for predicate in &mut where_clause.predicates {
+                match predicate {
+                    WherePredicate::Type(predicate) => {
+                        self.visit_type_mut(&mut predicate.bounded_ty);
+                        for bound in &mut predicate.bounds {
+                            self.visit_type_param_bound_mut(bound);
+                        }
+                    }
+                    WherePredicate::Lifetime(_) => {}
+                    #[cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))]
+                    _ => {}
+                }
+            }
+        }
+    }
+
+    fn visit_data_mut(&mut self, data: &mut Data) {
+        match data {
+            Data::Struct(data) => {
+                for field in &mut data.fields {
+                    self.visit_type_mut(&mut field.ty);
+                }
+            }
+            Data::Enum(data) => {
+                for variant in &mut data.variants {
+                    for field in &mut variant.fields {
+                        self.visit_type_mut(&mut field.ty);
+                    }
+                }
+            }
+            Data::Union(_) => {}
+        }
+    }
+
+    fn visit_expr_mut(&mut self, expr: &mut Expr) {
+        match expr {
+            Expr::Binary(expr) => {
+                self.visit_expr_mut(&mut expr.left);
+                self.visit_expr_mut(&mut expr.right);
+            }
+            Expr::Call(expr) => {
+                self.visit_expr_mut(&mut expr.func);
+                for arg in &mut expr.args {
+                    self.visit_expr_mut(arg);
+                }
+            }
+            Expr::Cast(expr) => {
+                self.visit_expr_mut(&mut expr.expr);
+                self.visit_type_mut(&mut expr.ty);
+            }
+            Expr::Field(expr) => self.visit_expr_mut(&mut expr.base),
+            Expr::Index(expr) => {
+                self.visit_expr_mut(&mut expr.expr);
+                self.visit_expr_mut(&mut expr.index);
+            }
+            Expr::Paren(expr) => self.visit_expr_mut(&mut expr.expr),
+            Expr::Path(expr) => self.visit_expr_path_mut(expr),
+            Expr::Unary(expr) => self.visit_expr_mut(&mut expr.expr),
+            _ => {}
+        }
+    }
+
+    fn visit_macro_mut(&mut self, _mac: &mut Macro) {}
+}
diff --git a/crates/serde_derive/src/internals/respan.rs b/crates/serde_derive/src/internals/respan.rs
new file mode 100644
index 0000000..dcec701
--- /dev/null
+++ b/crates/serde_derive/src/internals/respan.rs
@@ -0,0 +1,16 @@
+use proc_macro2::{Group, Span, TokenStream, TokenTree};
+
+pub(crate) fn respan(stream: TokenStream, span: Span) -> TokenStream {
+    stream
+        .into_iter()
+        .map(|token| respan_token(token, span))
+        .collect()
+}
+
+fn respan_token(mut token: TokenTree, span: Span) -> TokenTree {
+    if let TokenTree::Group(g) = &mut token {
+        *g = Group::new(g.delimiter(), respan(g.stream(), span));
+    }
+    token.set_span(span);
+    token
+}
diff --git a/crates/serde_derive/src/internals/symbol.rs b/crates/serde_derive/src/internals/symbol.rs
new file mode 100644
index 0000000..9606edb
--- /dev/null
+++ b/crates/serde_derive/src/internals/symbol.rs
@@ -0,0 +1,69 @@
+use std::fmt::{self, Display};
+use syn::{Ident, Path};
+
+#[derive(Copy, Clone)]
+pub struct Symbol(&'static str);
+
+pub const ALIAS: Symbol = Symbol("alias");
+pub const BORROW: Symbol = Symbol("borrow");
+pub const BOUND: Symbol = Symbol("bound");
+pub const CONTENT: Symbol = Symbol("content");
+pub const CRATE: Symbol = Symbol("crate");
+pub const DEFAULT: Symbol = Symbol("default");
+pub const DENY_UNKNOWN_FIELDS: Symbol = Symbol("deny_unknown_fields");
+pub const DESERIALIZE: Symbol = Symbol("deserialize");
+pub const DESERIALIZE_WITH: Symbol = Symbol("deserialize_with");
+pub const EXPECTING: Symbol = Symbol("expecting");
+pub const FIELD_IDENTIFIER: Symbol = Symbol("field_identifier");
+pub const FLATTEN: Symbol = Symbol("flatten");
+pub const FROM: Symbol = Symbol("from");
+pub const GETTER: Symbol = Symbol("getter");
+pub const INTO: Symbol = Symbol("into");
+pub const OTHER: Symbol = Symbol("other");
+pub const REMOTE: Symbol = Symbol("remote");
+pub const RENAME: Symbol = Symbol("rename");
+pub const RENAME_ALL: Symbol = Symbol("rename_all");
+pub const REPR: Symbol = Symbol("repr");
+pub const SERDE: Symbol = Symbol("serde");
+pub const SERIALIZE: Symbol = Symbol("serialize");
+pub const SERIALIZE_WITH: Symbol = Symbol("serialize_with");
+pub const SKIP: Symbol = Symbol("skip");
+pub const SKIP_DESERIALIZING: Symbol = Symbol("skip_deserializing");
+pub const SKIP_SERIALIZING: Symbol = Symbol("skip_serializing");
+pub const SKIP_SERIALIZING_IF: Symbol = Symbol("skip_serializing_if");
+pub const TAG: Symbol = Symbol("tag");
+pub const TRANSPARENT: Symbol = Symbol("transparent");
+pub const TRY_FROM: Symbol = Symbol("try_from");
+pub const UNTAGGED: Symbol = Symbol("untagged");
+pub const VARIANT_IDENTIFIER: Symbol = Symbol("variant_identifier");
+pub const WITH: Symbol = Symbol("with");
+
+impl PartialEq<Symbol> for Ident {
+    fn eq(&self, word: &Symbol) -> bool {
+        self == word.0
+    }
+}
+
+impl<'a> PartialEq<Symbol> for &'a Ident {
+    fn eq(&self, word: &Symbol) -> bool {
+        *self == word.0
+    }
+}
+
+impl PartialEq<Symbol> for Path {
+    fn eq(&self, word: &Symbol) -> bool {
+        self.is_ident(word.0)
+    }
+}
+
+impl<'a> PartialEq<Symbol> for &'a Path {
+    fn eq(&self, word: &Symbol) -> bool {
+        self.is_ident(word.0)
+    }
+}
+
+impl Display for Symbol {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str(self.0)
+    }
+}
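+
+// These `PartialEq<Symbol>` impls let the attribute parser compare parsed paths
+// and idents against the constants above directly, e.g. `meta.path == RENAME`,
+// rather than matching on string slices.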
diff --git a/crates/serde_derive/src/lib.rs b/crates/serde_derive/src/lib.rs
new file mode 100644
index 0000000..eda4acb
--- /dev/null
+++ b/crates/serde_derive/src/lib.rs
@@ -0,0 +1,110 @@
+//! This crate provides Serde's two derive macros.
+//!
+//! ```edition2018
+//! # use serde_derive::{Serialize, Deserialize};
+//! #
+//! #[derive(Serialize, Deserialize)]
+//! # struct S;
+//! #
+//! # fn main() {}
+//! ```
+//!
+//! Please refer to [https://serde.rs/derive.html] for how to set this up.
+//!
+//! [https://serde.rs/derive.html]: https://serde.rs/derive.html
+
+#![doc(html_root_url = "https://docs.rs/serde_derive/1.0.158")]
+#![allow(unknown_lints, bare_trait_objects)]
+// Ignored clippy lints
+#![allow(
+    // clippy false positive: https://github.com/rust-lang/rust-clippy/issues/7054
+    clippy::branches_sharing_code,
+    clippy::cognitive_complexity,
+    // clippy bug: https://github.com/rust-lang/rust-clippy/issues/7575
+    clippy::collapsible_match,
+    clippy::derive_partial_eq_without_eq,
+    clippy::enum_variant_names,
+    // clippy bug: https://github.com/rust-lang/rust-clippy/issues/6797
+    clippy::manual_map,
+    clippy::match_like_matches_macro,
+    clippy::needless_pass_by_value,
+    clippy::too_many_arguments,
+    clippy::trivially_copy_pass_by_ref,
+    clippy::used_underscore_binding,
+    clippy::wildcard_in_or_patterns,
+    // clippy bug: https://github.com/rust-lang/rust-clippy/issues/5704
+    clippy::unnested_or_patterns,
+)]
+// Ignored clippy_pedantic lints
+#![allow(
+    clippy::cast_possible_truncation,
+    clippy::checked_conversions,
+    clippy::doc_markdown,
+    clippy::enum_glob_use,
+    clippy::indexing_slicing,
+    clippy::items_after_statements,
+    clippy::let_underscore_untyped,
+    clippy::manual_assert,
+    clippy::map_err_ignore,
+    clippy::match_same_arms,
+    // clippy bug: https://github.com/rust-lang/rust-clippy/issues/6984
+    clippy::match_wildcard_for_single_variants,
+    clippy::module_name_repetitions,
+    clippy::must_use_candidate,
+    clippy::option_if_let_else,
+    clippy::similar_names,
+    clippy::single_match_else,
+    clippy::struct_excessive_bools,
+    clippy::too_many_lines,
+    clippy::unseparated_literal_suffix,
+    clippy::unused_self,
+    clippy::use_self,
+    clippy::wildcard_imports
+)]
+#![cfg_attr(all(test, exhaustive), feature(non_exhaustive_omitted_patterns_lint))]
+
+#[macro_use]
+extern crate quote;
+#[macro_use]
+extern crate syn;
+
+extern crate proc_macro;
+extern crate proc_macro2;
+
+mod internals;
+
+use proc_macro::TokenStream;
+use syn::DeriveInput;
+
+#[macro_use]
+mod bound;
+#[macro_use]
+mod fragment;
+
+mod de;
+mod dummy;
+mod pretend;
+mod ser;
+mod this;
+mod try;
+
+#[proc_macro_derive(Serialize, attributes(serde))]
+pub fn derive_serialize(input: TokenStream) -> TokenStream {
+    let mut input = parse_macro_input!(input as DeriveInput);
+    ser::expand_derive_serialize(&mut input)
+        .unwrap_or_else(to_compile_errors)
+        .into()
+}
+
+#[proc_macro_derive(Deserialize, attributes(serde))]
+pub fn derive_deserialize(input: TokenStream) -> TokenStream {
+    let mut input = parse_macro_input!(input as DeriveInput);
+    de::expand_derive_deserialize(&mut input)
+        .unwrap_or_else(to_compile_errors)
+        .into()
+}
+
+fn to_compile_errors(errors: Vec<syn::Error>) -> proc_macro2::TokenStream {
+    let compile_errors = errors.iter().map(syn::Error::to_compile_error);
+    quote!(#(#compile_errors)*)
+}
diff --git a/crates/serde_derive/src/pretend.rs b/crates/serde_derive/src/pretend.rs
new file mode 100644
index 0000000..d7b953d
--- /dev/null
+++ b/crates/serde_derive/src/pretend.rs
@@ -0,0 +1,201 @@
+use proc_macro2::TokenStream;
+use quote::format_ident;
+
+use internals::ast::{Container, Data, Field, Style, Variant};
+
+// Suppress dead_code warnings that would otherwise appear when using a remote
+// derive. Other than this pretend code, a struct annotated with remote derive
+// never has its fields referenced and an enum annotated with remote derive
+// never has its variants constructed.
+//
+//     warning: field is never used: `i`
+//      --> src/main.rs:4:20
+//       |
+//     4 | struct StructDef { i: i32 }
+//       |                    ^^^^^^
+//
+//     warning: variant is never constructed: `V`
+//      --> src/main.rs:8:16
+//       |
+//     8 | enum EnumDef { V }
+//       |                ^
+//
+pub fn pretend_used(cont: &Container, is_packed: bool) -> TokenStream {
+    let pretend_fields = pretend_fields_used(cont, is_packed);
+    let pretend_variants = pretend_variants_used(cont);
+
+    quote! {
+        #pretend_fields
+        #pretend_variants
+    }
+}
+
+// For structs with named fields, expands to:
+//
+//     match None::<&T> {
+//         Some(T { a: __v0, b: __v1 }) => {}
+//         _ => {}
+//     }
+//
+// For packed structs on sufficiently new rustc, expands to:
+//
+//     match None::<&T> {
+//         Some(__v @ T { a: _, b: _ }) => {
+//             let _ = addr_of!(__v.a);
+//             let _ = addr_of!(__v.b);
+//         }
+//         _ => {}
+//     }
+//
+// For packed structs on older rustc, we assume Sized and !Drop, and expand to:
+//
+//     match None::<T> {
+//         Some(T { a: __v0, b: __v1 }) => {}
+//         _ => {}
+//     }
+//
+// For enums, expands to the following but only including struct variants:
+//
+//     match None::<&T> {
+//         Some(T::A { a: __v0 }) => {}
+//         Some(T::B { b: __v0 }) => {}
+//         _ => {}
+//     }
+//
+fn pretend_fields_used(cont: &Container, is_packed: bool) -> TokenStream {
+    match &cont.data {
+        Data::Enum(variants) => pretend_fields_used_enum(cont, variants),
+        Data::Struct(Style::Struct, fields) => {
+            if is_packed {
+                pretend_fields_used_struct_packed(cont, fields)
+            } else {
+                pretend_fields_used_struct(cont, fields)
+            }
+        }
+        Data::Struct(_, _) => quote!(),
+    }
+}
+
+fn pretend_fields_used_struct(cont: &Container, fields: &[Field]) -> TokenStream {
+    let type_ident = &cont.ident;
+    let (_, ty_generics, _) = cont.generics.split_for_impl();
+
+    let members = fields.iter().map(|field| &field.member);
+    let placeholders = (0usize..).map(|i| format_ident!("__v{}", i));
+
+    quote! {
+        match _serde::__private::None::<&#type_ident #ty_generics> {
+            _serde::__private::Some(#type_ident { #(#members: #placeholders),* }) => {}
+            _ => {}
+        }
+    }
+}
+
+fn pretend_fields_used_struct_packed(cont: &Container, fields: &[Field]) -> TokenStream {
+    let type_ident = &cont.ident;
+    let (_, ty_generics, _) = cont.generics.split_for_impl();
+
+    let members = fields.iter().map(|field| &field.member).collect::<Vec<_>>();
+
+    #[cfg(not(no_ptr_addr_of))]
+    {
+        quote! {
+            match _serde::__private::None::<&#type_ident #ty_generics> {
+                _serde::__private::Some(__v @ #type_ident { #(#members: _),* }) => {
+                    #(
+                        let _ = _serde::__private::ptr::addr_of!(__v.#members);
+                    )*
+                }
+                _ => {}
+            }
+        }
+    }
+
+    #[cfg(no_ptr_addr_of)]
+    {
+        let placeholders = (0usize..).map(|i| format_ident!("__v{}", i));
+
+        quote! {
+            match _serde::__private::None::<#type_ident #ty_generics> {
+                _serde::__private::Some(#type_ident { #(#members: #placeholders),* }) => {}
+                _ => {}
+            }
+        }
+    }
+}
+
+fn pretend_fields_used_enum(cont: &Container, variants: &[Variant]) -> TokenStream {
+    let type_ident = &cont.ident;
+    let (_, ty_generics, _) = cont.generics.split_for_impl();
+
+    let patterns = variants
+        .iter()
+        .filter_map(|variant| match variant.style {
+            Style::Struct => {
+                let variant_ident = &variant.ident;
+                let members = variant.fields.iter().map(|field| &field.member);
+                let placeholders = (0usize..).map(|i| format_ident!("__v{}", i));
+                Some(quote!(#type_ident::#variant_ident { #(#members: #placeholders),* }))
+            }
+            _ => None,
+        })
+        .collect::<Vec<_>>();
+
+    quote! {
+        match _serde::__private::None::<&#type_ident #ty_generics> {
+            #(
+                _serde::__private::Some(#patterns) => {}
+            )*
+            _ => {}
+        }
+    }
+}
+
+// Expands to one of these per enum variant:
+//
+//     match None {
+//         Some((__v0, __v1,)) => {
+//             let _ = E::V { a: __v0, b: __v1 };
+//         }
+//         _ => {}
+//     }
+//
+fn pretend_variants_used(cont: &Container) -> TokenStream {
+    let variants = match &cont.data {
+        Data::Enum(variants) => variants,
+        Data::Struct(_, _) => {
+            return quote!();
+        }
+    };
+
+    let type_ident = &cont.ident;
+    let (_, ty_generics, _) = cont.generics.split_for_impl();
+    let turbofish = ty_generics.as_turbofish();
+
+    let cases = variants.iter().map(|variant| {
+        let variant_ident = &variant.ident;
+        let placeholders = &(0..variant.fields.len())
+            .map(|i| format_ident!("__v{}", i))
+            .collect::<Vec<_>>();
+
+        let pat = match variant.style {
+            Style::Struct => {
+                let members = variant.fields.iter().map(|field| &field.member);
+                quote!({ #(#members: #placeholders),* })
+            }
+            Style::Tuple | Style::Newtype => quote!(( #(#placeholders),* )),
+            Style::Unit => quote!(),
+        };
+
+        quote! {
+            match _serde::__private::None {
+                _serde::__private::Some((#(#placeholders,)*)) => {
+                    let _ = #type_ident::#variant_ident #turbofish #pat;
+                }
+                _ => {}
+            }
+        }
+    });
+
+    quote!(#(#cases)*)
+}
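+
+// Illustrative scenario for the pretend code above (comment only; this mirrors
+// the remote-derive example from the serde documentation and is not referenced
+// by this file):
+//
+//     use std::time::Duration;
+//
+//     #[derive(Serialize)]
+//     #[serde(remote = "Duration")]
+//     struct DurationDef {
+//         #[serde(getter = "Duration::as_secs")]
+//         secs: u64,
+//         #[serde(getter = "Duration::subsec_nanos")]
+//         nanos: u32,
+//     }
+//
+// Without `pretend_used`, rustc would warn that `secs` and `nanos` are never
+// read, since `DurationDef` only mirrors the remote type's layout.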
diff --git a/crates/serde_derive/src/ser.rs b/crates/serde_derive/src/ser.rs
new file mode 100644
index 0000000..43695dd
--- /dev/null
+++ b/crates/serde_derive/src/ser.rs
@@ -0,0 +1,1340 @@
+use proc_macro2::{Span, TokenStream};
+use syn::spanned::Spanned;
+use syn::{self, Ident, Index, Member};
+
+use bound;
+use dummy;
+use fragment::{Fragment, Match, Stmts};
+use internals::ast::{Container, Data, Field, Style, Variant};
+use internals::{attr, replace_receiver, Ctxt, Derive};
+use pretend;
+use this;
+
+pub fn expand_derive_serialize(
+    input: &mut syn::DeriveInput,
+) -> Result<TokenStream, Vec<syn::Error>> {
+    replace_receiver(input);
+
+    let ctxt = Ctxt::new();
+    let cont = match Container::from_ast(&ctxt, input, Derive::Serialize) {
+        Some(cont) => cont,
+        None => return Err(ctxt.check().unwrap_err()),
+    };
+    precondition(&ctxt, &cont);
+    ctxt.check()?;
+
+    let ident = &cont.ident;
+    let params = Parameters::new(&cont);
+    let (impl_generics, ty_generics, where_clause) = params.generics.split_for_impl();
+    let body = Stmts(serialize_body(&cont, &params));
+    let serde = cont.attrs.serde_path();
+
+    let impl_block = if let Some(remote) = cont.attrs.remote() {
+        let vis = &input.vis;
+        let used = pretend::pretend_used(&cont, params.is_packed);
+        quote! {
+            impl #impl_generics #ident #ty_generics #where_clause {
+                #vis fn serialize<__S>(__self: &#remote #ty_generics, __serializer: __S) -> #serde::__private::Result<__S::Ok, __S::Error>
+                where
+                    __S: #serde::Serializer,
+                {
+                    #used
+                    #body
+                }
+            }
+        }
+    } else {
+        quote! {
+            #[automatically_derived]
+            impl #impl_generics #serde::Serialize for #ident #ty_generics #where_clause {
+                fn serialize<__S>(&self, __serializer: __S) -> #serde::__private::Result<__S::Ok, __S::Error>
+                where
+                    __S: #serde::Serializer,
+                {
+                    #body
+                }
+            }
+        }
+    };
+
+    Ok(dummy::wrap_in_const(
+        cont.attrs.custom_serde_path(),
+        "SERIALIZE",
+        ident,
+        impl_block,
+    ))
+}
+
+fn precondition(cx: &Ctxt, cont: &Container) {
+    match cont.attrs.identifier() {
+        attr::Identifier::No => {}
+        attr::Identifier::Field => {
+            cx.error_spanned_by(cont.original, "field identifiers cannot be serialized");
+        }
+        attr::Identifier::Variant => {
+            cx.error_spanned_by(cont.original, "variant identifiers cannot be serialized");
+        }
+    }
+}
+
+struct Parameters {
+    /// Variable holding the value being serialized. Either `self` for local
+    /// types or `__self` for remote types.
+    self_var: Ident,
+
+    /// Path to the type the impl is for. Either a single `Ident` for local
+    /// types (does not include generic parameters) or `some::remote::Path` for
+    /// remote types.
+    this_type: syn::Path,
+
+    /// Same as `this_type` but using `::<T>` for generic parameters for use in
+    /// expression position.
+    this_value: syn::Path,
+
+    /// Generics including any explicit and inferred bounds for the impl.
+    generics: syn::Generics,
+
+    /// Type has a `serde(remote = "...")` attribute.
+    is_remote: bool,
+
+    /// Type has a repr(packed) attribute.
+    is_packed: bool,
+}
+
+impl Parameters {
+    fn new(cont: &Container) -> Self {
+        let is_remote = cont.attrs.remote().is_some();
+        let self_var = if is_remote {
+            Ident::new("__self", Span::call_site())
+        } else {
+            Ident::new("self", Span::call_site())
+        };
+
+        let this_type = this::this_type(cont);
+        let this_value = this::this_value(cont);
+        let is_packed = cont.attrs.is_packed();
+        let generics = build_generics(cont);
+
+        Parameters {
+            self_var,
+            this_type,
+            this_value,
+            generics,
+            is_remote,
+            is_packed,
+        }
+    }
+
+    /// Type name to use in error messages and `&'static str` arguments to
+    /// various Serializer methods.
+    fn type_name(&self) -> String {
+        self.this_type.segments.last().unwrap().ident.to_string()
+    }
+}
+
+// All the generics in the input, plus a bound `T: Serialize` for each generic
+// field type that will be serialized by us.
+fn build_generics(cont: &Container) -> syn::Generics {
+    let generics = bound::without_defaults(cont.generics);
+
+    let generics =
+        bound::with_where_predicates_from_fields(cont, &generics, attr::Field::ser_bound);
+
+    let generics =
+        bound::with_where_predicates_from_variants(cont, &generics, attr::Variant::ser_bound);
+
+    match cont.attrs.ser_bound() {
+        Some(predicates) => bound::with_where_predicates(&generics, predicates),
+        None => bound::with_bound(
+            cont,
+            &generics,
+            needs_serialize_bound,
+            &parse_quote!(_serde::Serialize),
+        ),
+    }
+}
+
+// Fields with a `skip_serializing` or `serialize_with` attribute, or which
+// belong to a variant with a `skip_serializing` or `serialize_with` attribute,
+// are not serialized by us so we do not generate a bound. Fields with a `bound`
+// attribute specify their own bound so we do not generate one. All other fields
+// may need a `T: Serialize` bound where T is the type of the field.
+fn needs_serialize_bound(field: &attr::Field, variant: Option<&attr::Variant>) -> bool {
+    !field.skip_serializing()
+        && field.serialize_with().is_none()
+        && field.ser_bound().is_none()
+        && variant.map_or(true, |variant| {
+            !variant.skip_serializing()
+                && variant.serialize_with().is_none()
+                && variant.ser_bound().is_none()
+        })
+}
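+
+// For example (comment only; `Wrapper` is a hypothetical user type), given
+//
+//     #[derive(Serialize)]
+//     struct Wrapper<T, U> {
+//         value: T,
+//         #[serde(skip_serializing)]
+//         ignored: U,
+//     }
+//
+// only `T: _serde::Serialize` is inferred as a bound, because the skipped
+// field is never serialized by the generated impl and needs no bound of its own.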
+
+fn serialize_body(cont: &Container, params: &Parameters) -> Fragment {
+    if cont.attrs.transparent() {
+        serialize_transparent(cont, params)
+    } else if let Some(type_into) = cont.attrs.type_into() {
+        serialize_into(params, type_into)
+    } else {
+        match &cont.data {
+            Data::Enum(variants) => serialize_enum(params, variants, &cont.attrs),
+            Data::Struct(Style::Struct, fields) => serialize_struct(params, fields, &cont.attrs),
+            Data::Struct(Style::Tuple, fields) => {
+                serialize_tuple_struct(params, fields, &cont.attrs)
+            }
+            Data::Struct(Style::Newtype, fields) => {
+                serialize_newtype_struct(params, &fields[0], &cont.attrs)
+            }
+            Data::Struct(Style::Unit, _) => serialize_unit_struct(&cont.attrs),
+        }
+    }
+}
+
+fn serialize_transparent(cont: &Container, params: &Parameters) -> Fragment {
+    let fields = match &cont.data {
+        Data::Struct(_, fields) => fields,
+        Data::Enum(_) => unreachable!(),
+    };
+
+    let self_var = &params.self_var;
+    let transparent_field = fields.iter().find(|f| f.attrs.transparent()).unwrap();
+    let member = &transparent_field.member;
+
+    let path = match transparent_field.attrs.serialize_with() {
+        Some(path) => quote!(#path),
+        None => {
+            let span = transparent_field.original.span();
+            quote_spanned!(span=> _serde::Serialize::serialize)
+        }
+    };
+
+    quote_block! {
+        #path(&#self_var.#member, __serializer)
+    }
+}
+
+fn serialize_into(params: &Parameters, type_into: &syn::Type) -> Fragment {
+    let self_var = &params.self_var;
+    quote_block! {
+        _serde::Serialize::serialize(
+            &_serde::__private::Into::<#type_into>::into(_serde::__private::Clone::clone(#self_var)),
+            __serializer)
+    }
+}
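+
+// Illustrative attribute handled above (comment only; `Celsius` is a
+// hypothetical newtype for which `From<Celsius> for f64` is implemented):
+//
+//     #[derive(Clone, Serialize)]
+//     #[serde(into = "f64")]
+//     struct Celsius(f64);
+//
+// The generated body clones `self`, converts it with `Into::<f64>::into`, and
+// serializes the converted value, matching the expansion produced here.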
+
+fn serialize_unit_struct(cattrs: &attr::Container) -> Fragment {
+    let type_name = cattrs.name().serialize_name();
+
+    quote_expr! {
+        _serde::Serializer::serialize_unit_struct(__serializer, #type_name)
+    }
+}
+
+fn serialize_newtype_struct(
+    params: &Parameters,
+    field: &Field,
+    cattrs: &attr::Container,
+) -> Fragment {
+    let type_name = cattrs.name().serialize_name();
+
+    let mut field_expr = get_member(
+        params,
+        field,
+        &Member::Unnamed(Index {
+            index: 0,
+            span: Span::call_site(),
+        }),
+    );
+    if let Some(path) = field.attrs.serialize_with() {
+        field_expr = wrap_serialize_field_with(params, field.ty, path, &field_expr);
+    }
+
+    let span = field.original.span();
+    let func = quote_spanned!(span=> _serde::Serializer::serialize_newtype_struct);
+    quote_expr! {
+        #func(__serializer, #type_name, #field_expr)
+    }
+}
+
+fn serialize_tuple_struct(
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+) -> Fragment {
+    let serialize_stmts =
+        serialize_tuple_struct_visitor(fields, params, false, &TupleTrait::SerializeTupleStruct);
+
+    let type_name = cattrs.name().serialize_name();
+
+    let mut serialized_fields = fields
+        .iter()
+        .enumerate()
+        .filter(|(_, field)| !field.attrs.skip_serializing())
+        .peekable();
+
+    let let_mut = mut_if(serialized_fields.peek().is_some());
+
+    let len = serialized_fields
+        .map(|(i, field)| match field.attrs.skip_serializing_if() {
+            None => quote!(1),
+            Some(path) => {
+                let index = syn::Index {
+                    index: i as u32,
+                    span: Span::call_site(),
+                };
+                let field_expr = get_member(params, field, &Member::Unnamed(index));
+                quote!(if #path(#field_expr) { 0 } else { 1 })
+            }
+        })
+        .fold(quote!(0), |sum, expr| quote!(#sum + #expr));
+
+    quote_block! {
+        let #let_mut __serde_state = try!(_serde::Serializer::serialize_tuple_struct(__serializer, #type_name, #len));
+        #(#serialize_stmts)*
+        _serde::ser::SerializeTupleStruct::end(__serde_state)
+    }
+}
+
+fn serialize_struct(params: &Parameters, fields: &[Field], cattrs: &attr::Container) -> Fragment {
+    assert!(fields.len() as u64 <= u64::from(u32::max_value()));
+
+    if cattrs.has_flatten() {
+        serialize_struct_as_map(params, fields, cattrs)
+    } else {
+        serialize_struct_as_struct(params, fields, cattrs)
+    }
+}
+
+fn serialize_struct_tag_field(cattrs: &attr::Container, struct_trait: &StructTrait) -> TokenStream {
+    match cattrs.tag() {
+        attr::TagType::Internal { tag } => {
+            let type_name = cattrs.name().serialize_name();
+            let func = struct_trait.serialize_field(Span::call_site());
+            quote! {
+                try!(#func(&mut __serde_state, #tag, #type_name));
+            }
+        }
+        _ => quote! {},
+    }
+}
+
+fn serialize_struct_as_struct(
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+) -> Fragment {
+    let serialize_fields =
+        serialize_struct_visitor(fields, params, false, &StructTrait::SerializeStruct);
+
+    let type_name = cattrs.name().serialize_name();
+
+    let tag_field = serialize_struct_tag_field(cattrs, &StructTrait::SerializeStruct);
+    let tag_field_exists = !tag_field.is_empty();
+
+    let mut serialized_fields = fields
+        .iter()
+        .filter(|&field| !field.attrs.skip_serializing())
+        .peekable();
+
+    let let_mut = mut_if(serialized_fields.peek().is_some() || tag_field_exists);
+
+    let len = serialized_fields
+        .map(|field| match field.attrs.skip_serializing_if() {
+            None => quote!(1),
+            Some(path) => {
+                let field_expr = get_member(params, field, &field.member);
+                quote!(if #path(#field_expr) { 0 } else { 1 })
+            }
+        })
+        .fold(
+            quote!(#tag_field_exists as usize),
+            |sum, expr| quote!(#sum + #expr),
+        );
+
+    quote_block! {
+        let #let_mut __serde_state = try!(_serde::Serializer::serialize_struct(__serializer, #type_name, #len));
+        #tag_field
+        #(#serialize_fields)*
+        _serde::ser::SerializeStruct::end(__serde_state)
+    }
+}
+
+fn serialize_struct_as_map(
+    params: &Parameters,
+    fields: &[Field],
+    cattrs: &attr::Container,
+) -> Fragment {
+    let serialize_fields =
+        serialize_struct_visitor(fields, params, false, &StructTrait::SerializeMap);
+
+    let tag_field = serialize_struct_tag_field(cattrs, &StructTrait::SerializeMap);
+    let tag_field_exists = !tag_field.is_empty();
+
+    let mut serialized_fields = fields
+        .iter()
+        .filter(|&field| !field.attrs.skip_serializing())
+        .peekable();
+
+    let let_mut = mut_if(serialized_fields.peek().is_some() || tag_field_exists);
+
+    let len = if cattrs.has_flatten() {
+        quote!(_serde::__private::None)
+    } else {
+        let len = serialized_fields
+            .map(|field| match field.attrs.skip_serializing_if() {
+                None => quote!(1),
+                Some(path) => {
+                    let field_expr = get_member(params, field, &field.member);
+                    quote!(if #path(#field_expr) { 0 } else { 1 })
+                }
+            })
+            .fold(
+                quote!(#tag_field_exists as usize),
+                |sum, expr| quote!(#sum + #expr),
+            );
+        quote!(_serde::__private::Some(#len))
+    };
+
+    quote_block! {
+        let #let_mut __serde_state = try!(_serde::Serializer::serialize_map(__serializer, #len));
+        #tag_field
+        #(#serialize_fields)*
+        _serde::ser::SerializeMap::end(__serde_state)
+    }
+}
+
+fn serialize_enum(params: &Parameters, variants: &[Variant], cattrs: &attr::Container) -> Fragment {
+    assert!(variants.len() as u64 <= u64::from(u32::max_value()));
+
+    let self_var = &params.self_var;
+
+    let arms: Vec<_> = variants
+        .iter()
+        .enumerate()
+        .map(|(variant_index, variant)| {
+            serialize_variant(params, variant, variant_index as u32, cattrs)
+        })
+        .collect();
+
+    quote_expr! {
+        match *#self_var {
+            #(#arms)*
+        }
+    }
+}
+
+fn serialize_variant(
+    params: &Parameters,
+    variant: &Variant,
+    variant_index: u32,
+    cattrs: &attr::Container,
+) -> TokenStream {
+    let this_value = &params.this_value;
+    let variant_ident = &variant.ident;
+
+    if variant.attrs.skip_serializing() {
+        let skipped_msg = format!(
+            "the enum variant {}::{} cannot be serialized",
+            params.type_name(),
+            variant_ident
+        );
+        let skipped_err = quote! {
+            _serde::__private::Err(_serde::ser::Error::custom(#skipped_msg))
+        };
+        let fields_pat = match variant.style {
+            Style::Unit => quote!(),
+            Style::Newtype | Style::Tuple => quote!((..)),
+            Style::Struct => quote!({ .. }),
+        };
+        quote! {
+            #this_value::#variant_ident #fields_pat => #skipped_err,
+        }
+    } else {
+        // Variant was not skipped; serialize it according to the container's tag type.
+        let case = match variant.style {
+            Style::Unit => {
+                quote! {
+                    #this_value::#variant_ident
+                }
+            }
+            Style::Newtype => {
+                quote! {
+                    #this_value::#variant_ident(ref __field0)
+                }
+            }
+            Style::Tuple => {
+                let field_names = (0..variant.fields.len())
+                    .map(|i| Ident::new(&format!("__field{}", i), Span::call_site()));
+                quote! {
+                    #this_value::#variant_ident(#(ref #field_names),*)
+                }
+            }
+            Style::Struct => {
+                let members = variant.fields.iter().map(|f| &f.member);
+                quote! {
+                    #this_value::#variant_ident { #(ref #members),* }
+                }
+            }
+        };
+
+        let body = Match(match cattrs.tag() {
+            attr::TagType::External => {
+                serialize_externally_tagged_variant(params, variant, variant_index, cattrs)
+            }
+            attr::TagType::Internal { tag } => {
+                serialize_internally_tagged_variant(params, variant, cattrs, tag)
+            }
+            attr::TagType::Adjacent { tag, content } => {
+                serialize_adjacently_tagged_variant(params, variant, cattrs, tag, content)
+            }
+            attr::TagType::None => serialize_untagged_variant(params, variant, cattrs),
+        });
+
+        quote! {
+            #case => #body
+        }
+    }
+}
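+
+// The four `TagType` cases dispatched above correspond to serde's documented
+// enum representations. As a rough illustration (hypothetical value
+// `E::V { a: 1 }` serialized to JSON):
+//
+//     externally tagged (default):                  {"V": {"a": 1}}
+//     internally tagged (tag = "type"):             {"type": "V", "a": 1}
+//     adjacently tagged (tag = "t", content = "c"): {"t": "V", "c": {"a": 1}}
+//     untagged:                                     {"a": 1}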
+
+fn serialize_externally_tagged_variant(
+    params: &Parameters,
+    variant: &Variant,
+    variant_index: u32,
+    cattrs: &attr::Container,
+) -> Fragment {
+    let type_name = cattrs.name().serialize_name();
+    let variant_name = variant.attrs.name().serialize_name();
+
+    if let Some(path) = variant.attrs.serialize_with() {
+        let ser = wrap_serialize_variant_with(params, path, variant);
+        return quote_expr! {
+            _serde::Serializer::serialize_newtype_variant(
+                __serializer,
+                #type_name,
+                #variant_index,
+                #variant_name,
+                #ser,
+            )
+        };
+    }
+
+    match effective_style(variant) {
+        Style::Unit => {
+            quote_expr! {
+                _serde::Serializer::serialize_unit_variant(
+                    __serializer,
+                    #type_name,
+                    #variant_index,
+                    #variant_name,
+                )
+            }
+        }
+        Style::Newtype => {
+            let field = &variant.fields[0];
+            let mut field_expr = quote!(__field0);
+            if let Some(path) = field.attrs.serialize_with() {
+                field_expr = wrap_serialize_field_with(params, field.ty, path, &field_expr);
+            }
+
+            let span = field.original.span();
+            let func = quote_spanned!(span=> _serde::Serializer::serialize_newtype_variant);
+            quote_expr! {
+                #func(
+                    __serializer,
+                    #type_name,
+                    #variant_index,
+                    #variant_name,
+                    #field_expr,
+                )
+            }
+        }
+        Style::Tuple => serialize_tuple_variant(
+            TupleVariant::ExternallyTagged {
+                type_name,
+                variant_index,
+                variant_name,
+            },
+            params,
+            &variant.fields,
+        ),
+        Style::Struct => serialize_struct_variant(
+            StructVariant::ExternallyTagged {
+                variant_index,
+                variant_name,
+            },
+            params,
+            &variant.fields,
+            &type_name,
+        ),
+    }
+}
+
+fn serialize_internally_tagged_variant(
+    params: &Parameters,
+    variant: &Variant,
+    cattrs: &attr::Container,
+    tag: &str,
+) -> Fragment {
+    let type_name = cattrs.name().serialize_name();
+    let variant_name = variant.attrs.name().serialize_name();
+
+    let enum_ident_str = params.type_name();
+    let variant_ident_str = variant.ident.to_string();
+
+    if let Some(path) = variant.attrs.serialize_with() {
+        let ser = wrap_serialize_variant_with(params, path, variant);
+        return quote_expr! {
+            _serde::__private::ser::serialize_tagged_newtype(
+                __serializer,
+                #enum_ident_str,
+                #variant_ident_str,
+                #tag,
+                #variant_name,
+                #ser,
+            )
+        };
+    }
+
+    match effective_style(variant) {
+        Style::Unit => {
+            quote_block! {
+                let mut __struct = try!(_serde::Serializer::serialize_struct(
+                    __serializer, #type_name, 1));
+                try!(_serde::ser::SerializeStruct::serialize_field(
+                    &mut __struct, #tag, #variant_name));
+                _serde::ser::SerializeStruct::end(__struct)
+            }
+        }
+        Style::Newtype => {
+            let field = &variant.fields[0];
+            let mut field_expr = quote!(__field0);
+            if let Some(path) = field.attrs.serialize_with() {
+                field_expr = wrap_serialize_field_with(params, field.ty, path, &field_expr);
+            }
+
+            let span = field.original.span();
+            let func = quote_spanned!(span=> _serde::__private::ser::serialize_tagged_newtype);
+            quote_expr! {
+                #func(
+                    __serializer,
+                    #enum_ident_str,
+                    #variant_ident_str,
+                    #tag,
+                    #variant_name,
+                    #field_expr,
+                )
+            }
+        }
+        Style::Struct => serialize_struct_variant(
+            StructVariant::InternallyTagged { tag, variant_name },
+            params,
+            &variant.fields,
+            &type_name,
+        ),
+        Style::Tuple => unreachable!("checked in serde_derive_internals"),
+    }
+}
+
+fn serialize_adjacently_tagged_variant(
+    params: &Parameters,
+    variant: &Variant,
+    cattrs: &attr::Container,
+    tag: &str,
+    content: &str,
+) -> Fragment {
+    let this_type = &params.this_type;
+    let type_name = cattrs.name().serialize_name();
+    let variant_name = variant.attrs.name().serialize_name();
+
+    let inner = Stmts(if let Some(path) = variant.attrs.serialize_with() {
+        let ser = wrap_serialize_variant_with(params, path, variant);
+        quote_expr! {
+            _serde::Serialize::serialize(#ser, __serializer)
+        }
+    } else {
+        match effective_style(variant) {
+            Style::Unit => {
+                return quote_block! {
+                    let mut __struct = try!(_serde::Serializer::serialize_struct(
+                        __serializer, #type_name, 1));
+                    try!(_serde::ser::SerializeStruct::serialize_field(
+                        &mut __struct, #tag, #variant_name));
+                    _serde::ser::SerializeStruct::end(__struct)
+                };
+            }
+            Style::Newtype => {
+                let field = &variant.fields[0];
+                let mut field_expr = quote!(__field0);
+                if let Some(path) = field.attrs.serialize_with() {
+                    field_expr = wrap_serialize_field_with(params, field.ty, path, &field_expr);
+                }
+
+                let span = field.original.span();
+                let func = quote_spanned!(span=> _serde::ser::SerializeStruct::serialize_field);
+                return quote_block! {
+                    let mut __struct = try!(_serde::Serializer::serialize_struct(
+                        __serializer, #type_name, 2));
+                    try!(_serde::ser::SerializeStruct::serialize_field(
+                        &mut __struct, #tag, #variant_name));
+                    try!(#func(
+                        &mut __struct, #content, #field_expr));
+                    _serde::ser::SerializeStruct::end(__struct)
+                };
+            }
+            Style::Tuple => {
+                serialize_tuple_variant(TupleVariant::Untagged, params, &variant.fields)
+            }
+            Style::Struct => serialize_struct_variant(
+                StructVariant::Untagged,
+                params,
+                &variant.fields,
+                &variant_name,
+            ),
+        }
+    });
+
+    let fields_ty = variant.fields.iter().map(|f| &f.ty);
+    let fields_ident: &Vec<_> = &match variant.style {
+        Style::Unit => {
+            if variant.attrs.serialize_with().is_some() {
+                vec![]
+            } else {
+                unreachable!()
+            }
+        }
+        Style::Newtype => vec![Member::Named(Ident::new("__field0", Span::call_site()))],
+        Style::Tuple => (0..variant.fields.len())
+            .map(|i| Member::Named(Ident::new(&format!("__field{}", i), Span::call_site())))
+            .collect(),
+        Style::Struct => variant.fields.iter().map(|f| f.member.clone()).collect(),
+    };
+
+    let (_, ty_generics, where_clause) = params.generics.split_for_impl();
+
+    let wrapper_generics = if fields_ident.is_empty() {
+        params.generics.clone()
+    } else {
+        bound::with_lifetime_bound(&params.generics, "'__a")
+    };
+    let (wrapper_impl_generics, wrapper_ty_generics, _) = wrapper_generics.split_for_impl();
+
+    quote_block! {
+        struct __AdjacentlyTagged #wrapper_generics #where_clause {
+            data: (#(&'__a #fields_ty,)*),
+            phantom: _serde::__private::PhantomData<#this_type #ty_generics>,
+        }
+
+        impl #wrapper_impl_generics _serde::Serialize for __AdjacentlyTagged #wrapper_ty_generics #where_clause {
+            fn serialize<__S>(&self, __serializer: __S) -> _serde::__private::Result<__S::Ok, __S::Error>
+            where
+                __S: _serde::Serializer,
+            {
+                // Elements that have skip_serializing will be unused.
+                #[allow(unused_variables)]
+                let (#(#fields_ident,)*) = self.data;
+                #inner
+            }
+        }
+
+        let mut __struct = try!(_serde::Serializer::serialize_struct(
+            __serializer, #type_name, 2));
+        try!(_serde::ser::SerializeStruct::serialize_field(
+            &mut __struct, #tag, #variant_name));
+        try!(_serde::ser::SerializeStruct::serialize_field(
+            &mut __struct, #content, &__AdjacentlyTagged {
+                data: (#(#fields_ident,)*),
+                phantom: _serde::__private::PhantomData::<#this_type #ty_generics>,
+            }));
+        _serde::ser::SerializeStruct::end(__struct)
+    }
+}
+
+fn serialize_untagged_variant(
+    params: &Parameters,
+    variant: &Variant,
+    cattrs: &attr::Container,
+) -> Fragment {
+    if let Some(path) = variant.attrs.serialize_with() {
+        let ser = wrap_serialize_variant_with(params, path, variant);
+        return quote_expr! {
+            _serde::Serialize::serialize(#ser, __serializer)
+        };
+    }
+
+    match effective_style(variant) {
+        Style::Unit => {
+            quote_expr! {
+                _serde::Serializer::serialize_unit(__serializer)
+            }
+        }
+        Style::Newtype => {
+            let field = &variant.fields[0];
+            let mut field_expr = quote!(__field0);
+            if let Some(path) = field.attrs.serialize_with() {
+                field_expr = wrap_serialize_field_with(params, field.ty, path, &field_expr);
+            }
+
+            let span = field.original.span();
+            let func = quote_spanned!(span=> _serde::Serialize::serialize);
+            quote_expr! {
+                #func(#field_expr, __serializer)
+            }
+        }
+        Style::Tuple => serialize_tuple_variant(TupleVariant::Untagged, params, &variant.fields),
+        Style::Struct => {
+            let type_name = cattrs.name().serialize_name();
+            serialize_struct_variant(StructVariant::Untagged, params, &variant.fields, &type_name)
+        }
+    }
+}
+
+enum TupleVariant {
+    ExternallyTagged {
+        type_name: String,
+        variant_index: u32,
+        variant_name: String,
+    },
+    Untagged,
+}
+
+fn serialize_tuple_variant(
+    context: TupleVariant,
+    params: &Parameters,
+    fields: &[Field],
+) -> Fragment {
+    let tuple_trait = match context {
+        TupleVariant::ExternallyTagged { .. } => TupleTrait::SerializeTupleVariant,
+        TupleVariant::Untagged => TupleTrait::SerializeTuple,
+    };
+
+    let serialize_stmts = serialize_tuple_struct_visitor(fields, params, true, &tuple_trait);
+
+    let mut serialized_fields = fields
+        .iter()
+        .enumerate()
+        .filter(|(_, field)| !field.attrs.skip_serializing())
+        .peekable();
+
+    let let_mut = mut_if(serialized_fields.peek().is_some());
+
+    let len = serialized_fields
+        .map(|(i, field)| match field.attrs.skip_serializing_if() {
+            None => quote!(1),
+            Some(path) => {
+                let field_expr = Ident::new(&format!("__field{}", i), Span::call_site());
+                quote!(if #path(#field_expr) { 0 } else { 1 })
+            }
+        })
+        .fold(quote!(0), |sum, expr| quote!(#sum + #expr));
+
+    match context {
+        TupleVariant::ExternallyTagged {
+            type_name,
+            variant_index,
+            variant_name,
+        } => {
+            quote_block! {
+                let #let_mut __serde_state = try!(_serde::Serializer::serialize_tuple_variant(
+                    __serializer,
+                    #type_name,
+                    #variant_index,
+                    #variant_name,
+                    #len));
+                #(#serialize_stmts)*
+                _serde::ser::SerializeTupleVariant::end(__serde_state)
+            }
+        }
+        TupleVariant::Untagged => {
+            quote_block! {
+                let #let_mut __serde_state = try!(_serde::Serializer::serialize_tuple(
+                    __serializer,
+                    #len));
+                #(#serialize_stmts)*
+                _serde::ser::SerializeTuple::end(__serde_state)
+            }
+        }
+    }
+}
+
+enum StructVariant<'a> {
+    ExternallyTagged {
+        variant_index: u32,
+        variant_name: String,
+    },
+    InternallyTagged {
+        tag: &'a str,
+        variant_name: String,
+    },
+    Untagged,
+}
+
+fn serialize_struct_variant(
+    context: StructVariant,
+    params: &Parameters,
+    fields: &[Field],
+    name: &str,
+) -> Fragment {
+    if fields.iter().any(|field| field.attrs.flatten()) {
+        return serialize_struct_variant_with_flatten(context, params, fields, name);
+    }
+
+    let struct_trait = match context {
+        StructVariant::ExternallyTagged { .. } => StructTrait::SerializeStructVariant,
+        StructVariant::InternallyTagged { .. } | StructVariant::Untagged => {
+            StructTrait::SerializeStruct
+        }
+    };
+
+    let serialize_fields = serialize_struct_visitor(fields, params, true, &struct_trait);
+
+    let mut serialized_fields = fields
+        .iter()
+        .filter(|&field| !field.attrs.skip_serializing())
+        .peekable();
+
+    let let_mut = mut_if(serialized_fields.peek().is_some());
+
+    let len = serialized_fields
+        .map(|field| {
+            let member = &field.member;
+
+            match field.attrs.skip_serializing_if() {
+                Some(path) => quote!(if #path(#member) { 0 } else { 1 }),
+                None => quote!(1),
+            }
+        })
+        .fold(quote!(0), |sum, expr| quote!(#sum + #expr));
+
+    match context {
+        StructVariant::ExternallyTagged {
+            variant_index,
+            variant_name,
+        } => {
+            quote_block! {
+                let #let_mut __serde_state = try!(_serde::Serializer::serialize_struct_variant(
+                    __serializer,
+                    #name,
+                    #variant_index,
+                    #variant_name,
+                    #len,
+                ));
+                #(#serialize_fields)*
+                _serde::ser::SerializeStructVariant::end(__serde_state)
+            }
+        }
+        StructVariant::InternallyTagged { tag, variant_name } => {
+            quote_block! {
+                let mut __serde_state = try!(_serde::Serializer::serialize_struct(
+                    __serializer,
+                    #name,
+                    #len + 1,
+                ));
+                try!(_serde::ser::SerializeStruct::serialize_field(
+                    &mut __serde_state,
+                    #tag,
+                    #variant_name,
+                ));
+                #(#serialize_fields)*
+                _serde::ser::SerializeStruct::end(__serde_state)
+            }
+        }
+        StructVariant::Untagged => {
+            quote_block! {
+                let #let_mut __serde_state = try!(_serde::Serializer::serialize_struct(
+                    __serializer,
+                    #name,
+                    #len,
+                ));
+                #(#serialize_fields)*
+                _serde::ser::SerializeStruct::end(__serde_state)
+            }
+        }
+    }
+}
+
+fn serialize_struct_variant_with_flatten(
+    context: StructVariant,
+    params: &Parameters,
+    fields: &[Field],
+    name: &str,
+) -> Fragment {
+    let struct_trait = StructTrait::SerializeMap;
+    let serialize_fields = serialize_struct_visitor(fields, params, true, &struct_trait);
+
+    let mut serialized_fields = fields
+        .iter()
+        .filter(|&field| !field.attrs.skip_serializing())
+        .peekable();
+
+    let let_mut = mut_if(serialized_fields.peek().is_some());
+
+    match context {
+        StructVariant::ExternallyTagged {
+            variant_index,
+            variant_name,
+        } => {
+            let this_type = &params.this_type;
+            let fields_ty = fields.iter().map(|f| &f.ty);
+            let members = &fields.iter().map(|f| &f.member).collect::<Vec<_>>();
+
+            let (_, ty_generics, where_clause) = params.generics.split_for_impl();
+            let wrapper_generics = bound::with_lifetime_bound(&params.generics, "'__a");
+            let (wrapper_impl_generics, wrapper_ty_generics, _) = wrapper_generics.split_for_impl();
+
+            quote_block! {
+                struct __EnumFlatten #wrapper_generics #where_clause {
+                    data: (#(&'__a #fields_ty,)*),
+                    phantom: _serde::__private::PhantomData<#this_type #ty_generics>,
+                }
+
+                impl #wrapper_impl_generics _serde::Serialize for __EnumFlatten #wrapper_ty_generics #where_clause {
+                    fn serialize<__S>(&self, __serializer: __S) -> _serde::__private::Result<__S::Ok, __S::Error>
+                    where
+                        __S: _serde::Serializer,
+                    {
+                        let (#(#members,)*) = self.data;
+                        let #let_mut __serde_state = try!(_serde::Serializer::serialize_map(
+                            __serializer,
+                            _serde::__private::None));
+                        #(#serialize_fields)*
+                        _serde::ser::SerializeMap::end(__serde_state)
+                    }
+                }
+
+                _serde::Serializer::serialize_newtype_variant(
+                    __serializer,
+                    #name,
+                    #variant_index,
+                    #variant_name,
+                    &__EnumFlatten {
+                        data: (#(#members,)*),
+                        phantom: _serde::__private::PhantomData::<#this_type #ty_generics>,
+                    })
+            }
+        }
+        StructVariant::InternallyTagged { tag, variant_name } => {
+            quote_block! {
+                let #let_mut __serde_state = try!(_serde::Serializer::serialize_map(
+                    __serializer,
+                    _serde::__private::None));
+                try!(_serde::ser::SerializeMap::serialize_entry(
+                    &mut __serde_state,
+                    #tag,
+                    #variant_name,
+                ));
+                #(#serialize_fields)*
+                _serde::ser::SerializeMap::end(__serde_state)
+            }
+        }
+        StructVariant::Untagged => {
+            quote_block! {
+                let #let_mut __serde_state = try!(_serde::Serializer::serialize_map(
+                    __serializer,
+                    _serde::__private::None));
+                #(#serialize_fields)*
+                _serde::ser::SerializeMap::end(__serde_state)
+            }
+        }
+    }
+}
+
+fn serialize_tuple_struct_visitor(
+    fields: &[Field],
+    params: &Parameters,
+    is_enum: bool,
+    tuple_trait: &TupleTrait,
+) -> Vec<TokenStream> {
+    fields
+        .iter()
+        .enumerate()
+        .filter(|(_, field)| !field.attrs.skip_serializing())
+        .map(|(i, field)| {
+            let mut field_expr = if is_enum {
+                let id = Ident::new(&format!("__field{}", i), Span::call_site());
+                quote!(#id)
+            } else {
+                get_member(
+                    params,
+                    field,
+                    &Member::Unnamed(Index {
+                        index: i as u32,
+                        span: Span::call_site(),
+                    }),
+                )
+            };
+
+            let skip = field
+                .attrs
+                .skip_serializing_if()
+                .map(|path| quote!(#path(#field_expr)));
+
+            if let Some(path) = field.attrs.serialize_with() {
+                field_expr = wrap_serialize_field_with(params, field.ty, path, &field_expr);
+            }
+
+            let span = field.original.span();
+            let func = tuple_trait.serialize_element(span);
+            let ser = quote! {
+                try!(#func(&mut __serde_state, #field_expr));
+            };
+
+            match skip {
+                None => ser,
+                Some(skip) => quote!(if !#skip { #ser }),
+            }
+        })
+        .collect()
+}
+
+fn serialize_struct_visitor(
+    fields: &[Field],
+    params: &Parameters,
+    is_enum: bool,
+    struct_trait: &StructTrait,
+) -> Vec<TokenStream> {
+    fields
+        .iter()
+        .filter(|&field| !field.attrs.skip_serializing())
+        .map(|field| {
+            let member = &field.member;
+
+            let mut field_expr = if is_enum {
+                quote!(#member)
+            } else {
+                get_member(params, field, member)
+            };
+
+            let key_expr = field.attrs.name().serialize_name();
+
+            let skip = field
+                .attrs
+                .skip_serializing_if()
+                .map(|path| quote!(#path(#field_expr)));
+
+            if let Some(path) = field.attrs.serialize_with() {
+                field_expr = wrap_serialize_field_with(params, field.ty, path, &field_expr);
+            }
+
+            let span = field.original.span();
+            let ser = if field.attrs.flatten() {
+                let func = quote_spanned!(span=> _serde::Serialize::serialize);
+                quote! {
+                    try!(#func(&#field_expr, _serde::__private::ser::FlatMapSerializer(&mut __serde_state)));
+                }
+            } else {
+                let func = struct_trait.serialize_field(span);
+                quote! {
+                    try!(#func(&mut __serde_state, #key_expr, #field_expr));
+                }
+            };
+
+            match skip {
+                None => ser,
+                Some(skip) => {
+                    if let Some(skip_func) = struct_trait.skip_field(span) {
+                        quote! {
+                            if !#skip {
+                                #ser
+                            } else {
+                                try!(#skip_func(&mut __serde_state, #key_expr));
+                            }
+                        }
+                    } else {
+                        quote! {
+                            if !#skip {
+                                #ser
+                            }
+                        }
+                    }
+                }
+            }
+        })
+        .collect()
+}
+
+fn wrap_serialize_field_with(
+    params: &Parameters,
+    field_ty: &syn::Type,
+    serialize_with: &syn::ExprPath,
+    field_expr: &TokenStream,
+) -> TokenStream {
+    wrap_serialize_with(params, serialize_with, &[field_ty], &[quote!(#field_expr)])
+}
+
+fn wrap_serialize_variant_with(
+    params: &Parameters,
+    serialize_with: &syn::ExprPath,
+    variant: &Variant,
+) -> TokenStream {
+    let field_tys: Vec<_> = variant.fields.iter().map(|field| field.ty).collect();
+    let field_exprs: Vec<_> = variant
+        .fields
+        .iter()
+        .map(|field| {
+            let id = match &field.member {
+                Member::Named(ident) => ident.clone(),
+                Member::Unnamed(member) => {
+                    Ident::new(&format!("__field{}", member.index), Span::call_site())
+                }
+            };
+            quote!(#id)
+        })
+        .collect();
+    wrap_serialize_with(
+        params,
+        serialize_with,
+        field_tys.as_slice(),
+        field_exprs.as_slice(),
+    )
+}
+
+fn wrap_serialize_with(
+    params: &Parameters,
+    serialize_with: &syn::ExprPath,
+    field_tys: &[&syn::Type],
+    field_exprs: &[TokenStream],
+) -> TokenStream {
+    let this_type = &params.this_type;
+    let (_, ty_generics, where_clause) = params.generics.split_for_impl();
+
+    let wrapper_generics = if field_exprs.is_empty() {
+        params.generics.clone()
+    } else {
+        bound::with_lifetime_bound(&params.generics, "'__a")
+    };
+    let (wrapper_impl_generics, wrapper_ty_generics, _) = wrapper_generics.split_for_impl();
+
+    let field_access = (0..field_exprs.len()).map(|n| {
+        Member::Unnamed(Index {
+            index: n as u32,
+            span: Span::call_site(),
+        })
+    });
+
+    quote!({
+        struct __SerializeWith #wrapper_impl_generics #where_clause {
+            values: (#(&'__a #field_tys, )*),
+            phantom: _serde::__private::PhantomData<#this_type #ty_generics>,
+        }
+
+        impl #wrapper_impl_generics _serde::Serialize for __SerializeWith #wrapper_ty_generics #where_clause {
+            fn serialize<__S>(&self, __s: __S) -> _serde::__private::Result<__S::Ok, __S::Error>
+            where
+                __S: _serde::Serializer,
+            {
+                #serialize_with(#(self.values.#field_access, )* __s)
+            }
+        }
+
+        &__SerializeWith {
+            values: (#(#field_exprs, )*),
+            phantom: _serde::__private::PhantomData::<#this_type #ty_generics>,
+        }
+    })
+}
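+
+// Illustrative field attribute serviced by the wrapper above (comment only;
+// `as_millis` is a hypothetical helper function):
+//
+//     #[derive(Serialize)]
+//     struct Timeout {
+//         #[serde(serialize_with = "as_millis")]
+//         duration: std::time::Duration,
+//     }
+//
+//     fn as_millis<S>(d: &std::time::Duration, s: S) -> Result<S::Ok, S::Error>
+//     where
+//         S: serde::Serializer,
+//     {
+//         s.serialize_u128(d.as_millis())
+//     }
+//
+// The generated code wraps `&self.duration` in a `__SerializeWith` value whose
+// `Serialize` impl forwards to `as_millis`.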
+
+// Serialization of an empty struct results in code like:
+//
+//     let mut __serde_state = try!(serializer.serialize_struct("S", 0));
+//     _serde::ser::SerializeStruct::end(__serde_state)
+//
+// where we want to omit the `mut` to avoid a warning.
+fn mut_if(is_mut: bool) -> Option<TokenStream> {
+    if is_mut {
+        Some(quote!(mut))
+    } else {
+        None
+    }
+}
+
+fn get_member(params: &Parameters, field: &Field, member: &Member) -> TokenStream {
+    let self_var = &params.self_var;
+    match (params.is_remote, field.attrs.getter()) {
+        (false, None) => {
+            if params.is_packed {
+                quote!(&{#self_var.#member})
+            } else {
+                quote!(&#self_var.#member)
+            }
+        }
+        (true, None) => {
+            let inner = if params.is_packed {
+                quote!(&{#self_var.#member})
+            } else {
+                quote!(&#self_var.#member)
+            };
+            let ty = field.ty;
+            quote!(_serde::__private::ser::constrain::<#ty>(#inner))
+        }
+        (true, Some(getter)) => {
+            let ty = field.ty;
+            quote!(_serde::__private::ser::constrain::<#ty>(&#getter(#self_var)))
+        }
+        (false, Some(_)) => {
+            unreachable!("getter is only allowed for remote impls");
+        }
+    }
+}
+
+fn effective_style(variant: &Variant) -> Style {
+    match variant.style {
+        Style::Newtype if variant.fields[0].attrs.skip_serializing() => Style::Unit,
+        other => other,
+    }
+}
+
+enum StructTrait {
+    SerializeMap,
+    SerializeStruct,
+    SerializeStructVariant,
+}
+
+impl StructTrait {
+    fn serialize_field(&self, span: Span) -> TokenStream {
+        match *self {
+            StructTrait::SerializeMap => {
+                quote_spanned!(span=> _serde::ser::SerializeMap::serialize_entry)
+            }
+            StructTrait::SerializeStruct => {
+                quote_spanned!(span=> _serde::ser::SerializeStruct::serialize_field)
+            }
+            StructTrait::SerializeStructVariant => {
+                quote_spanned!(span=> _serde::ser::SerializeStructVariant::serialize_field)
+            }
+        }
+    }
+
+    fn skip_field(&self, span: Span) -> Option<TokenStream> {
+        match *self {
+            StructTrait::SerializeMap => None,
+            StructTrait::SerializeStruct => {
+                Some(quote_spanned!(span=> _serde::ser::SerializeStruct::skip_field))
+            }
+            StructTrait::SerializeStructVariant => {
+                Some(quote_spanned!(span=> _serde::ser::SerializeStructVariant::skip_field))
+            }
+        }
+    }
+}
+
+enum TupleTrait {
+    SerializeTuple,
+    SerializeTupleStruct,
+    SerializeTupleVariant,
+}
+
+impl TupleTrait {
+    fn serialize_element(&self, span: Span) -> TokenStream {
+        match *self {
+            TupleTrait::SerializeTuple => {
+                quote_spanned!(span=> _serde::ser::SerializeTuple::serialize_element)
+            }
+            TupleTrait::SerializeTupleStruct => {
+                quote_spanned!(span=> _serde::ser::SerializeTupleStruct::serialize_field)
+            }
+            TupleTrait::SerializeTupleVariant => {
+                quote_spanned!(span=> _serde::ser::SerializeTupleVariant::serialize_field)
+            }
+        }
+    }
+}
diff --git a/crates/serde_derive/src/this.rs b/crates/serde_derive/src/this.rs
new file mode 100644
index 0000000..32731d0
--- /dev/null
+++ b/crates/serde_derive/src/this.rs
@@ -0,0 +1,32 @@
+use internals::ast::Container;
+use syn::{Path, PathArguments, Token};
+
+pub fn this_type(cont: &Container) -> Path {
+    if let Some(remote) = cont.attrs.remote() {
+        let mut this = remote.clone();
+        for segment in &mut this.segments {
+            if let PathArguments::AngleBracketed(arguments) = &mut segment.arguments {
+                arguments.colon2_token = None;
+            }
+        }
+        this
+    } else {
+        Path::from(cont.ident.clone())
+    }
+}
+
+pub fn this_value(cont: &Container) -> Path {
+    if let Some(remote) = cont.attrs.remote() {
+        let mut this = remote.clone();
+        for segment in &mut this.segments {
+            if let PathArguments::AngleBracketed(arguments) = &mut segment.arguments {
+                if arguments.colon2_token.is_none() {
+                    arguments.colon2_token = Some(Token![::](arguments.lt_token.span));
+                }
+            }
+        }
+        this
+    } else {
+        Path::from(cont.ident.clone())
+    }
+}
diff --git a/crates/serde_derive/src/try.rs b/crates/serde_derive/src/try.rs
new file mode 100644
index 0000000..48cceeb
--- /dev/null
+++ b/crates/serde_derive/src/try.rs
@@ -0,0 +1,24 @@
+use proc_macro2::{Punct, Spacing, TokenStream};
+
+// None of our generated code requires the `From::from` error conversion
+// performed by the standard library's `try!` macro. With this simplified macro
+// we see a significant improvement in type checking and borrow checking time of
+// the generated code and a slight improvement in binary size.
+pub fn replacement() -> TokenStream {
+    // Cannot pass `$expr` to `quote!` prior to Rust 1.17.0 so interpolate it.
+    let dollar = Punct::new('$', Spacing::Alone);
+
+    quote! {
+        #[allow(unused_macros)]
+        macro_rules! try {
+            (#dollar __expr:expr) => {
+                match #dollar __expr {
+                    _serde::__private::Ok(__val) => __val,
+                    _serde::__private::Err(__err) => {
+                        return _serde::__private::Err(__err);
+                    }
+                }
+            }
+        }
+    }
+}
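+
+// In the generated code this local macro appears where handwritten code would
+// use `?`, for example (taken from the serializer expansion above):
+//
+//     let mut __serde_state = try!(_serde::Serializer::serialize_struct(__serializer, "S", 0));
+//
+// which expands to a plain match on `Ok`/`Err` with no `From::from` conversion
+// of the error type.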
diff --git a/crates/shared_library/.cargo-checksum.json b/crates/shared_library/.cargo-checksum.json
new file mode 100644
index 0000000..0b16e83
--- /dev/null
+++ b/crates/shared_library/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"f9ad715b6b6424f37f903a069039d3567d46e426d98d11d4e62a9a3933691e5b","LICENSE-APACHE":"c144680885b29e4719e2a51f0aab5439a1e02d980692b5aaf086cae12727f28b","LICENSE-MIT":"1c07d19ccbe2578665ab7d8c63f71559f890eb8d2a82fa39d0206b7a3414064f","src/dynamic_library.rs":"973df715d4ae2daae662392d73ca853b9bacdb4165bab3e4d8343427dca55c9c","src/lib.rs":"29f1aef9437d1ab891d17d6a6b86c6e1176813d372333cfdfc063b97586deb02"},"package":"5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11"}
\ No newline at end of file
diff --git a/crates/shared_library/Android.bp b/crates/shared_library/Android.bp
new file mode 100644
index 0000000..fcc0ee0
--- /dev/null
+++ b/crates/shared_library/Android.bp
@@ -0,0 +1,53 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_shared_library_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_shared_library_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libshared_library",
+    host_supported: true,
+    crate_name: "shared_library",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.1.9",
+    crate_root: "src/lib.rs",
+    edition: "2015",
+    rustlibs: [
+        "liblazy_static",
+        "liblibc",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.virt",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
+
+rust_test {
+    name: "shared_library_test_src_lib",
+    host_supported: true,
+    crate_name: "shared_library",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.1.9",
+    crate_root: "src/lib.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: false,
+    },
+    edition: "2015",
+    rustlibs: [
+        "liblazy_static",
+        "liblibc",
+    ],
+}
diff --git a/crates/shared_library/Cargo.lock b/crates/shared_library/Cargo.lock
new file mode 100644
index 0000000..abb0045
--- /dev/null
+++ b/crates/shared_library/Cargo.lock
@@ -0,0 +1,23 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "lazy_static"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+
+[[package]]
+name = "libc"
+version = "0.2.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+
+[[package]]
+name = "shared_library"
+version = "0.1.9"
+dependencies = [
+ "lazy_static",
+ "libc",
+]
diff --git a/crates/shared_library/Cargo.toml b/crates/shared_library/Cargo.toml
new file mode 100644
index 0000000..1d7d58b
--- /dev/null
+++ b/crates/shared_library/Cargo.toml
@@ -0,0 +1,24 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "shared_library"
+version = "0.1.9"
+authors = ["Pierre Krieger <pierre.krieger1708@gmail.com>"]
+description = "Easily bind to and load shared libraries"
+license = "Apache-2.0/MIT"
+repository = "https://github.com/tomaka/shared_library/"
+[dependencies.lazy_static]
+version = "1"
+
+[dependencies.libc]
+version = "0.2"
diff --git a/crates/shared_library/LICENSE b/crates/shared_library/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/shared_library/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/shared_library/LICENSE-APACHE b/crates/shared_library/LICENSE-APACHE
new file mode 100644
index 0000000..1b22bef
--- /dev/null
+++ b/crates/shared_library/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/crates/shared_library/LICENSE-MIT b/crates/shared_library/LICENSE-MIT
new file mode 100644
index 0000000..4f2b149
--- /dev/null
+++ b/crates/shared_library/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2017 Pierre Krieger
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/crates/shared_library/METADATA b/crates/shared_library/METADATA
new file mode 100644
index 0000000..0b6518d
--- /dev/null
+++ b/crates/shared_library/METADATA
@@ -0,0 +1,20 @@
+name: "shared_library"
+description: "Easily bind to and load shared libraries"
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/shared_library"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/shared_library/shared_library-0.1.9.crate"
+  }
+  version: "0.1.9"
+  # Dual-licensed, using the least restrictive per go/thirdpartylicenses#same.
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2021
+    month: 8
+    day: 30
+  }
+}
diff --git a/crates/shared_library/MODULE_LICENSE_APACHE2 b/crates/shared_library/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/shared_library/MODULE_LICENSE_APACHE2
diff --git a/crates/shared_library/TEST_MAPPING b/crates/shared_library/TEST_MAPPING
new file mode 100644
index 0000000..f439cf8
--- /dev/null
+++ b/crates/shared_library/TEST_MAPPING
@@ -0,0 +1,8 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/vulkano"
+    }
+  ]
+}
diff --git a/crates/shared_library/cargo_embargo.json b/crates/shared_library/cargo_embargo.json
new file mode 100644
index 0000000..98cba6d
--- /dev/null
+++ b/crates/shared_library/cargo_embargo.json
@@ -0,0 +1,13 @@
+{
+  "apex_available": [
+    "//apex_available:platform",
+    "com.android.virt"
+  ],
+  "package": {
+    "shared_library": {
+      "no_presubmit": true
+    }
+  },
+  "run_cargo": false,
+  "tests": true
+}
diff --git a/crates/shared_library/src/dynamic_library.rs b/crates/shared_library/src/dynamic_library.rs
new file mode 100644
index 0000000..753b632
--- /dev/null
+++ b/crates/shared_library/src/dynamic_library.rs
@@ -0,0 +1,410 @@
+// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Dynamic library facilities.
+//!
+//! A simple wrapper over the platform's dynamic library facilities
+
+#![allow(missing_docs)]
+
+use std::env;
+use std::ffi::{CString, OsString};
+use std::mem;
+use std::path::{Path, PathBuf};
+use libc;
+
+pub struct DynamicLibrary {
+    handle: *mut u8
+}
+
+unsafe impl Send for DynamicLibrary {}
+unsafe impl Sync for DynamicLibrary {}
+
+impl Drop for DynamicLibrary {
+    fn drop(&mut self) {
+        if let Err(str) = dl::check_for_errors_in(|| unsafe {
+            dl::close(self.handle)
+        }) {
+            panic!("{}", str)
+        }
+    }
+}
+
+/// Special handles to be used with the `symbol_special` function. These are
+/// provided by a GNU-only extension and are not included as part of the
+/// POSIX standard.
+///
+/// See https://linux.die.net/man/3/dlsym for their behaviour.
+#[cfg(target_os = "linux")]
+pub enum SpecialHandles {
+    Next,
+    Default,
+}
+
+impl DynamicLibrary {
+    // FIXME (#12938): Until DST lands, we cannot decompose &str into
+    // & and str, so we cannot usefully take ToCStr arguments by
+    // reference (without forcing an additional & around &str). So we
+    // are instead temporarily adding an instance for &Path, so that
+    // we can take ToCStr as owned. When DST lands, the &Path instance
+    // should be removed, and arguments bound by ToCStr should be
+    // passed by reference. (Here: in the `open` method.)
+
+    /// Lazily loads the dynamic library named `filename` into memory and 
+    /// then returns an opaque "handle" for that dynamic library.
+    ///
+    /// Returns a handle to the calling process when passed `None`.
+    pub fn open(filename: Option<&Path>) -> Result<Self, String> {
+        // The dynamic library must not be constructed if there is
+        // an error opening the library so the destructor does not
+        // run.
+        dl::open(filename.map(|path| path.as_os_str()))
+            .map(|handle| DynamicLibrary { handle })
+    }
+
+    /// Prepends a path to this process's search path for dynamic libraries
+    pub fn prepend_search_path(path: &Path) {
+        let mut search_path = Self::search_path();
+        search_path.insert(0, path.to_path_buf());
+        env::set_var(Self::envvar(), &Self::create_path(&search_path));
+    }
+
+    /// From a slice of paths, create a value suitable to be set as this
+    /// platform's dynamic library search path environment variable.
+    pub fn create_path(path: &[PathBuf]) -> OsString {
+        let mut newvar = OsString::new();
+        for (i, path) in path.iter().enumerate() {
+            if i > 0 { newvar.push(Self::separator()); }
+            newvar.push(path);
+        }
+        newvar
+    }
+
+    /// Returns the environment variable for this process's dynamic library
+    /// search path
+    pub fn envvar() -> &'static str {
+        if cfg!(windows) {
+            "PATH"
+        } else if cfg!(target_os = "macos") {
+            "DYLD_LIBRARY_PATH"
+        } else {
+            "LD_LIBRARY_PATH"
+        }
+    }
+
+    //TODO: turn this and `envvar` into associated constants
+    fn separator() -> &'static str {
+        if cfg!(windows) { ";" } else { ":" }
+    }
+
+    /// Returns the current search path for dynamic libraries being used by this
+    /// process
+    pub fn search_path() -> Vec<PathBuf> {
+        match env::var_os(Self::envvar()) {
+            Some(var) => env::split_paths(&var).collect(),
+            None => Vec::new(),
+        }
+    }
+
+    /// Returns the address of where symbol `symbol` was loaded into memory.
+    ///
+    /// On POSIX-compliant systems, `Err` is returned if the symbol was not
+    /// found in this library or in any of the libraries that were
+    /// automatically loaded when this library was loaded.
+    pub unsafe fn symbol<T>(&self, symbol: &str) -> Result<*mut T, String> {
+        // This function should have a lifetime constraint of 'a on
+        // T but that feature is still unimplemented
+
+        let raw_string = CString::new(symbol).unwrap();
+        // The value must not be constructed if there is an error so
+        // the destructor does not run.
+        dl::check_for_errors_in(|| {
+                dl::symbol(self.handle as *mut libc::c_void, raw_string.as_ptr() as *const _)
+            })
+            .map(|sym| mem::transmute(sym))
+    }
+
+    /// Returns the address of the first occurrence of symbol `symbol` using
+    /// the default library search order if you use `SpecialHandles::Default`.
+    ///
+    /// Returns the address of the next occurrence of symbol `symbol` after
+    /// the current library in the default library search order if you use
+    /// `SpecialHandles::Next`.
+    #[cfg(target_os = "linux")]
+    pub unsafe fn symbol_special<T>(handle: SpecialHandles, symbol: &str) -> Result<*mut T, String> {
+        // This function should have a lifetime constraint of 'a on
+        // T but that feature is still unimplemented
+
+        let handle = match handle {
+            SpecialHandles::Next => mem::transmute::<libc::c_long, _>(-1),
+            SpecialHandles::Default => ::std::ptr::null_mut(),
+        };
+
+        let raw_string = CString::new(symbol).unwrap();
+        // The value must not be constructed if there is an error so
+        // the destructor does not run.
+        dl::check_for_errors_in(|| {
+                dl::symbol(handle, raw_string.as_ptr() as *const _)
+            })
+            .map(|sym| mem::transmute(sym))
+    }
+}
+
+#[cfg(all(test, not(target_os = "ios")))]
+mod test {
+    use super::*;
+    use std::mem;
+    use std::path::Path;
+
+    #[test]
+    #[cfg_attr(any(windows, target_os = "android"), ignore)] // FIXME #8818, #10379
+    fn test_loading_cosine() {
+        // The math library does not need to be loaded since it is already
+        // statically linked in
+        let libm = match DynamicLibrary::open(None) {
+            Err(error) => panic!("Could not load self as module: {}", error),
+            Ok(libm) => libm
+        };
+
+        let cosine: extern fn(libc::c_double) -> libc::c_double = unsafe {
+            match libm.symbol("cos") {
+                Err(error) => panic!("Could not load function cos: {}", error),
+                Ok(cosine) => mem::transmute::<*mut u8, _>(cosine)
+            }
+        };
+
+        let argument = 0.0;
+        let expected_result = 1.0;
+        let result = cosine(argument);
+        if result != expected_result {
+            panic!("cos({}) != {} but equaled {} instead", argument,
+                   expected_result, result)
+        }
+    }
+
+    #[test]
+    #[cfg(any(target_os = "linux",
+              target_os = "macos",
+              target_os = "freebsd",
+              target_os = "fuchsia",
+              target_os = "netbsd",
+              target_os = "dragonfly",
+              target_os = "bitrig",
+              target_os = "openbsd",
+              target_os = "solaris"))]
+    fn test_errors_do_not_crash() {
+        // Open /dev/null as a library to get an error, and make sure
+        // that only causes an error, and not a crash.
+        let path = Path::new("/dev/null");
+        match DynamicLibrary::open(Some(&path)) {
+            Err(_) => {}
+            Ok(_) => panic!("Successfully opened the empty library.")
+        }
+    }
+}
+
+//TODO: use `unix` shortcut?
+#[cfg(any(target_os = "linux",
+          target_os = "android",
+          target_os = "macos",
+          target_os = "ios",
+          target_os = "fuchsia",
+          target_os = "freebsd",
+          target_os = "netbsd",
+          target_os = "dragonfly",
+          target_os = "bitrig",
+          target_os = "openbsd",
+          target_os = "solaris",
+          target_os = "emscripten"))]
+mod dl {
+    use std::ffi::{CString, CStr, OsStr};
+    use std::os::unix::ffi::OsStrExt;
+    use std::str;
+    use libc;
+    use std::ptr;
+    use std::sync::Mutex;
+
+    lazy_static! {
+        static ref LOCK: Mutex<()> = Mutex::new(());
+    }
+
+    pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> {
+        check_for_errors_in(|| unsafe {
+            match filename {
+                Some(filename) => open_external(filename),
+                None => open_internal(),
+            }
+        })
+    }
+
+    const LAZY: libc::c_int = 1;
+
+    unsafe fn open_external(filename: &OsStr) -> *mut u8 {
+        let s = CString::new(filename.as_bytes().to_vec()).unwrap();
+        dlopen(s.as_ptr() as *const _, LAZY) as *mut u8
+    }
+
+    unsafe fn open_internal() -> *mut u8 {
+        dlopen(ptr::null(), LAZY) as *mut u8
+    }
+
+    pub fn check_for_errors_in<T, F>(f: F) -> Result<T, String> where
+        F: FnOnce() -> T,
+    {
+        unsafe {
+            // dlerror isn't thread safe, so we need to lock around this entire
+            // sequence
+            let _guard = LOCK.lock();
+            let _old_error = dlerror();
+
+            let result = f();
+
+            let last_error = dlerror() as *const _;
+            let ret = if ptr::null() == last_error {
+                Ok(result)
+            } else {
+                let s = CStr::from_ptr(last_error).to_bytes();
+                Err(str::from_utf8(s).unwrap().to_string())
+            };
+
+            ret
+        }
+    }
+
+    pub unsafe fn symbol(
+        handle: *mut libc::c_void,
+        symbol: *const libc::c_char,
+    ) -> *mut u8 {
+        dlsym(handle, symbol) as *mut u8
+    }
+
+    pub unsafe fn close(handle: *mut u8) {
+        dlclose(handle as *mut libc::c_void); ()
+    }
+
+    extern {
+        fn dlopen(
+            filename: *const libc::c_char,
+            flag: libc::c_int,
+        ) -> *mut libc::c_void;
+        fn dlerror() -> *mut libc::c_char;
+        fn dlsym(
+            handle: *mut libc::c_void,
+            symbol: *const libc::c_char,
+        ) -> *mut libc::c_void;
+        fn dlclose(
+            handle: *mut libc::c_void,
+        ) -> libc::c_int;
+    }
+}
+
+#[cfg(target_os = "windows")]
+mod dl {
+    use std::ffi::OsStr;
+    use std::iter::Iterator;
+    use libc;
+    use std::ops::FnOnce;
+    use std::io::Error as IoError;
+    use std::os::windows::prelude::*;
+    use std::option::Option::{self, Some, None};
+    use std::ptr;
+    use std::result::Result;
+    use std::result::Result::{Ok, Err};
+    use std::string::String;
+    use std::vec::Vec;
+
+    pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> {
+        // disable "dll load failed" error dialog.
+        let prev_error_mode = unsafe {
+            // SEM_FAILCRITICALERRORS 0x01
+            let new_error_mode = 1;
+            SetErrorMode(new_error_mode)
+        };
+
+        unsafe {
+            SetLastError(0);
+        }
+
+        let result = match filename {
+            Some(filename) => {
+                let filename_str: Vec<_> =
+                    filename.encode_wide().chain(Some(0).into_iter()).collect();
+                let result = unsafe {
+                    LoadLibraryW(filename_str.as_ptr() as *const libc::c_void)
+                };
+                // Beware: Vec/String may change the OS error during drop!
+                // Read the error here, before anything is dropped.
+                if result == ptr::null_mut() {
+                    Err(format!("{}", IoError::last_os_error()))
+                } else {
+                    Ok(result as *mut u8)
+                }
+            }
+            None => {
+                let mut handle = ptr::null_mut();
+                let succeeded = unsafe {
+                    GetModuleHandleExW(0, ptr::null(), &mut handle)
+                };
+                if succeeded == 0 {
+                    Err(format!("{}", IoError::last_os_error()))
+                } else {
+                    Ok(handle as *mut u8)
+                }
+            }
+        };
+
+        unsafe {
+            SetErrorMode(prev_error_mode);
+        }
+
+        result
+    }
+
+    pub fn check_for_errors_in<T, F>(f: F) -> Result<T, String> where
+        F: FnOnce() -> T,
+    {
+        unsafe {
+            SetLastError(0);
+
+            let result = f();
+
+            let error = IoError::last_os_error();
+            if 0 == error.raw_os_error().unwrap() {
+                Ok(result)
+            } else {
+                Err(format!("{}", error))
+            }
+        }
+    }
+
+    pub unsafe fn symbol(handle: *mut libc::c_void, symbol: *const libc::c_char) -> *mut u8 {
+        GetProcAddress(handle, symbol) as *mut u8
+    }
+    pub unsafe fn close(handle: *mut u8) {
+        FreeLibrary(handle as *mut libc::c_void); ()
+    }
+
+    #[allow(non_snake_case)]
+    extern "system" {
+        fn SetLastError(error: libc::size_t);
+        fn LoadLibraryW(name: *const libc::c_void) -> *mut libc::c_void;
+        fn GetModuleHandleExW(
+            dwFlags: u32,
+            name: *const u16,
+            handle: *mut *mut libc::c_void,
+        ) -> i32;
+        fn GetProcAddress(
+            handle: *mut libc::c_void,
+            name: *const libc::c_char,
+        ) -> *mut libc::c_void;
+        fn FreeLibrary(handle: *mut libc::c_void);
+        fn SetErrorMode(uMode: libc::c_uint) -> libc::c_uint;
+    }
+}
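
Reviewer note: a minimal usage sketch of the search-path helpers defined in this file; the directory names are placeholders, and only the public `envvar`, `create_path`, `prepend_search_path`, and `search_path` APIs shown above are assumed.

```rust
use std::path::{Path, PathBuf};
use shared_library::dynamic_library::DynamicLibrary;

fn main() {
    // create_path joins entries with ';' on Windows and ':' elsewhere,
    // matching the private separator() helper above.
    let joined = DynamicLibrary::create_path(&[
        PathBuf::from("/opt/example/lib"), // placeholder directory
        PathBuf::from("/usr/local/lib"),
    ]);
    println!("{}={}", DynamicLibrary::envvar(), joined.to_string_lossy());

    // prepend_search_path mutates the process environment, so later
    // DynamicLibrary::open calls will also search this directory.
    DynamicLibrary::prepend_search_path(Path::new("/opt/example/lib"));
    assert!(DynamicLibrary::search_path().contains(&PathBuf::from("/opt/example/lib")));
}
```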
diff --git a/crates/shared_library/src/lib.rs b/crates/shared_library/src/lib.rs
new file mode 100644
index 0000000..e698a47
--- /dev/null
+++ b/crates/shared_library/src/lib.rs
@@ -0,0 +1,175 @@
+extern crate libc;
+
+#[macro_use]
+extern crate lazy_static;
+
+pub mod dynamic_library;
+
+/// Error that can happen while loading the shared library.
+#[derive(Debug, Clone)]
+pub enum LoadingError {
+    /// The library could not be found; `descr` describes the underlying error.
+    LibraryNotFound {
+        descr: String,
+    },
+
+    /// One of the symbols could not be found in the library.
+    SymbolNotFound {
+        /// The symbol.
+        symbol: &'static str,
+    }
+}
+
+#[macro_export]
+macro_rules! shared_library {
+    ($struct_name:ident, pub $($rest:tt)+) => {
+        shared_library!(__impl $struct_name [] [] [] pub $($rest)+);
+    };
+
+    ($struct_name:ident, fn $($rest:tt)+) => {
+        shared_library!(__impl $struct_name [] [] [] fn $($rest)+);
+    };
+
+    ($struct_name:ident, static $($rest:tt)+) => {
+        shared_library!(__impl $struct_name [] [] [] static $($rest)+);
+    };
+
+    ($struct_name:ident, $def_path:expr, $($rest:tt)+) => {
+        shared_library!(__impl $struct_name [] [$def_path] [] $($rest)+);
+    };
+
+    (__impl $struct_name:ident
+            [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*]
+            , $($rest:tt)*
+    ) => {
+        shared_library!(__impl $struct_name [$($p1)*] [$($p2)*] [$($p3)*] $($rest)*);
+    };
+
+    (__impl $struct_name:ident
+            [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*]
+            pub $($rest:tt)*
+    ) => {
+        shared_library!(__impl $struct_name
+                       [$($p1)*] [$($p2)*] [$($p3)* pub] $($rest)*);
+    };
+
+    (__impl $struct_name:ident
+            [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*]
+            fn $name:ident($($p:ident:$ty:ty),*) -> $ret:ty, $($rest:tt)*
+    ) => {
+        shared_library!(__impl $struct_name
+                       [$($p1)*, $name:unsafe extern fn($($p:$ty),*) -> $ret]
+                       [$($p2)*]
+                       [$($p3)*
+                           unsafe fn $name($($p:$ty),*) -> $ret {
+                               #![allow(dead_code)]
+                               ($struct_name::get_static_ref().$name)($($p),*)
+                           }
+                        ] $($rest)*);
+    };
+
+    (__impl $struct_name:ident
+            [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*]
+            static $name:ident:$ty:ty, $($rest:tt)*
+    ) => {
+        shared_library!(__impl $struct_name
+                       [$($p1)*, $name: $ty]
+                       [$($p2)*]
+                       [$($p3)*] $($rest)*);
+    };
+
+    (__impl $struct_name:ident
+            [$($p1:tt)*] [$($p2:tt)*] [$($p3:tt)*]
+            fn $name:ident($($p:ident:$ty:ty),*), $($rest:tt)*
+    ) => {
+        shared_library!(__impl $struct_name
+                       [$($p1)*] [$($p2)*] [$($p3)*]
+                       fn $name($($p:$ty),*) -> (), $($rest)*);
+    };
+
+    (__impl $struct_name:ident [$(,$mem_n:ident:$mem_t:ty)+] [$($p2:tt)*] [$($p3:tt)*]) => {
+        /// Symbols loaded from a shared library.
+        #[allow(non_snake_case)]
+        pub struct $struct_name {
+            _library_guard: $crate::dynamic_library::DynamicLibrary,
+            $(
+                pub $mem_n: $mem_t,
+            )+
+        }
+
+        impl $struct_name {
+            /// Tries to open the dynamic library.
+            #[allow(non_snake_case)]
+            pub fn open(path: &::std::path::Path) -> Result<$struct_name, $crate::LoadingError> {
+                use std::mem;
+
+                let dylib = match $crate::dynamic_library::DynamicLibrary::open(Some(path)) {
+                    Ok(l) => l,
+                    Err(reason) => return Err($crate::LoadingError::LibraryNotFound { descr: reason })
+                };
+
+                $(
+                    let $mem_n: *mut () = match unsafe { dylib.symbol(stringify!($mem_n)) } {
+                        Ok(s) => s,
+                        Err(_) => return Err($crate::LoadingError::SymbolNotFound { symbol: stringify!($mem_n) }),
+                    };
+                )+
+
+                Ok($struct_name {
+                    _library_guard: dylib,
+                    $(
+                        $mem_n: unsafe { mem::transmute($mem_n) },
+                    )+
+                })
+            }
+        }
+
+        shared_library!(__write_static_fns $struct_name [] [$($p2)*] [$($p3)*]);
+    };
+
+    (__write_static_fns $struct_name:ident [$($p1:tt)*] [] [$($p3:tt)*]) => {
+    };
+
+    (__write_static_fns $struct_name:ident [$($p1:tt)*] [$defpath:expr] [$($standalones:item)+]) => {
+        impl $struct_name {
+            /// This function is used by the regular functions.
+            fn get_static_ref() -> &'static $struct_name {
+                $struct_name::try_loading().ok()
+                                           .expect(concat!("Could not open dynamic \
+                                                            library `", stringify!($struct_name),
+                                                            "`"))
+            }
+
+            /// Try loading the static symbols linked to this library.
+            pub fn try_loading() -> Result<&'static $struct_name, $crate::LoadingError> {
+                use std::sync::{Mutex, Once, ONCE_INIT};
+                use std::mem;
+
+                unsafe {
+                    static mut DATA: *const Mutex<Option<$struct_name>> = 0 as *const _;
+
+                    static mut INIT: Once = ONCE_INIT;
+                    INIT.call_once(|| {
+                        let data = Box::new(Mutex::new(None));
+                        DATA = &*data;
+                        mem::forget(data);
+                    });
+
+                    let data: &Mutex<Option<$struct_name>> = &*DATA;
+                    let mut data = data.lock().unwrap();
+
+                    if let Some(ref data) = *data {
+                        return Ok(mem::transmute(data));
+                    }
+
+                    let path = ::std::path::Path::new($defpath);
+                    let result = try!($struct_name::open(path));
+                    *data = Some(result);
+                    Ok(mem::transmute(data.as_ref().unwrap()))
+                }
+            }
+        }
+
+        $($standalones)+
+    };
+}
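
Reviewer note: for context on the `shared_library!` macro above, a minimal invocation sketch follows. The library path and the `example_add`/`example_reset` symbols are hypothetical; they only exercise the `$def_path`, `fn ... -> ret`, and `fn ...()` arms of the macro.

```rust
#[macro_use]
extern crate shared_library;

use std::path::Path;

// Generates a struct `ExampleLib` that keeps the library loaded and exposes
// one field per declared symbol, plus an `open(&Path)` constructor.
shared_library!(ExampleLib, "/usr/lib/libexample.so",
    pub fn example_add(a: i32, b: i32) -> i32,
    pub fn example_reset(),
);

fn main() {
    match ExampleLib::open(Path::new("/usr/lib/libexample.so")) {
        Ok(lib) => {
            // Fields are raw `unsafe extern fn` pointers, so calls are unsafe.
            let sum = unsafe { (lib.example_add)(2, 3) };
            println!("2 + 3 = {}", sum);
        }
        Err(err) => println!("failed to load libexample: {:?}", err),
    }
}
```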
diff --git a/crates/slab/.cargo-checksum.json b/crates/slab/.cargo-checksum.json
new file mode 100644
index 0000000..704fdd3
--- /dev/null
+++ b/crates/slab/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"4ab78a6c79fcce5c853fa02902dc20ab217a41582f2c9204c517da57606e3508","Cargo.toml":"cc4f904c3a853b1b5efe9c36dd973e86ae72094ae27398b7c04ff05606517061","LICENSE":"8ce0830173fdac609dfb4ea603fdc002c2f4af0dc9b1a005653f5da9cf534b18","README.md":"6e8d6d493aec6526593ae9b05e3b21d3f878c5816a94af9d372598e03406b35f","build.rs":"2c008232a3ae7c83c166f61c2942314717976776a4dba38e9063cd8e57a1b9bd","src/builder.rs":"87e629b1f9853d910389635b27a42391f1681cd5638d81e386215211e8b67847","src/lib.rs":"7030188777c9e1a1b1649cc434ebee62b324f1d4212b0982157205892bfb3b5b","src/serde.rs":"e58650a04644cb732119f50eefe6a3066104b5b41be7074e12525e05e2ad21b7","tests/serde.rs":"bb28112509dbb6949528802d05a1b1e34d2e5ff9d3ba5f62aa801cfb3de7a78e","tests/slab.rs":"83f597daf9430d68553738facfd1fd00d9d718888fc5f7dc082ed9eb08a3a18d"},"package":"8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"}
\ No newline at end of file
diff --git a/crates/slab/Android.bp b/crates/slab/Android.bp
new file mode 100644
index 0000000..7516630
--- /dev/null
+++ b/crates/slab/Android.bp
@@ -0,0 +1,60 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_slab_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_slab_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-MIT"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libslab",
+    host_supported: true,
+    crate_name: "slab",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.4.9",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+    min_sdk_version: "29",
+}
+
+rust_test {
+    name: "slab_test_tests_slab",
+    host_supported: true,
+    crate_name: "slab",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.4.9",
+    crate_root: "tests/slab.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+    ],
+    rustlibs: [
+        "libserde",
+        "libserde_test",
+        "libslab",
+    ],
+    proc_macros: ["librustversion"],
+}
diff --git a/crates/slab/CHANGELOG.md b/crates/slab/CHANGELOG.md
new file mode 100644
index 0000000..f46a797
--- /dev/null
+++ b/crates/slab/CHANGELOG.md
@@ -0,0 +1,54 @@
+# 0.4.9 (August 22, 2023)
+
+* Avoid reallocations in `Slab::clone_from` (#137)
+
+# 0.4.8 (January 20, 2023)
+
+* Fixed documentation about overflow (#124)
+* Document panic in `get2_mut` (#131)
+* Refactoring (#129, #132)
+
+# 0.4.7 (July 19, 2022)
+
+* Use `#[track_caller]` on Rust 1.46+ (#119)
+* Make `Slab::new` const on Rust 1.39+ (#119)
+
+# 0.4.6 (April 2, 2022)
+
+* Add `Slab::vacant_key` (#114)
+* Fix stacked borrows violation in `Slab::get2_unchecked_mut` (#115)
+
+# 0.4.5 (October 13, 2021)
+
+* Add alternate debug output for listing items in the slab (#108)
+* Fix typo in debug output of IntoIter (#109)
+* Impl 'Clone' for 'Iter' (#110)
+
+# 0.4.4 (August 06, 2021)
+
+* Fix panic in `FromIterator` impl (#102)
+* Fix compatibility with older clippy versions (#104)
+* Add `try_remove` method (#89)
+* Implement `ExactSizeIterator` and `FusedIterator` for iterators (#92)
+
+# 0.4.3 (April 20, 2021)
+
+* Add no_std support for Rust 1.36 and above (#71).
+* Add `get2_mut` and `get2_unchecked_mut` methods (#65).
+* Make `shrink_to_fit()` remove trailing vacant entries (#62).
+* Implement `FromIterator<(usize, T)>` (#62).
+* Implement `IntoIterator<Item = (usize, T)>` (#62).
+* Provide `size_hint()` of the iterators (#62).
+* Make all iterators reversible (#62).
+* Add `key_of()` method (#61)
+* Add `compact()` method (#60)
+* Add support for serde (#85)
+
+# 0.4.2 (January 11, 2019)
+
+* Add `Slab::drain` (#56).
+
+# 0.4.1 (July 15, 2018)
+
+* Improve `reserve` and `reserve_exact` (#37).
+* Implement `Default` for `Slab` (#43).
diff --git a/crates/slab/Cargo.lock b/crates/slab/Cargo.lock
new file mode 100644
index 0000000..ef20f29
--- /dev/null
+++ b/crates/slab/Cargo.lock
@@ -0,0 +1,89 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "autocfg"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-ident 1.0.12 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.86 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rustversion"
+version = "1.0.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "serde"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "serde_derive 1.0.209 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.86 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 2.0.76 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "serde_test"
+version = "1.0.177"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "serde 1.0.209 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "slab"
+version = "0.4.9"
+dependencies = [
+ "autocfg 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustversion 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.209 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_test 1.0.177 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.86 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-ident 1.0.12 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[metadata]
+"checksum autocfg 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+"checksum proc-macro2 1.0.86 (registry+https://github.com/rust-lang/crates.io-index)" = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+"checksum quote 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)" = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+"checksum rustversion 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
+"checksum serde 1.0.209 (registry+https://github.com/rust-lang/crates.io-index)" = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
+"checksum serde_derive 1.0.209 (registry+https://github.com/rust-lang/crates.io-index)" = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
+"checksum serde_test 1.0.177 (registry+https://github.com/rust-lang/crates.io-index)" = "7f901ee573cab6b3060453d2d5f0bae4e6d628c23c0a962ff9b5f1d7c8d4f1ed"
+"checksum syn 2.0.76 (registry+https://github.com/rust-lang/crates.io-index)" = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+"checksum unicode-ident 1.0.12 (registry+https://github.com/rust-lang/crates.io-index)" = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
diff --git a/crates/slab/Cargo.toml b/crates/slab/Cargo.toml
new file mode 100644
index 0000000..461bb8f
--- /dev/null
+++ b/crates/slab/Cargo.toml
@@ -0,0 +1,55 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.31"
+name = "slab"
+version = "0.4.9"
+authors = ["Carl Lerche <me@carllerche.com>"]
+exclude = ["/.*"]
+description = "Pre-allocated storage for a uniform data type"
+readme = "README.md"
+keywords = [
+    "slab",
+    "allocator",
+    "no_std",
+]
+categories = [
+    "memory-management",
+    "data-structures",
+    "no-std",
+]
+license = "MIT"
+repository = "https://github.com/tokio-rs/slab"
+
+[dependencies.serde]
+version = "1.0.95"
+features = ["alloc"]
+optional = true
+default-features = false
+
+[dev-dependencies.rustversion]
+version = "1"
+
+[dev-dependencies.serde]
+version = "1"
+features = ["derive"]
+
+[dev-dependencies.serde_test]
+version = "1"
+
+[build-dependencies.autocfg]
+version = "1"
+
+[features]
+default = ["std"]
+std = []
diff --git a/crates/slab/LICENSE b/crates/slab/LICENSE
new file mode 100644
index 0000000..819ce21
--- /dev/null
+++ b/crates/slab/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2019 Carl Lerche
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/slab/METADATA b/crates/slab/METADATA
new file mode 100644
index 0000000..2f44dfc
--- /dev/null
+++ b/crates/slab/METADATA
@@ -0,0 +1,20 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update external/rust/crates/slab
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
+
+name: "slab"
+description: "Pre-allocated storage for a uniform data type"
+third_party {
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2024
+    month: 2
+    day: 5
+  }
+  homepage: "https://crates.io/crates/slab"
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/slab/slab-0.4.9.crate"
+    version: "0.4.9"
+  }
+}
diff --git a/crates/slab/MODULE_LICENSE_MIT b/crates/slab/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/slab/MODULE_LICENSE_MIT
diff --git a/crates/slab/NOTICE b/crates/slab/NOTICE
new file mode 120000
index 0000000..7a694c9
--- /dev/null
+++ b/crates/slab/NOTICE
@@ -0,0 +1 @@
+LICENSE
\ No newline at end of file
diff --git a/crates/slab/README.md b/crates/slab/README.md
new file mode 100644
index 0000000..4281389
--- /dev/null
+++ b/crates/slab/README.md
@@ -0,0 +1,51 @@
+# Slab
+
+Pre-allocated storage for a uniform data type.
+
+[![Crates.io][crates-badge]][crates-url]
+[![Build Status][ci-badge]][ci-url]
+
+[crates-badge]: https://img.shields.io/crates/v/slab
+[crates-url]: https://crates.io/crates/slab
+[ci-badge]: https://img.shields.io/github/actions/workflow/status/tokio-rs/slab/ci.yml?branch=master
+[ci-url]: https://github.com/tokio-rs/slab/actions
+
+[Documentation](https://docs.rs/slab)
+
+## Usage
+
+To use `slab`, first add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+slab = "0.4"
+```
+
+Next, add this to your crate:
+
+```rust
+use slab::Slab;
+
+let mut slab = Slab::new();
+
+let hello = slab.insert("hello");
+let world = slab.insert("world");
+
+assert_eq!(slab[hello], "hello");
+assert_eq!(slab[world], "world");
+
+slab[world] = "earth";
+assert_eq!(slab[world], "earth");
+```
+
+See [documentation](https://docs.rs/slab) for more details.
+
+## License
+
+This project is licensed under the [MIT license](LICENSE).
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in `slab` by you, shall be licensed as MIT, without any additional
+terms or conditions.
diff --git a/crates/slab/TEST_MAPPING b/crates/slab/TEST_MAPPING
new file mode 100644
index 0000000..b755779
--- /dev/null
+++ b/crates/slab/TEST_MAPPING
@@ -0,0 +1,57 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/anyhow"
+    },
+    {
+      "path": "external/rust/crates/async-stream"
+    },
+    {
+      "path": "external/rust/crates/futures-channel"
+    },
+    {
+      "path": "external/rust/crates/futures-executor"
+    },
+    {
+      "path": "external/rust/crates/futures-test"
+    },
+    {
+      "path": "external/rust/crates/futures-util"
+    },
+    {
+      "path": "external/rust/crates/tokio"
+    },
+    {
+      "path": "external/rust/crates/tokio-test"
+    },
+    {
+      "path": "packages/modules/DnsResolver"
+    },
+    {
+      "path": "packages/modules/Virtualization/authfs"
+    },
+    {
+      "path": "packages/modules/Virtualization/virtualizationmanager"
+    },
+    {
+      "path": "packages/modules/Virtualization/zipfuse"
+    },
+    {
+      "path": "system/security/keystore2"
+    },
+    {
+      "path": "system/security/keystore2/legacykeystore"
+    }
+  ],
+  "presubmit": [
+    {
+      "name": "slab_test_tests_slab"
+    }
+  ],
+  "presubmit-rust": [
+    {
+      "name": "slab_test_tests_slab"
+    }
+  ]
+}
diff --git a/crates/slab/build.rs b/crates/slab/build.rs
new file mode 100644
index 0000000..b60351a
--- /dev/null
+++ b/crates/slab/build.rs
@@ -0,0 +1,24 @@
+fn main() {
+    let cfg = match autocfg::AutoCfg::new() {
+        Ok(cfg) => cfg,
+        Err(e) => {
+            // If we couldn't detect the compiler version and features, just
+            // print a warning. This isn't a fatal error: we can still build
+            // Slab, we just can't enable cfgs automatically.
+            println!(
+                "cargo:warning=slab: failed to detect compiler features: {}",
+                e
+            );
+            return;
+        }
+    };
+    // Note that this is `no_*`, not `has_*`. This lets the code behave as if
+    // the latest stable rustc were in use when the build script doesn't run,
+    // which is useful for non-cargo build systems that skip build scripts.
+    if !cfg.probe_rustc_version(1, 39) {
+        println!("cargo:rustc-cfg=slab_no_const_vec_new");
+    }
+    if !cfg.probe_rustc_version(1, 46) {
+        println!("cargo:rustc-cfg=slab_no_track_caller");
+    }
+}
diff --git a/crates/slab/cargo_embargo.json b/crates/slab/cargo_embargo.json
new file mode 100644
index 0000000..db1aaad
--- /dev/null
+++ b/crates/slab/cargo_embargo.json
@@ -0,0 +1,4 @@
+{
+  "min_sdk_version": "29",
+  "tests": true
+}
diff --git a/crates/slab/patches/disable_panic_tests_on_android.patch b/crates/slab/patches/disable_panic_tests_on_android.patch
new file mode 100644
index 0000000..4333952
--- /dev/null
+++ b/crates/slab/patches/disable_panic_tests_on_android.patch
@@ -0,0 +1,13 @@
+diff --git a/tests/slab.rs b/tests/slab.rs
+index c1570fa..8ba3064 100644
+--- a/tests/slab.rs
++++ b/tests/slab.rs
+@@ -580,6 +580,8 @@ fn compact_doesnt_move_if_closure_errors() {
+ }
+ 
+ #[test]
++// Android aborts on panic and this test relies on stack unwinding.
++#[cfg(not(target_os = "android"))]
+ fn compact_handles_closure_panic() {
+     let mut slab = Slab::new();
+     for i in 0..10 {
diff --git a/crates/slab/src/builder.rs b/crates/slab/src/builder.rs
new file mode 100644
index 0000000..8e50a20
--- /dev/null
+++ b/crates/slab/src/builder.rs
@@ -0,0 +1,63 @@
+use crate::{Entry, Slab};
+
+// Building `Slab` from pairs (usize, T).
+pub(crate) struct Builder<T> {
+    slab: Slab<T>,
+    vacant_list_broken: bool,
+    first_vacant_index: Option<usize>,
+}
+
+impl<T> Builder<T> {
+    pub(crate) fn with_capacity(capacity: usize) -> Self {
+        Self {
+            slab: Slab::with_capacity(capacity),
+            vacant_list_broken: false,
+            first_vacant_index: None,
+        }
+    }
+    pub(crate) fn pair(&mut self, key: usize, value: T) {
+        let slab = &mut self.slab;
+        if key < slab.entries.len() {
+            // iterator is not sorted, might need to recreate vacant list
+            if let Entry::Vacant(_) = slab.entries[key] {
+                self.vacant_list_broken = true;
+                slab.len += 1;
+            }
+            // If an element with this key already exists, replace it.
+            // This is consistent with HashMap and BTreeMap.
+            slab.entries[key] = Entry::Occupied(value);
+        } else {
+            if self.first_vacant_index.is_none() && slab.entries.len() < key {
+                self.first_vacant_index = Some(slab.entries.len());
+            }
+            // insert holes as necessary
+            while slab.entries.len() < key {
+                // add the entry to the start of the vacant list
+                let next = slab.next;
+                slab.next = slab.entries.len();
+                slab.entries.push(Entry::Vacant(next));
+            }
+            slab.entries.push(Entry::Occupied(value));
+            slab.len += 1;
+        }
+    }
+
+    pub(crate) fn build(self) -> Slab<T> {
+        let mut slab = self.slab;
+        if slab.len == slab.entries.len() {
+            // no vacant entries, so next might not have been updated
+            slab.next = slab.entries.len();
+        } else if self.vacant_list_broken {
+            slab.recreate_vacant_list();
+        } else if let Some(first_vacant_index) = self.first_vacant_index {
+            let next = slab.entries.len();
+            match &mut slab.entries[first_vacant_index] {
+                Entry::Vacant(n) => *n = next,
+                _ => unreachable!(),
+            }
+        } else {
+            unreachable!()
+        }
+        slab
+    }
+}
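
Reviewer note: the builder above backs `Slab`'s `FromIterator<(usize, T)>` support (see the 0.4.3 entry in the changelog earlier in this change). A minimal sketch, assuming only that public API:

```rust
use slab::Slab;

fn main() {
    // Keys don't have to be contiguous; the builder fills the gap between
    // key 0 and key 3 with vacant entries and rebuilds the vacant list.
    let mut slab: Slab<&str> = vec![(0usize, "zero"), (3usize, "three")]
        .into_iter()
        .collect();

    assert_eq!(slab.len(), 2);
    assert_eq!(slab[0], "zero");
    assert_eq!(slab[3], "three");

    // The next insert reuses one of the vacant slots created for keys 1 and 2.
    let key = slab.insert("reused");
    assert!(key == 1 || key == 2);
}
```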
diff --git a/crates/slab/src/lib.rs b/crates/slab/src/lib.rs
new file mode 100644
index 0000000..7fc6f1a
--- /dev/null
+++ b/crates/slab/src/lib.rs
@@ -0,0 +1,1589 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(
+    missing_debug_implementations,
+    missing_docs,
+    rust_2018_idioms,
+    unreachable_pub
+)]
+#![doc(test(
+    no_crate_inject,
+    attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
+))]
+
+//! Pre-allocated storage for a uniform data type.
+//!
+//! `Slab` provides pre-allocated storage for a single data type. If many values
+//! of a single type are being allocated, it can be more efficient to
+//! pre-allocate the necessary storage. Since the size of the type is uniform,
+//! memory fragmentation can be avoided. Storing, clearing, and lookup
+//! operations become very cheap.
+//!
+//! While `Slab` may look like other Rust collections, it is not intended to be
+//! used as a general purpose collection. The primary difference between `Slab`
+//! and `Vec` is that `Slab` returns the key when storing the value.
+//!
+//! It is important to note that keys may be reused. In other words, once a
+//! value associated with a given key is removed from a slab, that key may be
+//! returned from future calls to `insert`.
+//!
+//! # Examples
+//!
+//! Basic storing and retrieval.
+//!
+//! ```
+//! # use slab::*;
+//! let mut slab = Slab::new();
+//!
+//! let hello = slab.insert("hello");
+//! let world = slab.insert("world");
+//!
+//! assert_eq!(slab[hello], "hello");
+//! assert_eq!(slab[world], "world");
+//!
+//! slab[world] = "earth";
+//! assert_eq!(slab[world], "earth");
+//! ```
+//!
+//! Sometimes it is useful to be able to associate the key with the value being
+//! inserted in the slab. This can be done with the `vacant_entry` API as follows:
+//!
+//! ```
+//! # use slab::*;
+//! let mut slab = Slab::new();
+//!
+//! let hello = {
+//!     let entry = slab.vacant_entry();
+//!     let key = entry.key();
+//!
+//!     entry.insert((key, "hello"));
+//!     key
+//! };
+//!
+//! assert_eq!(hello, slab[hello].0);
+//! assert_eq!("hello", slab[hello].1);
+//! ```
+//!
+//! It is generally a good idea to specify the desired capacity of a slab at
+//! creation time. Note that `Slab` will grow the internal capacity when
+//! attempting to insert a new value once the existing capacity has been reached.
+//! To avoid this, add a check.
+//!
+//! ```
+//! # use slab::*;
+//! let mut slab = Slab::with_capacity(1024);
+//!
+//! // ... use the slab
+//!
+//! if slab.len() == slab.capacity() {
+//!     panic!("slab full");
+//! }
+//!
+//! slab.insert("the slab is not at capacity yet");
+//! ```
+//!
+//! # Capacity and reallocation
+//!
+//! The capacity of a slab is the amount of space allocated for any future
+//! values that will be inserted in the slab. This is not to be confused with
+//! the *length* of the slab, which specifies the number of actual values
+//! currently being inserted. If a slab's length is equal to its capacity, the
+//! next value inserted into the slab will require growing the slab by
+//! reallocating.
+//!
+//! For example, a slab with capacity 10 and length 0 would be an empty slab
+//! with space for 10 more stored values. Storing 10 or fewer elements into the
+//! slab will not change its capacity or cause reallocation to occur. However,
+//! if the slab length is increased to 11 (due to another `insert`), it will
+//! have to reallocate, which can be slow. For this reason, it is recommended to
+//! use [`Slab::with_capacity`] whenever possible to specify how many values the
+//! slab is expected to store.
+//!
+//! # Implementation
+//!
+//! `Slab` is backed by a `Vec` of slots. Each slot is either occupied or
+//! vacant. `Slab` maintains a stack of vacant slots using a linked list. To
+//! find a vacant slot, the stack is popped. When a slot is released, it is
+//! pushed onto the stack.
+//!
+//! If there are no more available slots in the stack, then `Vec::reserve(1)` is
+//! called and a new slot is created.
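+//!
+//! As a small illustrative sketch of this vacant-list behavior: removing a
+//! value pushes its slot onto the list, and the next `insert` pops it.
+//!
+//! ```
+//! # use slab::*;
+//! let mut slab = Slab::new();
+//! let a = slab.insert("a"); // key 0
+//! slab.insert("b");         // key 1
+//! slab.remove(a);           // key 0 goes onto the vacant list
+//! assert_eq!(slab.insert("c"), a); // ... and is reused by the next insert
+//! ```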
+//!
+//! [`Slab::with_capacity`]: struct.Slab.html#method.with_capacity
+
+#[cfg(not(feature = "std"))]
+extern crate alloc;
+#[cfg(feature = "std")]
+extern crate std as alloc;
+
+#[cfg(feature = "serde")]
+mod serde;
+
+mod builder;
+
+use alloc::vec::{self, Vec};
+use core::iter::{self, FromIterator, FusedIterator};
+use core::{fmt, mem, ops, slice};
+
+/// Pre-allocated storage for a uniform data type
+///
+/// See the [module documentation] for more details.
+///
+/// [module documentation]: index.html
+pub struct Slab<T> {
+    // Chunk of memory
+    entries: Vec<Entry<T>>,
+
+    // Number of Filled elements currently in the slab
+    len: usize,
+
+    // Offset of the next available slot in the slab. Set to the length of
+    // the `entries` vector (the end marker of the vacant list) when there
+    // are no vacant slots.
+    next: usize,
+}
+
+impl<T> Clone for Slab<T>
+where
+    T: Clone,
+{
+    fn clone(&self) -> Self {
+        Self {
+            entries: self.entries.clone(),
+            len: self.len,
+            next: self.next,
+        }
+    }
+
+    fn clone_from(&mut self, source: &Self) {
+        self.entries.clone_from(&source.entries);
+        self.len = source.len;
+        self.next = source.next;
+    }
+}
+
+impl<T> Default for Slab<T> {
+    fn default() -> Self {
+        Slab::new()
+    }
+}
+
+/// A handle to a vacant entry in a `Slab`.
+///
+/// `VacantEntry` allows constructing values with the key that they will be
+/// assigned to.
+///
+/// # Examples
+///
+/// ```
+/// # use slab::*;
+/// let mut slab = Slab::new();
+///
+/// let hello = {
+///     let entry = slab.vacant_entry();
+///     let key = entry.key();
+///
+///     entry.insert((key, "hello"));
+///     key
+/// };
+///
+/// assert_eq!(hello, slab[hello].0);
+/// assert_eq!("hello", slab[hello].1);
+/// ```
+#[derive(Debug)]
+pub struct VacantEntry<'a, T> {
+    slab: &'a mut Slab<T>,
+    key: usize,
+}
+
+/// A consuming iterator over the values stored in a `Slab`
+pub struct IntoIter<T> {
+    entries: iter::Enumerate<vec::IntoIter<Entry<T>>>,
+    len: usize,
+}
+
+/// An iterator over the values stored in the `Slab`
+pub struct Iter<'a, T> {
+    entries: iter::Enumerate<slice::Iter<'a, Entry<T>>>,
+    len: usize,
+}
+
+impl<'a, T> Clone for Iter<'a, T> {
+    fn clone(&self) -> Self {
+        Self {
+            entries: self.entries.clone(),
+            len: self.len,
+        }
+    }
+}
+
+/// A mutable iterator over the values stored in the `Slab`
+pub struct IterMut<'a, T> {
+    entries: iter::Enumerate<slice::IterMut<'a, Entry<T>>>,
+    len: usize,
+}
+
+/// A draining iterator for `Slab`
+pub struct Drain<'a, T> {
+    inner: vec::Drain<'a, Entry<T>>,
+    len: usize,
+}
+
+#[derive(Clone)]
+enum Entry<T> {
+    Vacant(usize),
+    Occupied(T),
+}
+
+impl<T> Slab<T> {
+    /// Construct a new, empty `Slab`.
+    ///
+    /// The function does not allocate and the returned slab will have no
+    /// capacity until `insert` is called or capacity is explicitly reserved.
+    ///
+    /// This is `const fn` on Rust 1.39+.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let slab: Slab<i32> = Slab::new();
+    /// ```
+    #[cfg(not(slab_no_const_vec_new))]
+    pub const fn new() -> Self {
+        Self {
+            entries: Vec::new(),
+            next: 0,
+            len: 0,
+        }
+    }
+    /// Construct a new, empty `Slab`.
+    ///
+    /// The function does not allocate and the returned slab will have no
+    /// capacity until `insert` is called or capacity is explicitly reserved.
+    ///
+    /// This is `const fn` on Rust 1.39+.
+    #[cfg(slab_no_const_vec_new)]
+    pub fn new() -> Self {
+        Self {
+            entries: Vec::new(),
+            next: 0,
+            len: 0,
+        }
+    }
+
+    /// Construct a new, empty `Slab` with the specified capacity.
+    ///
+    /// The returned slab will be able to store exactly `capacity` values without
+    /// reallocating. If `capacity` is 0, the slab will not allocate.
+    ///
+    /// It is important to note that this function does not specify the *length*
+    /// of the returned slab, but only the capacity. For an explanation of the
+    /// difference between length and capacity, see [Capacity and
+    /// reallocation](index.html#capacity-and-reallocation).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::with_capacity(10);
+    ///
+    /// // The slab contains no values, even though it has capacity for more
+    /// assert_eq!(slab.len(), 0);
+    ///
+    /// // These are all done without reallocating...
+    /// for i in 0..10 {
+    ///     slab.insert(i);
+    /// }
+    ///
+    /// // ...but this may make the slab reallocate
+    /// slab.insert(11);
+    /// ```
+    pub fn with_capacity(capacity: usize) -> Slab<T> {
+        Slab {
+            entries: Vec::with_capacity(capacity),
+            next: 0,
+            len: 0,
+        }
+    }
+
+    /// Return the number of values the slab can store without reallocating.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let slab: Slab<i32> = Slab::with_capacity(10);
+    /// assert_eq!(slab.capacity(), 10);
+    /// ```
+    pub fn capacity(&self) -> usize {
+        self.entries.capacity()
+    }
+
+    /// Reserve capacity for at least `additional` more values to be stored
+    /// without allocating.
+    ///
+    /// `reserve` does nothing if the slab already has sufficient capacity for
+    /// `additional` more values. If more capacity is required, a new segment of
+    /// memory will be allocated and all existing values will be copied into it.
+    /// As such, if the slab is already very large, a call to `reserve` can end
+    /// up being expensive.
+    ///
+    /// The slab may reserve more than `additional` extra space in order to
+    /// avoid frequent reallocations. Use `reserve_exact` instead to guarantee
+    /// that only the requested space is allocated.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the new capacity exceeds `isize::MAX` bytes.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    /// slab.insert("hello");
+    /// slab.reserve(10);
+    /// assert!(slab.capacity() >= 11);
+    /// ```
+    pub fn reserve(&mut self, additional: usize) {
+        if self.capacity() - self.len >= additional {
+            return;
+        }
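+        // Vacant slots already present in `entries` can absorb insertions
+        // without growing the Vec, so only reserve room for the remainder.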
+        let need_add = additional - (self.entries.len() - self.len);
+        self.entries.reserve(need_add);
+    }
+
+    /// Reserve the minimum capacity required to store exactly `additional`
+    /// more values.
+    ///
+    /// `reserve_exact` does nothing if the slab already has sufficient capacity
+    /// for `additional` more values. If more capacity is required, a new segment
+    /// of memory will be allocated and all existing values will be copied into
+    /// it. As such, if the slab is already very large, a call to `reserve_exact`
+    /// can end up being expensive.
+    ///
+    /// Note that the allocator may give the slab more space than it requests.
+    /// Therefore capacity can not be relied upon to be precisely minimal.
+    /// Prefer `reserve` if future insertions are expected.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the new capacity exceeds `isize::MAX` bytes.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    /// slab.insert("hello");
+    /// slab.reserve_exact(10);
+    /// assert!(slab.capacity() >= 11);
+    /// ```
+    pub fn reserve_exact(&mut self, additional: usize) {
+        if self.capacity() - self.len >= additional {
+            return;
+        }
+        let need_add = additional - (self.entries.len() - self.len);
+        self.entries.reserve_exact(need_add);
+    }
+
+    /// Shrink the capacity of the slab as much as possible without invalidating keys.
+    ///
+    /// Because values cannot be moved to a different index, the slab cannot
+    /// shrink past any stored values.
+    /// It will drop down as close as possible to the length but the allocator may
+    /// still inform the underlying vector that there is space for a few more elements.
+    ///
+    /// This function can take O(n) time even if the capacity cannot be reduced
+    /// or the allocation is shrunk in place. Repeated calls, however, run in O(1).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::with_capacity(10);
+    ///
+    /// for i in 0..3 {
+    ///     slab.insert(i);
+    /// }
+    ///
+    /// slab.shrink_to_fit();
+    /// assert!(slab.capacity() >= 3 && slab.capacity() < 10);
+    /// ```
+    ///
+    /// The slab cannot shrink past the last present value even if previous
+    /// values are removed:
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::with_capacity(10);
+    ///
+    /// for i in 0..4 {
+    ///     slab.insert(i);
+    /// }
+    ///
+    /// slab.remove(0);
+    /// slab.remove(3);
+    ///
+    /// slab.shrink_to_fit();
+    /// assert!(slab.capacity() >= 3 && slab.capacity() < 10);
+    /// ```
+    pub fn shrink_to_fit(&mut self) {
+        // Remove all vacant entries after the last occupied one, so that
+        // the capacity can be reduced to what is actually needed.
+        // If the slab is empty the vector can simply be cleared, but that
+        // optimization would not affect time complexity when T: Drop.
+        let len_before = self.entries.len();
+        while let Some(&Entry::Vacant(_)) = self.entries.last() {
+            self.entries.pop();
+        }
+
+        // Removing entries breaks the list of vacant entries,
+        // so it must be repaired
+        if self.entries.len() != len_before {
+            // Some vacant entries were removed, so the list now likely¹
+            // either contains references to the removed entries, or has an
+            // invalid end marker. Fix this by recreating the list.
+            self.recreate_vacant_list();
+            // ¹: If the removed entries formed the tail of the list, with the
+            // most recently popped entry being the head of them, (so that its
+            // index is now the end marker) the list is still valid.
+            // Checking for that unlikely scenario in this infrequently called
+            // method is not worth the code complexity.
+        }
+
+        self.entries.shrink_to_fit();
+    }
+
+    /// Iterate through all entries to recreate and repair the vacant list.
+    /// self.len must be correct and is not modified.
+    fn recreate_vacant_list(&mut self) {
+        self.next = self.entries.len();
+        // We can stop once we've found all vacant entries
+        let mut remaining_vacant = self.entries.len() - self.len;
+        if remaining_vacant == 0 {
+            return;
+        }
+
+        // Iterate in reverse order so that lower keys are at the start of
+        // the vacant list. This way future shrinks are more likely to be
+        // able to remove vacant entries.
+        for (i, entry) in self.entries.iter_mut().enumerate().rev() {
+            if let Entry::Vacant(ref mut next) = *entry {
+                *next = self.next;
+                self.next = i;
+                remaining_vacant -= 1;
+                if remaining_vacant == 0 {
+                    break;
+                }
+            }
+        }
+    }
+
+    /// Reduce the capacity as much as possible, changing the key for elements when necessary.
+    ///
+    /// To allow updating references to the elements which must be moved to a new key,
+    /// this function takes a closure which is called before moving each element.
+    /// The second and third parameters to the closure are the current key and
+    /// new key respectively.
+    /// In case changing the key for one element turns out not to be possible,
+    /// the move can be cancelled by returning `false` from the closure.
+    /// In that case, no further attempts at relocating elements are made.
+    /// If the closure unwinds, the slab will be left in a consistent state,
+    /// but the value that the closure panicked on might be removed.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    ///
+    /// let mut slab = Slab::with_capacity(10);
+    /// let a = slab.insert('a');
+    /// slab.insert('b');
+    /// slab.insert('c');
+    /// slab.remove(a);
+    /// slab.compact(|&mut value, from, to| {
+    ///     assert_eq!((value, from, to), ('c', 2, 0));
+    ///     true
+    /// });
+    /// assert!(slab.capacity() >= 2 && slab.capacity() < 10);
+    /// ```
+    ///
+    /// The value is not moved when the closure returns `false`:
+    ///
+    /// ```
+    /// # use slab::*;
+    ///
+    /// let mut slab = Slab::with_capacity(100);
+    /// let a = slab.insert('a');
+    /// let b = slab.insert('b');
+    /// slab.remove(a);
+    /// slab.compact(|&mut value, from, to| false);
+    /// assert_eq!(slab.iter().next(), Some((b, &'b')));
+    /// ```
+    pub fn compact<F>(&mut self, mut rekey: F)
+    where
+        F: FnMut(&mut T, usize, usize) -> bool,
+    {
+        // If the closure unwinds, we need to restore a valid list of vacant entries
+        struct CleanupGuard<'a, T> {
+            slab: &'a mut Slab<T>,
+            decrement: bool,
+        }
+        impl<T> Drop for CleanupGuard<'_, T> {
+            fn drop(&mut self) {
+                if self.decrement {
+                    // Value was popped and not pushed back on
+                    self.slab.len -= 1;
+                }
+                self.slab.recreate_vacant_list();
+            }
+        }
+        let mut guard = CleanupGuard {
+            slab: self,
+            decrement: true,
+        };
+
+        let mut occupied_until = 0;
+        // While there are vacant entries
+        while guard.slab.entries.len() > guard.slab.len {
+            // Find a value that needs to be moved,
+            // by popping entries until we find an occupied one.
+            // (entries cannot be empty because 0 is not greater than anything)
+            if let Some(Entry::Occupied(mut value)) = guard.slab.entries.pop() {
+                // Found one, now find a vacant entry to move it to
+                while let Some(&Entry::Occupied(_)) = guard.slab.entries.get(occupied_until) {
+                    occupied_until += 1;
+                }
+                // Let the caller try to update references to the key
+                if !rekey(&mut value, guard.slab.entries.len(), occupied_until) {
+                    // Changing the key failed, so push the entry back on at its old index.
+                    guard.slab.entries.push(Entry::Occupied(value));
+                    guard.decrement = false;
+                    guard.slab.entries.shrink_to_fit();
+                    return;
+                    // Guard drop handles cleanup
+                }
+                // Put the value in its new spot
+                guard.slab.entries[occupied_until] = Entry::Occupied(value);
+                // ... and advance past it (the scan above would skip it anyway)
+                occupied_until += 1;
+            }
+        }
+        guard.slab.next = guard.slab.len;
+        guard.slab.entries.shrink_to_fit();
+        // Normal cleanup is not necessary
+        mem::forget(guard);
+    }
+
+    /// Clear the slab of all values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// for i in 0..3 {
+    ///     slab.insert(i);
+    /// }
+    ///
+    /// slab.clear();
+    /// assert!(slab.is_empty());
+    /// ```
+    pub fn clear(&mut self) {
+        self.entries.clear();
+        self.len = 0;
+        self.next = 0;
+    }
+
+    /// Return the number of stored values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// for i in 0..3 {
+    ///     slab.insert(i);
+    /// }
+    ///
+    /// assert_eq!(3, slab.len());
+    /// ```
+    pub fn len(&self) -> usize {
+        self.len
+    }
+
+    /// Return `true` if there are no values stored in the slab.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    /// assert!(slab.is_empty());
+    ///
+    /// slab.insert(1);
+    /// assert!(!slab.is_empty());
+    /// ```
+    pub fn is_empty(&self) -> bool {
+        self.len == 0
+    }
+
+    /// Return an iterator over the slab.
+    ///
+    /// This function should generally be **avoided** as it is not efficient.
+    /// Iterators must iterate over every slot in the slab even if it is
+    /// vacant. As such, a slab with a capacity of 1 million but only one
+    /// stored value must still iterate the million slots.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// for i in 0..3 {
+    ///     slab.insert(i);
+    /// }
+    ///
+    /// let mut iterator = slab.iter();
+    ///
+    /// assert_eq!(iterator.next(), Some((0, &0)));
+    /// assert_eq!(iterator.next(), Some((1, &1)));
+    /// assert_eq!(iterator.next(), Some((2, &2)));
+    /// assert_eq!(iterator.next(), None);
+    /// ```
+    pub fn iter(&self) -> Iter<'_, T> {
+        Iter {
+            entries: self.entries.iter().enumerate(),
+            len: self.len,
+        }
+    }
+
+    /// Return an iterator that allows modifying each value.
+    ///
+    /// This function should generally be **avoided** as it is not efficient.
+    /// Iterators must iterate over every slot in the slab even if it is
+    /// vacant. As such, a slab with a capacity of 1 million but only one
+    /// stored value must still iterate the million slots.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// let key1 = slab.insert(0);
+    /// let key2 = slab.insert(1);
+    ///
+    /// for (key, val) in slab.iter_mut() {
+    ///     if key == key1 {
+    ///         *val += 2;
+    ///     }
+    /// }
+    ///
+    /// assert_eq!(slab[key1], 2);
+    /// assert_eq!(slab[key2], 1);
+    /// ```
+    pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+        IterMut {
+            entries: self.entries.iter_mut().enumerate(),
+            len: self.len,
+        }
+    }
+
+    /// Return a reference to the value associated with the given key.
+    ///
+    /// If the given key is not associated with a value, then `None` is
+    /// returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    /// let key = slab.insert("hello");
+    ///
+    /// assert_eq!(slab.get(key), Some(&"hello"));
+    /// assert_eq!(slab.get(123), None);
+    /// ```
+    pub fn get(&self, key: usize) -> Option<&T> {
+        match self.entries.get(key) {
+            Some(Entry::Occupied(val)) => Some(val),
+            _ => None,
+        }
+    }
+
+    /// Return a mutable reference to the value associated with the given key.
+    ///
+    /// If the given key is not associated with a value, then `None` is
+    /// returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    /// let key = slab.insert("hello");
+    ///
+    /// *slab.get_mut(key).unwrap() = "world";
+    ///
+    /// assert_eq!(slab[key], "world");
+    /// assert_eq!(slab.get_mut(123), None);
+    /// ```
+    pub fn get_mut(&mut self, key: usize) -> Option<&mut T> {
+        match self.entries.get_mut(key) {
+            Some(&mut Entry::Occupied(ref mut val)) => Some(val),
+            _ => None,
+        }
+    }
+
+    /// Return two mutable references to the values associated with the two
+    /// given keys simultaneously.
+    ///
+    /// If any one of the given keys is not associated with a value, then `None`
+    /// is returned.
+    ///
+    /// This function can be used to get two mutable references out of one slab,
+    /// so that you can manipulate both of them at the same time, e.g. swap them.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if `key1` and `key2` are the same.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// use std::mem;
+    ///
+    /// let mut slab = Slab::new();
+    /// let key1 = slab.insert(1);
+    /// let key2 = slab.insert(2);
+    /// let (value1, value2) = slab.get2_mut(key1, key2).unwrap();
+    /// mem::swap(value1, value2);
+    /// assert_eq!(slab[key1], 2);
+    /// assert_eq!(slab[key2], 1);
+    /// ```
+    pub fn get2_mut(&mut self, key1: usize, key2: usize) -> Option<(&mut T, &mut T)> {
+        assert!(key1 != key2);
+
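+        // `split_at_mut` yields two non-overlapping mutable slices, so two
+        // mutable references can be handed out at once; the larger key always
+        // lands at the start of the second slice.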
+        let (entry1, entry2);
+
+        if key1 > key2 {
+            let (slice1, slice2) = self.entries.split_at_mut(key1);
+            entry1 = slice2.get_mut(0);
+            entry2 = slice1.get_mut(key2);
+        } else {
+            let (slice1, slice2) = self.entries.split_at_mut(key2);
+            entry1 = slice1.get_mut(key1);
+            entry2 = slice2.get_mut(0);
+        }
+
+        match (entry1, entry2) {
+            (
+                Some(&mut Entry::Occupied(ref mut val1)),
+                Some(&mut Entry::Occupied(ref mut val2)),
+            ) => Some((val1, val2)),
+            _ => None,
+        }
+    }
+
+    /// Return a reference to the value associated with the given key without
+    /// performing bounds checking.
+    ///
+    /// For a safe alternative see [`get`](Slab::get).
+    ///
+    /// This function should be used with care.
+    ///
+    /// # Safety
+    ///
+    /// The key must be within bounds.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    /// let key = slab.insert(2);
+    ///
+    /// unsafe {
+    ///     assert_eq!(slab.get_unchecked(key), &2);
+    /// }
+    /// ```
+    pub unsafe fn get_unchecked(&self, key: usize) -> &T {
+        match *self.entries.get_unchecked(key) {
+            Entry::Occupied(ref val) => val,
+            _ => unreachable!(),
+        }
+    }
+
+    /// Return a mutable reference to the value associated with the given key
+    /// without performing bounds checking.
+    ///
+    /// For a safe alternative see [`get_mut`](Slab::get_mut).
+    ///
+    /// This function should be used with care.
+    ///
+    /// # Safety
+    ///
+    /// The key must be within bounds.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    /// let key = slab.insert(2);
+    ///
+    /// unsafe {
+    ///     let val = slab.get_unchecked_mut(key);
+    ///     *val = 13;
+    /// }
+    ///
+    /// assert_eq!(slab[key], 13);
+    /// ```
+    pub unsafe fn get_unchecked_mut(&mut self, key: usize) -> &mut T {
+        match *self.entries.get_unchecked_mut(key) {
+            Entry::Occupied(ref mut val) => val,
+            _ => unreachable!(),
+        }
+    }
+
+    /// Return two mutable references to the values associated with the two
+    /// given keys simultaneously without performing bounds checking and safety
+    /// condition checking.
+    ///
+    /// For a safe alternative see [`get2_mut`](Slab::get2_mut).
+    ///
+    /// This function should be used with care.
+    ///
+    /// # Safety
+    ///
+    /// - Both keys must be within bounds.
+    /// - The condition `key1 != key2` must hold.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// use std::mem;
+    ///
+    /// let mut slab = Slab::new();
+    /// let key1 = slab.insert(1);
+    /// let key2 = slab.insert(2);
+    /// let (value1, value2) = unsafe { slab.get2_unchecked_mut(key1, key2) };
+    /// mem::swap(value1, value2);
+    /// assert_eq!(slab[key1], 2);
+    /// assert_eq!(slab[key2], 1);
+    /// ```
+    pub unsafe fn get2_unchecked_mut(&mut self, key1: usize, key2: usize) -> (&mut T, &mut T) {
+        debug_assert_ne!(key1, key2);
+        let ptr = self.entries.as_mut_ptr();
+        let ptr1 = ptr.add(key1);
+        let ptr2 = ptr.add(key2);
+        match (&mut *ptr1, &mut *ptr2) {
+            (&mut Entry::Occupied(ref mut val1), &mut Entry::Occupied(ref mut val2)) => {
+                (val1, val2)
+            }
+            _ => unreachable!(),
+        }
+    }
+
+    /// Get the key for an element in the slab.
+    ///
+    /// The reference must point to an element owned by the slab.
+    /// Otherwise this function will panic.
+    /// This is a constant-time operation because the key can be calculated
+    /// from the reference with pointer arithmetic.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the reference does not point to an element
+    /// of the slab.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    ///
+    /// let mut slab = Slab::new();
+    /// let key = slab.insert(String::from("foo"));
+    /// let value = &slab[key];
+    /// assert_eq!(slab.key_of(value), key);
+    /// ```
+    ///
+    /// Values are not compared, so passing a reference to a different location
+    /// will result in a panic:
+    ///
+    /// ```should_panic
+    /// # use slab::*;
+    ///
+    /// let mut slab = Slab::new();
+    /// let key = slab.insert(0);
+    /// let bad = &0;
+    /// slab.key_of(bad); // this will panic
+    /// unreachable!();
+    /// ```
+    #[cfg_attr(not(slab_no_track_caller), track_caller)]
+    pub fn key_of(&self, present_element: &T) -> usize {
+        let element_ptr = present_element as *const T as usize;
+        let base_ptr = self.entries.as_ptr() as usize;
+        // Use wrapping subtraction in case the reference is bad
+        let byte_offset = element_ptr.wrapping_sub(base_ptr);
+        // The division rounds away any offset of T inside Entry.
+        // The size of Entry<T> is never zero, even if T is zero-sized, because
+        // of the Vacant(usize) variant.
+        let key = byte_offset / mem::size_of::<Entry<T>>();
+        // Prevent returning unspecified (but out of bounds) values
+        if key >= self.entries.len() {
+            panic!("The reference points to a value outside this slab");
+        }
+        // The reference cannot point to a vacant entry, because then it would not be valid
+        key
+    }
+
+    /// Insert a value in the slab, returning the key assigned to the value.
+    ///
+    /// The returned key can later be used to retrieve or remove the value using indexed
+    /// lookup and `remove`. Additional capacity is allocated if needed. See
+    /// [Capacity and reallocation](index.html#capacity-and-reallocation).
+    ///
+    /// # Panics
+    ///
+    /// Panics if the new storage in the vector exceeds `isize::MAX` bytes.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    /// let key = slab.insert("hello");
+    /// assert_eq!(slab[key], "hello");
+    /// ```
+    pub fn insert(&mut self, val: T) -> usize {
+        let key = self.next;
+
+        self.insert_at(key, val);
+
+        key
+    }
+
+    /// Returns the key of the next vacant entry.
+    ///
+    /// This function returns the key of the vacant entry which will be used
+    /// for the next insertion. This is equivalent to
+    /// `slab.vacant_entry().key()`, but it doesn't require mutable access.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    /// assert_eq!(slab.vacant_key(), 0);
+    ///
+    /// slab.insert(0);
+    /// assert_eq!(slab.vacant_key(), 1);
+    ///
+    /// slab.insert(1);
+    /// slab.remove(0);
+    /// assert_eq!(slab.vacant_key(), 0);
+    /// ```
+    pub fn vacant_key(&self) -> usize {
+        self.next
+    }
+
+    /// Return a handle to a vacant entry allowing for further manipulation.
+    ///
+    /// This function is useful when creating values that must contain their
+    /// slab key. The returned `VacantEntry` reserves a slot in the slab and is
+    /// able to query the associated key.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// let hello = {
+    ///     let entry = slab.vacant_entry();
+    ///     let key = entry.key();
+    ///
+    ///     entry.insert((key, "hello"));
+    ///     key
+    /// };
+    ///
+    /// assert_eq!(hello, slab[hello].0);
+    /// assert_eq!("hello", slab[hello].1);
+    /// ```
+    pub fn vacant_entry(&mut self) -> VacantEntry<'_, T> {
+        VacantEntry {
+            key: self.next,
+            slab: self,
+        }
+    }
+
+    fn insert_at(&mut self, key: usize, val: T) {
+        self.len += 1;
+
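+        // Either append at the tail, or pop the head of the vacant list and
+        // point `next` at the slot that head was chained to.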
+        if key == self.entries.len() {
+            self.entries.push(Entry::Occupied(val));
+            self.next = key + 1;
+        } else {
+            self.next = match self.entries.get(key) {
+                Some(&Entry::Vacant(next)) => next,
+                _ => unreachable!(),
+            };
+            self.entries[key] = Entry::Occupied(val);
+        }
+    }
+
+    /// Tries to remove the value associated with the given key,
+    /// returning the value if the key existed.
+    ///
+    /// The key is then released and may be associated with future stored
+    /// values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// let hello = slab.insert("hello");
+    ///
+    /// assert_eq!(slab.try_remove(hello), Some("hello"));
+    /// assert!(!slab.contains(hello));
+    /// ```
+    pub fn try_remove(&mut self, key: usize) -> Option<T> {
+        if let Some(entry) = self.entries.get_mut(key) {
+            // Swap out the entry at the provided key for a vacant one
+            let prev = mem::replace(entry, Entry::Vacant(self.next));
+
+            match prev {
+                Entry::Occupied(val) => {
+                    self.len -= 1;
+                    self.next = key;
+                    return val.into();
+                }
+                _ => {
+                    // Whoops, the entry is actually vacant; restore the state
+                    *entry = prev;
+                }
+            }
+        }
+        None
+    }
+
+    /// Remove and return the value associated with the given key.
+    ///
+    /// The key is then released and may be associated with future stored
+    /// values.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `key` is not associated with a value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// let hello = slab.insert("hello");
+    ///
+    /// assert_eq!(slab.remove(hello), "hello");
+    /// assert!(!slab.contains(hello));
+    /// ```
+    #[cfg_attr(not(slab_no_track_caller), track_caller)]
+    pub fn remove(&mut self, key: usize) -> T {
+        self.try_remove(key).expect("invalid key")
+    }
+
+    /// Return `true` if a value is associated with the given key.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// let hello = slab.insert("hello");
+    /// assert!(slab.contains(hello));
+    ///
+    /// slab.remove(hello);
+    ///
+    /// assert!(!slab.contains(hello));
+    /// ```
+    pub fn contains(&self, key: usize) -> bool {
+        match self.entries.get(key) {
+            Some(&Entry::Occupied(_)) => true,
+            _ => false,
+        }
+    }
+
+    /// Retain only the elements specified by the predicate.
+    ///
+    /// In other words, remove all elements `e` such that `f(usize, &mut e)`
+    /// returns false. This method operates in place and preserves the key
+    /// associated with the retained values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// let k1 = slab.insert(0);
+    /// let k2 = slab.insert(1);
+    /// let k3 = slab.insert(2);
+    ///
+    /// slab.retain(|key, val| key == k1 || *val == 1);
+    ///
+    /// assert!(slab.contains(k1));
+    /// assert!(slab.contains(k2));
+    /// assert!(!slab.contains(k3));
+    ///
+    /// assert_eq!(2, slab.len());
+    /// ```
+    pub fn retain<F>(&mut self, mut f: F)
+    where
+        F: FnMut(usize, &mut T) -> bool,
+    {
+        for i in 0..self.entries.len() {
+            let keep = match self.entries[i] {
+                Entry::Occupied(ref mut v) => f(i, v),
+                _ => true,
+            };
+
+            if !keep {
+                self.remove(i);
+            }
+        }
+    }
+
+    /// Return a draining iterator that removes all elements from the slab and
+    /// yields the removed items.
+    ///
+    /// Note: Elements are removed even if the iterator is only partially
+    /// consumed or not consumed at all.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// let _ = slab.insert(0);
+    /// let _ = slab.insert(1);
+    /// let _ = slab.insert(2);
+    ///
+    /// {
+    ///     let mut drain = slab.drain();
+    ///
+    ///     assert_eq!(Some(0), drain.next());
+    ///     assert_eq!(Some(1), drain.next());
+    ///     assert_eq!(Some(2), drain.next());
+    ///     assert_eq!(None, drain.next());
+    /// }
+    ///
+    /// assert!(slab.is_empty());
+    /// ```
+    pub fn drain(&mut self) -> Drain<'_, T> {
+        let old_len = self.len;
+        self.len = 0;
+        self.next = 0;
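+        // `vec::Drain` removes any remaining entries when it is dropped, so
+        // the slab ends up empty even if the iterator is not fully consumed.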
+        Drain {
+            inner: self.entries.drain(..),
+            len: old_len,
+        }
+    }
+}
+
+impl<T> ops::Index<usize> for Slab<T> {
+    type Output = T;
+
+    #[cfg_attr(not(slab_no_track_caller), track_caller)]
+    fn index(&self, key: usize) -> &T {
+        match self.entries.get(key) {
+            Some(Entry::Occupied(v)) => v,
+            _ => panic!("invalid key"),
+        }
+    }
+}
+
+impl<T> ops::IndexMut<usize> for Slab<T> {
+    #[cfg_attr(not(slab_no_track_caller), track_caller)]
+    fn index_mut(&mut self, key: usize) -> &mut T {
+        match self.entries.get_mut(key) {
+            Some(&mut Entry::Occupied(ref mut v)) => v,
+            _ => panic!("invalid key"),
+        }
+    }
+}
+
+impl<T> IntoIterator for Slab<T> {
+    type Item = (usize, T);
+    type IntoIter = IntoIter<T>;
+
+    fn into_iter(self) -> IntoIter<T> {
+        IntoIter {
+            entries: self.entries.into_iter().enumerate(),
+            len: self.len,
+        }
+    }
+}
+
+impl<'a, T> IntoIterator for &'a Slab<T> {
+    type Item = (usize, &'a T);
+    type IntoIter = Iter<'a, T>;
+
+    fn into_iter(self) -> Iter<'a, T> {
+        self.iter()
+    }
+}
+
+impl<'a, T> IntoIterator for &'a mut Slab<T> {
+    type Item = (usize, &'a mut T);
+    type IntoIter = IterMut<'a, T>;
+
+    fn into_iter(self) -> IterMut<'a, T> {
+        self.iter_mut()
+    }
+}
+
+/// Create a slab from an iterator of key-value pairs.
+///
+/// If the iterator produces duplicate keys, the previous value is replaced with the later one.
+/// The keys do not need to be sorted beforehand, and this function always
+/// takes O(n) time.
+/// Note that the returned slab will use space proportional to the largest key,
+/// so don't use `Slab` with untrusted keys.
+///
+/// # Examples
+///
+/// ```
+/// # use slab::*;
+///
+/// let vec = vec![(2,'a'), (6,'b'), (7,'c')];
+/// let slab = vec.into_iter().collect::<Slab<char>>();
+/// assert_eq!(slab.len(), 3);
+/// assert!(slab.capacity() >= 8);
+/// assert_eq!(slab[2], 'a');
+/// ```
+///
+/// With duplicate and unsorted keys:
+///
+/// ```
+/// # use slab::*;
+///
+/// let vec = vec![(20,'a'), (10,'b'), (11,'c'), (10,'d')];
+/// let slab = vec.into_iter().collect::<Slab<char>>();
+/// assert_eq!(slab.len(), 3);
+/// assert_eq!(slab[10], 'd');
+/// ```
+impl<T> FromIterator<(usize, T)> for Slab<T> {
+    fn from_iter<I>(iterable: I) -> Self
+    where
+        I: IntoIterator<Item = (usize, T)>,
+    {
+        let iterator = iterable.into_iter();
+        let mut builder = builder::Builder::with_capacity(iterator.size_hint().0);
+
+        for (key, value) in iterator {
+            builder.pair(key, value)
+        }
+        builder.build()
+    }
+}
+
+impl<T> fmt::Debug for Slab<T>
+where
+    T: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if fmt.alternate() {
+            fmt.debug_map().entries(self.iter()).finish()
+        } else {
+            fmt.debug_struct("Slab")
+                .field("len", &self.len)
+                .field("cap", &self.capacity())
+                .finish()
+        }
+    }
+}
+
+impl<T> fmt::Debug for IntoIter<T>
+where
+    T: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("IntoIter")
+            .field("remaining", &self.len)
+            .finish()
+    }
+}
+
+impl<T> fmt::Debug for Iter<'_, T>
+where
+    T: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("Iter")
+            .field("remaining", &self.len)
+            .finish()
+    }
+}
+
+impl<T> fmt::Debug for IterMut<'_, T>
+where
+    T: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("IterMut")
+            .field("remaining", &self.len)
+            .finish()
+    }
+}
+
+impl<T> fmt::Debug for Drain<'_, T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("Drain").finish()
+    }
+}
+
+// ===== VacantEntry =====
+
+impl<'a, T> VacantEntry<'a, T> {
+    /// Insert a value in the entry, returning a mutable reference to the value.
+    ///
+    /// To get the key associated with the value, use `key` prior to calling
+    /// `insert`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// let hello = {
+    ///     let entry = slab.vacant_entry();
+    ///     let key = entry.key();
+    ///
+    ///     entry.insert((key, "hello"));
+    ///     key
+    /// };
+    ///
+    /// assert_eq!(hello, slab[hello].0);
+    /// assert_eq!("hello", slab[hello].1);
+    /// ```
+    pub fn insert(self, val: T) -> &'a mut T {
+        self.slab.insert_at(self.key, val);
+
+        match self.slab.entries.get_mut(self.key) {
+            Some(&mut Entry::Occupied(ref mut v)) => v,
+            _ => unreachable!(),
+        }
+    }
+
+    /// Return the key associated with this entry.
+    ///
+    /// A value stored in this entry will be associated with this key.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use slab::*;
+    /// let mut slab = Slab::new();
+    ///
+    /// let hello = {
+    ///     let entry = slab.vacant_entry();
+    ///     let key = entry.key();
+    ///
+    ///     entry.insert((key, "hello"));
+    ///     key
+    /// };
+    ///
+    /// assert_eq!(hello, slab[hello].0);
+    /// assert_eq!("hello", slab[hello].1);
+    /// ```
+    pub fn key(&self) -> usize {
+        self.key
+    }
+}
+
+// ===== IntoIter =====
+
+impl<T> Iterator for IntoIter<T> {
+    type Item = (usize, T);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        for (key, entry) in &mut self.entries {
+            if let Entry::Occupied(v) = entry {
+                self.len -= 1;
+                return Some((key, v));
+            }
+        }
+
+        debug_assert_eq!(self.len, 0);
+        None
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.len, Some(self.len))
+    }
+}
+
+impl<T> DoubleEndedIterator for IntoIter<T> {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        while let Some((key, entry)) = self.entries.next_back() {
+            if let Entry::Occupied(v) = entry {
+                self.len -= 1;
+                return Some((key, v));
+            }
+        }
+
+        debug_assert_eq!(self.len, 0);
+        None
+    }
+}
+
+impl<T> ExactSizeIterator for IntoIter<T> {
+    fn len(&self) -> usize {
+        self.len
+    }
+}
+
+impl<T> FusedIterator for IntoIter<T> {}
+
+// ===== Iter =====
+
+impl<'a, T> Iterator for Iter<'a, T> {
+    type Item = (usize, &'a T);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        for (key, entry) in &mut self.entries {
+            if let Entry::Occupied(ref v) = *entry {
+                self.len -= 1;
+                return Some((key, v));
+            }
+        }
+
+        debug_assert_eq!(self.len, 0);
+        None
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.len, Some(self.len))
+    }
+}
+
+impl<T> DoubleEndedIterator for Iter<'_, T> {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        while let Some((key, entry)) = self.entries.next_back() {
+            if let Entry::Occupied(ref v) = *entry {
+                self.len -= 1;
+                return Some((key, v));
+            }
+        }
+
+        debug_assert_eq!(self.len, 0);
+        None
+    }
+}
+
+impl<T> ExactSizeIterator for Iter<'_, T> {
+    fn len(&self) -> usize {
+        self.len
+    }
+}
+
+impl<T> FusedIterator for Iter<'_, T> {}
+
+// ===== IterMut =====
+
+impl<'a, T> Iterator for IterMut<'a, T> {
+    type Item = (usize, &'a mut T);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        for (key, entry) in &mut self.entries {
+            if let Entry::Occupied(ref mut v) = *entry {
+                self.len -= 1;
+                return Some((key, v));
+            }
+        }
+
+        debug_assert_eq!(self.len, 0);
+        None
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.len, Some(self.len))
+    }
+}
+
+impl<T> DoubleEndedIterator for IterMut<'_, T> {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        while let Some((key, entry)) = self.entries.next_back() {
+            if let Entry::Occupied(ref mut v) = *entry {
+                self.len -= 1;
+                return Some((key, v));
+            }
+        }
+
+        debug_assert_eq!(self.len, 0);
+        None
+    }
+}
+
+impl<T> ExactSizeIterator for IterMut<'_, T> {
+    fn len(&self) -> usize {
+        self.len
+    }
+}
+
+impl<T> FusedIterator for IterMut<'_, T> {}
+
+// ===== Drain =====
+
+impl<T> Iterator for Drain<'_, T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        for entry in &mut self.inner {
+            if let Entry::Occupied(v) = entry {
+                self.len -= 1;
+                return Some(v);
+            }
+        }
+
+        debug_assert_eq!(self.len, 0);
+        None
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.len, Some(self.len))
+    }
+}
+
+impl<T> DoubleEndedIterator for Drain<'_, T> {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        while let Some(entry) = self.inner.next_back() {
+            if let Entry::Occupied(v) = entry {
+                self.len -= 1;
+                return Some(v);
+            }
+        }
+
+        debug_assert_eq!(self.len, 0);
+        None
+    }
+}
+
+impl<T> ExactSizeIterator for Drain<'_, T> {
+    fn len(&self) -> usize {
+        self.len
+    }
+}
+
+impl<T> FusedIterator for Drain<'_, T> {}
diff --git a/crates/slab/src/serde.rs b/crates/slab/src/serde.rs
new file mode 100644
index 0000000..894d59c
--- /dev/null
+++ b/crates/slab/src/serde.rs
@@ -0,0 +1,62 @@
+use core::fmt;
+use core::marker::PhantomData;
+
+use serde::de::{Deserialize, Deserializer, MapAccess, Visitor};
+use serde::ser::{Serialize, SerializeMap, Serializer};
+
+use super::{builder::Builder, Slab};
+
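+// A `Slab<T>` round-trips through serde as a map keyed by `usize`. Sparse and
+// unordered keys are accepted on deserialization because the entries are fed
+// through the crate-internal `Builder`.
+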
+impl<T> Serialize for Slab<T>
+where
+    T: Serialize,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut map_serializer = serializer.serialize_map(Some(self.len()))?;
+        for (key, value) in self {
+            map_serializer.serialize_key(&key)?;
+            map_serializer.serialize_value(value)?;
+        }
+        map_serializer.end()
+    }
+}
+
+struct SlabVisitor<T>(PhantomData<T>);
+
+impl<'de, T> Visitor<'de> for SlabVisitor<T>
+where
+    T: Deserialize<'de>,
+{
+    type Value = Slab<T>;
+
+    fn expecting(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(fmt, "a map")
+    }
+
+    fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
+    where
+        A: MapAccess<'de>,
+    {
+        let mut builder = Builder::with_capacity(map.size_hint().unwrap_or(0));
+
+        while let Some((key, value)) = map.next_entry()? {
+            builder.pair(key, value)
+        }
+
+        Ok(builder.build())
+    }
+}
+
+impl<'de, T> Deserialize<'de> for Slab<T>
+where
+    T: Deserialize<'de>,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        deserializer.deserialize_map(SlabVisitor(PhantomData))
+    }
+}
diff --git a/crates/slab/tests/serde.rs b/crates/slab/tests/serde.rs
new file mode 100644
index 0000000..1d4a204
--- /dev/null
+++ b/crates/slab/tests/serde.rs
@@ -0,0 +1,49 @@
+#![cfg(feature = "serde")]
+#![warn(rust_2018_idioms)]
+
+use serde::{Deserialize, Serialize};
+use serde_test::{assert_tokens, Token};
+use slab::Slab;
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(transparent)]
+struct SlabPartialEq<T>(Slab<T>);
+
+impl<T: PartialEq> PartialEq for SlabPartialEq<T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.0.len() == other.0.len()
+            && self
+                .0
+                .iter()
+                .zip(other.0.iter())
+                .all(|(this, other)| this.0 == other.0 && this.1 == other.1)
+    }
+}
+
+#[test]
+fn test_serde_empty() {
+    let slab = Slab::<usize>::new();
+    assert_tokens(
+        &SlabPartialEq(slab),
+        &[Token::Map { len: Some(0) }, Token::MapEnd],
+    );
+}
+
+#[test]
+fn test_serde() {
+    let vec = vec![(1, 2), (3, 4), (5, 6)];
+    let slab: Slab<_> = vec.iter().cloned().collect();
+    assert_tokens(
+        &SlabPartialEq(slab),
+        &[
+            Token::Map { len: Some(3) },
+            Token::U64(1),
+            Token::I32(2),
+            Token::U64(3),
+            Token::I32(4),
+            Token::U64(5),
+            Token::I32(6),
+            Token::MapEnd,
+        ],
+    );
+}
diff --git a/crates/slab/tests/slab.rs b/crates/slab/tests/slab.rs
new file mode 100644
index 0000000..e18b40e
--- /dev/null
+++ b/crates/slab/tests/slab.rs
@@ -0,0 +1,735 @@
+#![warn(rust_2018_idioms)]
+
+use slab::*;
+
+use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
+
+#[test]
+fn insert_get_remove_one() {
+    let mut slab = Slab::new();
+    assert!(slab.is_empty());
+
+    let key = slab.insert(10);
+
+    assert_eq!(slab[key], 10);
+    assert_eq!(slab.get(key), Some(&10));
+    assert!(!slab.is_empty());
+    assert!(slab.contains(key));
+
+    assert_eq!(slab.remove(key), 10);
+    assert!(!slab.contains(key));
+    assert!(slab.get(key).is_none());
+}
+
+#[test]
+fn insert_get_many() {
+    let mut slab = Slab::with_capacity(10);
+
+    for i in 0..10 {
+        let key = slab.insert(i + 10);
+        assert_eq!(slab[key], i + 10);
+    }
+
+    assert_eq!(slab.capacity(), 10);
+
+    // Storing another one grows the slab
+    let key = slab.insert(20);
+    assert_eq!(slab[key], 20);
+
+    // Capacity grows by 2x
+    assert_eq!(slab.capacity(), 20);
+}
+
+#[test]
+fn insert_get_remove_many() {
+    let mut slab = Slab::with_capacity(10);
+    let mut keys = vec![];
+
+    for i in 0..10 {
+        for j in 0..10 {
+            let val = (i * 10) + j;
+
+            let key = slab.insert(val);
+            keys.push((key, val));
+            assert_eq!(slab[key], val);
+        }
+
+        for (key, val) in keys.drain(..) {
+            assert_eq!(val, slab.remove(key));
+        }
+    }
+
+    assert_eq!(10, slab.capacity());
+}
+
+#[test]
+fn insert_with_vacant_entry() {
+    let mut slab = Slab::with_capacity(1);
+    let key;
+
+    {
+        let entry = slab.vacant_entry();
+        key = entry.key();
+        entry.insert(123);
+    }
+
+    assert_eq!(123, slab[key]);
+}
+
+#[test]
+fn get_vacant_entry_without_using() {
+    let mut slab = Slab::<usize>::with_capacity(1);
+    let key = slab.vacant_entry().key();
+    assert_eq!(key, slab.vacant_entry().key());
+}
+
+#[test]
+#[should_panic(expected = "invalid key")]
+fn invalid_get_panics() {
+    let slab = Slab::<usize>::with_capacity(1);
+    let _ = &slab[0];
+}
+
+#[test]
+#[should_panic(expected = "invalid key")]
+fn invalid_get_mut_panics() {
+    let mut slab = Slab::<usize>::new();
+    let _ = &mut slab[0];
+}
+
+#[test]
+#[should_panic(expected = "invalid key")]
+fn double_remove_panics() {
+    let mut slab = Slab::<usize>::with_capacity(1);
+    let key = slab.insert(123);
+    slab.remove(key);
+    slab.remove(key);
+}
+
+#[test]
+#[should_panic(expected = "invalid key")]
+fn invalid_remove_panics() {
+    let mut slab = Slab::<usize>::with_capacity(1);
+    slab.remove(0);
+}
+
+#[test]
+fn slab_get_mut() {
+    let mut slab = Slab::new();
+    let key = slab.insert(1);
+
+    slab[key] = 2;
+    assert_eq!(slab[key], 2);
+
+    *slab.get_mut(key).unwrap() = 3;
+    assert_eq!(slab[key], 3);
+}
+
+#[test]
+fn key_of_tagged() {
+    let mut slab = Slab::new();
+    slab.insert(0);
+    assert_eq!(slab.key_of(&slab[0]), 0);
+}
+
+#[test]
+fn key_of_layout_optimizable() {
+    // Entry<&str> doesn't need a discriminant tag because it can use the
+    // nonzero-ness of ptr and store Vacant's next at the same offset as len
+    let mut slab = Slab::new();
+    slab.insert("foo");
+    slab.insert("bar");
+    let third = slab.insert("baz");
+    slab.insert("quux");
+    assert_eq!(slab.key_of(&slab[third]), third);
+}
+
+#[test]
+fn key_of_zst() {
+    let mut slab = Slab::new();
+    slab.insert(());
+    let second = slab.insert(());
+    slab.insert(());
+    assert_eq!(slab.key_of(&slab[second]), second);
+}
+
+#[test]
+fn reserve_does_not_allocate_if_available() {
+    let mut slab = Slab::with_capacity(10);
+    let mut keys = vec![];
+
+    for i in 0..6 {
+        keys.push(slab.insert(i));
+    }
+
+    for key in 0..4 {
+        slab.remove(key);
+    }
+
+    assert!(slab.capacity() - slab.len() == 8);
+
+    slab.reserve(8);
+    assert_eq!(10, slab.capacity());
+}
+
+#[test]
+fn reserve_exact_does_not_allocate_if_available() {
+    let mut slab = Slab::with_capacity(10);
+    let mut keys = vec![];
+
+    for i in 0..6 {
+        keys.push(slab.insert(i));
+    }
+
+    for key in 0..4 {
+        slab.remove(key);
+    }
+
+    assert!(slab.capacity() - slab.len() == 8);
+
+    slab.reserve_exact(8);
+    assert_eq!(10, slab.capacity());
+}
+
+#[test]
+#[should_panic(expected = "capacity overflow")]
+fn reserve_does_panic_with_capacity_overflow() {
+    let mut slab = Slab::with_capacity(10);
+    slab.insert(true);
+    slab.reserve(std::isize::MAX as usize);
+}
+
+#[test]
+#[should_panic(expected = "capacity overflow")]
+fn reserve_does_panic_with_capacity_overflow_bytes() {
+    let mut slab = Slab::with_capacity(10);
+    slab.insert(1u16);
+    slab.reserve((std::isize::MAX as usize) / 2);
+}
+
+#[test]
+#[should_panic(expected = "capacity overflow")]
+fn reserve_exact_does_panic_with_capacity_overflow() {
+    let mut slab = Slab::with_capacity(10);
+    slab.insert(true);
+    slab.reserve_exact(std::isize::MAX as usize);
+}
+
+#[test]
+fn retain() {
+    let mut slab = Slab::with_capacity(2);
+
+    let key1 = slab.insert(0);
+    let key2 = slab.insert(1);
+
+    slab.retain(|key, x| {
+        assert_eq!(key, *x);
+        *x % 2 == 0
+    });
+
+    assert_eq!(slab.len(), 1);
+    assert_eq!(slab[key1], 0);
+    assert!(!slab.contains(key2));
+
+    // Ensure consistency is retained
+    let key = slab.insert(123);
+    assert_eq!(key, key2);
+
+    assert_eq!(2, slab.len());
+    assert_eq!(2, slab.capacity());
+
+    // Inserting another element grows
+    let key = slab.insert(345);
+    assert_eq!(key, 2);
+
+    assert_eq!(4, slab.capacity());
+}
+
+#[test]
+fn into_iter() {
+    let mut slab = Slab::new();
+
+    for i in 0..8 {
+        slab.insert(i);
+    }
+    slab.remove(0);
+    slab.remove(4);
+    slab.remove(5);
+    slab.remove(7);
+
+    let vals: Vec<_> = slab
+        .into_iter()
+        .inspect(|&(key, val)| assert_eq!(key, val))
+        .map(|(_, val)| val)
+        .collect();
+    assert_eq!(vals, vec![1, 2, 3, 6]);
+}
+
+#[test]
+fn into_iter_rev() {
+    let mut slab = Slab::new();
+
+    for i in 0..4 {
+        slab.insert(i);
+    }
+
+    let mut iter = slab.into_iter();
+    assert_eq!(iter.next_back(), Some((3, 3)));
+    assert_eq!(iter.next_back(), Some((2, 2)));
+    assert_eq!(iter.next(), Some((0, 0)));
+    assert_eq!(iter.next_back(), Some((1, 1)));
+    assert_eq!(iter.next_back(), None);
+    assert_eq!(iter.next(), None);
+}
+
+#[test]
+fn iter() {
+    let mut slab = Slab::new();
+
+    for i in 0..4 {
+        slab.insert(i);
+    }
+
+    let vals: Vec<_> = slab
+        .iter()
+        .enumerate()
+        .map(|(i, (key, val))| {
+            assert_eq!(i, key);
+            *val
+        })
+        .collect();
+    assert_eq!(vals, vec![0, 1, 2, 3]);
+
+    slab.remove(1);
+
+    let vals: Vec<_> = slab.iter().map(|(_, r)| *r).collect();
+    assert_eq!(vals, vec![0, 2, 3]);
+}
+
+#[test]
+fn iter_rev() {
+    let mut slab = Slab::new();
+
+    for i in 0..4 {
+        slab.insert(i);
+    }
+    slab.remove(0);
+
+    let vals = slab.iter().rev().collect::<Vec<_>>();
+    assert_eq!(vals, vec![(3, &3), (2, &2), (1, &1)]);
+}
+
+#[test]
+fn iter_mut() {
+    let mut slab = Slab::new();
+
+    for i in 0..4 {
+        slab.insert(i);
+    }
+
+    for (i, (key, e)) in slab.iter_mut().enumerate() {
+        assert_eq!(i, key);
+        *e += 1;
+    }
+
+    let vals: Vec<_> = slab.iter().map(|(_, r)| *r).collect();
+    assert_eq!(vals, vec![1, 2, 3, 4]);
+
+    slab.remove(2);
+
+    for (_, e) in slab.iter_mut() {
+        *e += 1;
+    }
+
+    let vals: Vec<_> = slab.iter().map(|(_, r)| *r).collect();
+    assert_eq!(vals, vec![2, 3, 5]);
+}
+
+#[test]
+fn iter_mut_rev() {
+    let mut slab = Slab::new();
+
+    for i in 0..4 {
+        slab.insert(i);
+    }
+    slab.remove(2);
+
+    {
+        let mut iter = slab.iter_mut();
+        assert_eq!(iter.next(), Some((0, &mut 0)));
+        let mut prev_key = !0;
+        for (key, e) in iter.rev() {
+            *e += 10;
+            assert!(prev_key > key);
+            prev_key = key;
+        }
+    }
+
+    assert_eq!(slab[0], 0);
+    assert_eq!(slab[1], 11);
+    assert_eq!(slab[3], 13);
+    assert!(!slab.contains(2));
+}
+
+#[test]
+fn from_iterator_sorted() {
+    let mut slab = (0..5).map(|i| (i, i)).collect::<Slab<_>>();
+    assert_eq!(slab.len(), 5);
+    assert_eq!(slab[0], 0);
+    assert_eq!(slab[2], 2);
+    assert_eq!(slab[4], 4);
+    assert_eq!(slab.vacant_entry().key(), 5);
+}
+
+#[test]
+fn from_iterator_new_in_order() {
+    // all new keys come in increasing order, but existing keys are overwritten
+    let mut slab = [(0, 'a'), (1, 'a'), (1, 'b'), (0, 'b'), (9, 'a'), (0, 'c')]
+        .iter()
+        .cloned()
+        .collect::<Slab<_>>();
+    assert_eq!(slab.len(), 3);
+    assert_eq!(slab[0], 'c');
+    assert_eq!(slab[1], 'b');
+    assert_eq!(slab[9], 'a');
+    assert_eq!(slab.get(5), None);
+    assert_eq!(slab.vacant_entry().key(), 8);
+}
+
+#[test]
+fn from_iterator_unordered() {
+    let mut slab = vec![(1, "one"), (50, "fifty"), (3, "three"), (20, "twenty")]
+        .into_iter()
+        .collect::<Slab<_>>();
+    assert_eq!(slab.len(), 4);
+    assert_eq!(slab.vacant_entry().key(), 0);
+    let mut iter = slab.iter();
+    assert_eq!(iter.next(), Some((1, &"one")));
+    assert_eq!(iter.next(), Some((3, &"three")));
+    assert_eq!(iter.next(), Some((20, &"twenty")));
+    assert_eq!(iter.next(), Some((50, &"fifty")));
+    assert_eq!(iter.next(), None);
+}
+
+// https://github.com/tokio-rs/slab/issues/100
+#[test]
+fn from_iterator_issue_100() {
+    let mut slab: slab::Slab<()> = vec![(1, ())].into_iter().collect();
+    assert_eq!(slab.len(), 1);
+    assert_eq!(slab.insert(()), 0);
+    assert_eq!(slab.insert(()), 2);
+    assert_eq!(slab.insert(()), 3);
+
+    let mut slab: slab::Slab<()> = vec![(1, ()), (2, ())].into_iter().collect();
+    assert_eq!(slab.len(), 2);
+    assert_eq!(slab.insert(()), 0);
+    assert_eq!(slab.insert(()), 3);
+    assert_eq!(slab.insert(()), 4);
+
+    let mut slab: slab::Slab<()> = vec![(1, ()), (3, ())].into_iter().collect();
+    assert_eq!(slab.len(), 2);
+    assert_eq!(slab.insert(()), 2);
+    assert_eq!(slab.insert(()), 0);
+    assert_eq!(slab.insert(()), 4);
+
+    let mut slab: slab::Slab<()> = vec![(0, ()), (2, ()), (3, ()), (5, ())]
+        .into_iter()
+        .collect();
+    assert_eq!(slab.len(), 4);
+    assert_eq!(slab.insert(()), 4);
+    assert_eq!(slab.insert(()), 1);
+    assert_eq!(slab.insert(()), 6);
+}
+
+#[test]
+fn clear() {
+    let mut slab = Slab::new();
+
+    for i in 0..4 {
+        slab.insert(i);
+    }
+
+    // clear full
+    slab.clear();
+    assert!(slab.is_empty());
+
+    assert_eq!(0, slab.len());
+    assert_eq!(4, slab.capacity());
+
+    for i in 0..2 {
+        slab.insert(i);
+    }
+
+    let vals: Vec<_> = slab.iter().map(|(_, r)| *r).collect();
+    assert_eq!(vals, vec![0, 1]);
+
+    // clear half-filled
+    slab.clear();
+    assert!(slab.is_empty());
+}
+
+#[test]
+fn shrink_to_fit_empty() {
+    let mut slab = Slab::<bool>::with_capacity(20);
+    slab.shrink_to_fit();
+    assert_eq!(slab.capacity(), 0);
+}
+
+#[test]
+fn shrink_to_fit_no_vacant() {
+    let mut slab = Slab::with_capacity(20);
+    slab.insert(String::new());
+    slab.shrink_to_fit();
+    assert!(slab.capacity() < 10);
+}
+
+#[test]
+fn shrink_to_fit_doesnt_move() {
+    let mut slab = Slab::with_capacity(8);
+    slab.insert("foo");
+    let bar = slab.insert("bar");
+    slab.insert("baz");
+    let quux = slab.insert("quux");
+    slab.remove(quux);
+    slab.remove(bar);
+    slab.shrink_to_fit();
+    assert_eq!(slab.len(), 2);
+    assert!(slab.capacity() >= 3);
+    assert_eq!(slab.get(0), Some(&"foo"));
+    assert_eq!(slab.get(2), Some(&"baz"));
+    assert_eq!(slab.vacant_entry().key(), bar);
+}
+
+#[test]
+fn shrink_to_fit_doesnt_recreate_list_when_nothing_can_be_done() {
+    let mut slab = Slab::with_capacity(16);
+    for i in 0..4 {
+        slab.insert(Box::new(i));
+    }
+    slab.remove(0);
+    slab.remove(2);
+    slab.remove(1);
+    assert_eq!(slab.vacant_entry().key(), 1);
+    slab.shrink_to_fit();
+    assert_eq!(slab.len(), 1);
+    assert!(slab.capacity() >= 4);
+    assert_eq!(slab.vacant_entry().key(), 1);
+}
+
+#[test]
+fn compact_empty() {
+    let mut slab = Slab::new();
+    slab.compact(|_, _, _| panic!());
+    assert_eq!(slab.len(), 0);
+    assert_eq!(slab.capacity(), 0);
+    slab.reserve(20);
+    slab.compact(|_, _, _| panic!());
+    assert_eq!(slab.len(), 0);
+    assert_eq!(slab.capacity(), 0);
+    slab.insert(0);
+    slab.insert(1);
+    slab.insert(2);
+    slab.remove(1);
+    slab.remove(2);
+    slab.remove(0);
+    slab.compact(|_, _, _| panic!());
+    assert_eq!(slab.len(), 0);
+    assert_eq!(slab.capacity(), 0);
+}
+
+#[test]
+fn compact_no_moves_needed() {
+    let mut slab = Slab::new();
+    for i in 0..10 {
+        slab.insert(i);
+    }
+    slab.remove(8);
+    slab.remove(9);
+    slab.remove(6);
+    slab.remove(7);
+    slab.compact(|_, _, _| panic!());
+    assert_eq!(slab.len(), 6);
+    for ((index, &value), want) in slab.iter().zip(0..6) {
+        assert!(index == value);
+        assert_eq!(index, want);
+    }
+    assert!(slab.capacity() >= 6 && slab.capacity() < 10);
+}
+
+#[test]
+fn compact_moves_successfully() {
+    let mut slab = Slab::with_capacity(20);
+    for i in 0..10 {
+        slab.insert(i);
+    }
+    for &i in &[0, 5, 9, 6, 3] {
+        slab.remove(i);
+    }
+    let mut moved = 0;
+    slab.compact(|&mut v, from, to| {
+        assert!(from > to);
+        assert!(from >= 5);
+        assert!(to < 5);
+        assert_eq!(from, v);
+        moved += 1;
+        true
+    });
+    assert_eq!(slab.len(), 5);
+    assert_eq!(moved, 2);
+    assert_eq!(slab.vacant_entry().key(), 5);
+    assert!(slab.capacity() >= 5 && slab.capacity() < 20);
+    let mut iter = slab.iter();
+    assert_eq!(iter.next(), Some((0, &8)));
+    assert_eq!(iter.next(), Some((1, &1)));
+    assert_eq!(iter.next(), Some((2, &2)));
+    assert_eq!(iter.next(), Some((3, &7)));
+    assert_eq!(iter.next(), Some((4, &4)));
+    assert_eq!(iter.next(), None);
+}
+
+#[test]
+fn compact_doesnt_move_if_closure_errors() {
+    let mut slab = Slab::with_capacity(20);
+    for i in 0..10 {
+        slab.insert(i);
+    }
+    for &i in &[9, 3, 1, 4, 0] {
+        slab.remove(i);
+    }
+    slab.compact(|&mut v, from, to| {
+        assert!(from > to);
+        assert_eq!(from, v);
+        v != 6
+    });
+    assert_eq!(slab.len(), 5);
+    assert!(slab.capacity() >= 7 && slab.capacity() < 20);
+    assert_eq!(slab.vacant_entry().key(), 3);
+    let mut iter = slab.iter();
+    assert_eq!(iter.next(), Some((0, &8)));
+    assert_eq!(iter.next(), Some((1, &7)));
+    assert_eq!(iter.next(), Some((2, &2)));
+    assert_eq!(iter.next(), Some((5, &5)));
+    assert_eq!(iter.next(), Some((6, &6)));
+    assert_eq!(iter.next(), None);
+}
+
+#[test]
+// Android aborts on panic and this test relies on stack unwinding.
+#[cfg(not(target_os = "android"))]
+fn compact_handles_closure_panic() {
+    let mut slab = Slab::new();
+    for i in 0..10 {
+        slab.insert(i);
+    }
+    for i in 1..6 {
+        slab.remove(i);
+    }
+    let result = catch_unwind(AssertUnwindSafe(|| {
+        slab.compact(|&mut v, from, to| {
+            assert!(from > to);
+            assert_eq!(from, v);
+            if v == 7 {
+                panic!("test");
+            }
+            true
+        })
+    }));
+    match result {
+        Err(ref payload) if payload.downcast_ref() == Some(&"test") => {}
+        Err(bug) => resume_unwind(bug),
+        Ok(()) => unreachable!(),
+    }
+    assert_eq!(slab.len(), 5 - 1);
+    assert_eq!(slab.vacant_entry().key(), 3);
+    let mut iter = slab.iter();
+    assert_eq!(iter.next(), Some((0, &0)));
+    assert_eq!(iter.next(), Some((1, &9)));
+    assert_eq!(iter.next(), Some((2, &8)));
+    assert_eq!(iter.next(), Some((6, &6)));
+    assert_eq!(iter.next(), None);
+}
+
+#[test]
+fn fully_consumed_drain() {
+    let mut slab = Slab::new();
+
+    for i in 0..3 {
+        slab.insert(i);
+    }
+
+    {
+        let mut drain = slab.drain();
+        assert_eq!(Some(0), drain.next());
+        assert_eq!(Some(1), drain.next());
+        assert_eq!(Some(2), drain.next());
+        assert_eq!(None, drain.next());
+    }
+
+    assert!(slab.is_empty());
+}
+
+#[test]
+fn partially_consumed_drain() {
+    let mut slab = Slab::new();
+
+    for i in 0..3 {
+        slab.insert(i);
+    }
+
+    {
+        let mut drain = slab.drain();
+        assert_eq!(Some(0), drain.next());
+    }
+
+    assert!(slab.is_empty())
+}
+
+#[test]
+fn drain_rev() {
+    let mut slab = Slab::new();
+    for i in 0..10 {
+        slab.insert(i);
+    }
+    slab.remove(9);
+
+    let vals: Vec<u64> = slab.drain().rev().collect();
+    assert_eq!(vals, (0..9).rev().collect::<Vec<u64>>());
+}
+
+#[test]
+fn try_remove() {
+    let mut slab = Slab::new();
+
+    let key = slab.insert(1);
+
+    assert_eq!(slab.try_remove(key), Some(1));
+    assert_eq!(slab.try_remove(key), None);
+    assert_eq!(slab.get(key), None);
+}
+
+#[rustversion::since(1.39)]
+#[test]
+fn const_new() {
+    static _SLAB: Slab<()> = Slab::new();
+}
+
+#[test]
+fn clone_from() {
+    let mut slab1 = Slab::new();
+    let mut slab2 = Slab::new();
+    for i in 0..5 {
+        slab1.insert(i);
+        slab2.insert(2 * i);
+        slab2.insert(2 * i + 1);
+    }
+    slab1.remove(1);
+    slab1.remove(3);
+    slab2.clone_from(&slab1);
+
+    let mut iter2 = slab2.iter();
+    assert_eq!(iter2.next(), Some((0, &0)));
+    assert_eq!(iter2.next(), Some((2, &2)));
+    assert_eq!(iter2.next(), Some((4, &4)));
+    assert_eq!(iter2.next(), None);
+    assert!(slab2.capacity() >= 10);
+}
diff --git a/crates/smallvec/.cargo-checksum.json b/crates/smallvec/.cargo-checksum.json
new file mode 100644
index 0000000..bdb1fcd
--- /dev/null
+++ b/crates/smallvec/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"668bb964a243127d65605bb7a0d8d3c81bcbd8f7656a5b5734766ef534b4abcb","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b28172679e0009b655da42797c03fd163a3379d5cfa67ba1f1655e974a2a1a9","README.md":"a01127c37308457e8d396b176fb790846be0978c173be3f13260b62efcef011b","benches/bench.rs":"d82015eae942ee5cf74ace8c3c260ee2c6b5bcbeeb87254d2c72622c747a708a","debug_metadata/README.md":"4d7f1c1b2c25ce2231ef71864d06e54323867459035b53bc9e00f66a0a44f82e","debug_metadata/smallvec.natvis":"3092ddebd8fffc3486536d7f27f8c5eae3a8a093d45cd8eeb3946ea2b0c35a15","scripts/run_miri.sh":"74a9f9adc43f986e81977b03846f7dd00122a0150bd8ec3fe4842a1a787e0f07","src/arbitrary.rs":"22e55cfbf60374945b30e6d0855129eff67cd8b878cef6fa997e1f4be67b9e3d","src/lib.rs":"25fe85b6ae7b3972211bf57aeded4c7b72c47e4d843c7a4ba66908442197b5a0","src/specialization.rs":"46433586203399251cba496d67b88d34e1be3c2b591986b77463513da1c66471","src/tests.rs":"29c6e5dad62ebfea74e5116ac4a344b127b91cfb769fe9ba8b02b53773cf7ec8","tests/debugger_visualizer.rs":"185456ad253957fc0c9e904ff8a1135397ac991c29fa3c60f75d8d81f7463022","tests/macro.rs":"22ad4f6f104a599fdcba19cad8834105b8656b212fb6c7573a427d447f5db14f"},"package":"e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7"}
\ No newline at end of file
diff --git a/crates/smallvec/Android.bp b/crates/smallvec/Android.bp
new file mode 100644
index 0000000..3c94ecc
--- /dev/null
+++ b/crates/smallvec/Android.bp
@@ -0,0 +1,32 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_smallvec_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_smallvec_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libsmallvec",
+    host_supported: true,
+    crate_name: "smallvec",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.13.1",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    features: ["write"],
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+    min_sdk_version: "29",
+}
diff --git a/crates/smallvec/Cargo.lock b/crates/smallvec/Cargo.lock
new file mode 100644
index 0000000..a557f83
--- /dev/null
+++ b/crates/smallvec/Cargo.lock
@@ -0,0 +1,174 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
+
+[[package]]
+name = "arbitrary"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110"
+
+[[package]]
+name = "bincode"
+version = "1.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "debugger_test"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d95bb55f592fbb86947bee426d831de84bd65602a54f5cdcb10bfa70a62e52a0"
+dependencies = [
+ "anyhow",
+ "log",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "debugger_test_parser"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebe536452a777752b9316f0c840afbb94a2411684d4f15c081449ea801ef9e75"
+dependencies = [
+ "anyhow",
+ "log",
+ "regex",
+]
+
+[[package]]
+name = "log"
+version = "0.4.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "regex"
+version = "1.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
+
+[[package]]
+name = "serde"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.76",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.13.1"
+dependencies = [
+ "arbitrary",
+ "bincode",
+ "debugger_test",
+ "debugger_test_parser",
+ "serde",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
diff --git a/crates/smallvec/Cargo.toml b/crates/smallvec/Cargo.toml
new file mode 100644
index 0000000..baba153
--- /dev/null
+++ b/crates/smallvec/Cargo.toml
@@ -0,0 +1,72 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "smallvec"
+version = "1.13.1"
+authors = ["The Servo Project Developers"]
+description = "'Small vector' optimization: store up to a small number of items on the stack"
+documentation = "https://docs.rs/smallvec/"
+readme = "README.md"
+keywords = [
+    "small",
+    "vec",
+    "vector",
+    "stack",
+    "no_std",
+]
+categories = ["data-structures"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/servo/rust-smallvec"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = [
+    "--cfg",
+    "docsrs",
+    "--generate-link-to-definition",
+]
+
+[[test]]
+name = "debugger_visualizer"
+path = "tests/debugger_visualizer.rs"
+test = false
+required-features = ["debugger_visualizer"]
+
+[dependencies.arbitrary]
+version = "1"
+optional = true
+
+[dependencies.serde]
+version = "1"
+optional = true
+default-features = false
+
+[dev-dependencies.bincode]
+version = "1.0.1"
+
+[dev-dependencies.debugger_test]
+version = "0.1.0"
+
+[dev-dependencies.debugger_test_parser]
+version = "0.1.0"
+
+[features]
+const_generics = []
+const_new = ["const_generics"]
+debugger_visualizer = []
+drain_filter = []
+drain_keep_rest = ["drain_filter"]
+may_dangle = []
+specialization = []
+union = []
+write = []
diff --git a/crates/smallvec/LICENSE b/crates/smallvec/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/smallvec/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/smallvec/LICENSE-APACHE b/crates/smallvec/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/crates/smallvec/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/crates/smallvec/LICENSE-MIT b/crates/smallvec/LICENSE-MIT
new file mode 100644
index 0000000..9729c12
--- /dev/null
+++ b/crates/smallvec/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2018 The Servo Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/smallvec/METADATA b/crates/smallvec/METADATA
new file mode 100644
index 0000000..6449035
--- /dev/null
+++ b/crates/smallvec/METADATA
@@ -0,0 +1,20 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update external/rust/crates/smallvec
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
+
+name: "smallvec"
+description: "\'Small vector\' optimization: store up to a small number of items on the stack"
+third_party {
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2024
+    month: 2
+    day: 5
+  }
+  homepage: "https://crates.io/crates/smallvec"
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/smallvec/smallvec-1.13.1.crate"
+    version: "1.13.1"
+  }
+}
diff --git a/crates/smallvec/MODULE_LICENSE_APACHE2 b/crates/smallvec/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/smallvec/MODULE_LICENSE_APACHE2
diff --git a/crates/smallvec/README.md b/crates/smallvec/README.md
new file mode 100644
index 0000000..724637c
--- /dev/null
+++ b/crates/smallvec/README.md
@@ -0,0 +1,26 @@
+rust-smallvec
+=============
+
+[Documentation](https://docs.rs/smallvec/)
+
+[Release notes](https://github.com/servo/rust-smallvec/releases)
+
+"Small vector" optimization for Rust: store up to a small number of items on the stack
+
+## Example
+
+```rust
+use smallvec::{SmallVec, smallvec};
+    
+// This SmallVec can hold up to 4 items on the stack:
+let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3, 4];
+
+// It will automatically move its contents to the heap if it
+// contains more than four items:
+v.push(5);
+
+// SmallVec points to a slice, so you can use normal slice
+// indexing and other methods to access its contents:
+v[0] = v[1] + v[2];
+v.sort();
+```
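+
+A minimal sketch of the spill behavior described above, using the `spilled()` accessor from
+the smallvec 1.x API:
+
+```rust
+use smallvec::{smallvec, SmallVec};
+
+// Inline capacity of 2, so the third push moves the contents to the heap.
+let mut v: SmallVec<[i32; 2]> = smallvec![1, 2];
+assert!(!v.spilled());
+
+v.push(3);
+assert!(v.spilled());
+assert_eq!(&v[..], &[1, 2, 3]);
+```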
diff --git a/crates/smallvec/TEST_MAPPING b/crates/smallvec/TEST_MAPPING
new file mode 100644
index 0000000..71ccfc4
--- /dev/null
+++ b/crates/smallvec/TEST_MAPPING
@@ -0,0 +1,29 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/parking_lot_core"
+    },
+    {
+      "path": "external/rust/crates/tinyvec"
+    },
+    {
+      "path": "external/rust/crates/vulkano"
+    },
+    {
+      "path": "packages/modules/Virtualization/authfs"
+    },
+    {
+      "path": "packages/modules/Virtualization/virtualizationmanager"
+    },
+    {
+      "path": "packages/modules/Virtualization/zipfuse"
+    },
+    {
+      "path": "system/security/keystore2"
+    },
+    {
+      "path": "system/security/keystore2/legacykeystore"
+    }
+  ]
+}
diff --git a/crates/smallvec/benches/bench.rs b/crates/smallvec/benches/bench.rs
new file mode 100644
index 0000000..b6a8b85
--- /dev/null
+++ b/crates/smallvec/benches/bench.rs
@@ -0,0 +1,312 @@
+#![feature(test)]
+#![allow(deprecated)]
+
+extern crate test;
+
+use self::test::Bencher;
+use smallvec::{ExtendFromSlice, smallvec, SmallVec};
+
+const VEC_SIZE: usize = 16;
+const SPILLED_SIZE: usize = 100;
+
+trait Vector<T>: for<'a> From<&'a [T]> + Extend<T> + ExtendFromSlice<T> {
+    fn new() -> Self;
+    fn push(&mut self, val: T);
+    fn pop(&mut self) -> Option<T>;
+    fn remove(&mut self, p: usize) -> T;
+    fn insert(&mut self, n: usize, val: T);
+    fn from_elem(val: T, n: usize) -> Self;
+    fn from_elems(val: &[T]) -> Self;
+}
+
+impl<T: Copy> Vector<T> for Vec<T> {
+    fn new() -> Self {
+        Self::with_capacity(VEC_SIZE)
+    }
+
+    fn push(&mut self, val: T) {
+        self.push(val)
+    }
+
+    fn pop(&mut self) -> Option<T> {
+        self.pop()
+    }
+
+    fn remove(&mut self, p: usize) -> T {
+        self.remove(p)
+    }
+
+    fn insert(&mut self, n: usize, val: T) {
+        self.insert(n, val)
+    }
+
+    fn from_elem(val: T, n: usize) -> Self {
+        vec![val; n]
+    }
+
+    fn from_elems(val: &[T]) -> Self {
+        val.to_owned()
+    }
+}
+
+impl<T: Copy> Vector<T> for SmallVec<[T; VEC_SIZE]> {
+    fn new() -> Self {
+        Self::new()
+    }
+
+    fn push(&mut self, val: T) {
+        self.push(val)
+    }
+
+    fn pop(&mut self) -> Option<T> {
+        self.pop()
+    }
+
+    fn remove(&mut self, p: usize) -> T {
+        self.remove(p)
+    }
+
+    fn insert(&mut self, n: usize, val: T) {
+        self.insert(n, val)
+    }
+
+    fn from_elem(val: T, n: usize) -> Self {
+        smallvec![val; n]
+    }
+
+    fn from_elems(val: &[T]) -> Self {
+        SmallVec::from_slice(val)
+    }
+}
+
+macro_rules! make_benches {
+    ($typ:ty { $($b_name:ident => $g_name:ident($($args:expr),*),)* }) => {
+        $(
+            #[bench]
+            fn $b_name(b: &mut Bencher) {
+                $g_name::<$typ>($($args,)* b)
+            }
+        )*
+    }
+}
+
+make_benches! {
+    SmallVec<[u64; VEC_SIZE]> {
+        bench_push => gen_push(SPILLED_SIZE as _),
+        bench_push_small => gen_push(VEC_SIZE as _),
+        bench_insert_push => gen_insert_push(SPILLED_SIZE as _),
+        bench_insert_push_small => gen_insert_push(VEC_SIZE as _),
+        bench_insert => gen_insert(SPILLED_SIZE as _),
+        bench_insert_small => gen_insert(VEC_SIZE as _),
+        bench_remove => gen_remove(SPILLED_SIZE as _),
+        bench_remove_small => gen_remove(VEC_SIZE as _),
+        bench_extend => gen_extend(SPILLED_SIZE as _),
+        bench_extend_small => gen_extend(VEC_SIZE as _),
+        bench_from_iter => gen_from_iter(SPILLED_SIZE as _),
+        bench_from_iter_small => gen_from_iter(VEC_SIZE as _),
+        bench_from_slice => gen_from_slice(SPILLED_SIZE as _),
+        bench_from_slice_small => gen_from_slice(VEC_SIZE as _),
+        bench_extend_from_slice => gen_extend_from_slice(SPILLED_SIZE as _),
+        bench_extend_from_slice_small => gen_extend_from_slice(VEC_SIZE as _),
+        bench_macro_from_elem => gen_from_elem(SPILLED_SIZE as _),
+        bench_macro_from_elem_small => gen_from_elem(VEC_SIZE as _),
+        bench_pushpop => gen_pushpop(),
+    }
+}
+
+make_benches! {
+    Vec<u64> {
+        bench_push_vec => gen_push(SPILLED_SIZE as _),
+        bench_push_vec_small => gen_push(VEC_SIZE as _),
+        bench_insert_push_vec => gen_insert_push(SPILLED_SIZE as _),
+        bench_insert_push_vec_small => gen_insert_push(VEC_SIZE as _),
+        bench_insert_vec => gen_insert(SPILLED_SIZE as _),
+        bench_insert_vec_small => gen_insert(VEC_SIZE as _),
+        bench_remove_vec => gen_remove(SPILLED_SIZE as _),
+        bench_remove_vec_small => gen_remove(VEC_SIZE as _),
+        bench_extend_vec => gen_extend(SPILLED_SIZE as _),
+        bench_extend_vec_small => gen_extend(VEC_SIZE as _),
+        bench_from_iter_vec => gen_from_iter(SPILLED_SIZE as _),
+        bench_from_iter_vec_small => gen_from_iter(VEC_SIZE as _),
+        bench_from_slice_vec => gen_from_slice(SPILLED_SIZE as _),
+        bench_from_slice_vec_small => gen_from_slice(VEC_SIZE as _),
+        bench_extend_from_slice_vec => gen_extend_from_slice(SPILLED_SIZE as _),
+        bench_extend_from_slice_vec_small => gen_extend_from_slice(VEC_SIZE as _),
+        bench_macro_from_elem_vec => gen_from_elem(SPILLED_SIZE as _),
+        bench_macro_from_elem_vec_small => gen_from_elem(VEC_SIZE as _),
+        bench_pushpop_vec => gen_pushpop(),
+    }
+}
+
+fn gen_push<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+    #[inline(never)]
+    fn push_noinline<V: Vector<u64>>(vec: &mut V, x: u64) {
+        vec.push(x);
+    }
+
+    b.iter(|| {
+        let mut vec = V::new();
+        for x in 0..n {
+            push_noinline(&mut vec, x);
+        }
+        vec
+    });
+}
+
+fn gen_insert_push<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+    #[inline(never)]
+    fn insert_push_noinline<V: Vector<u64>>(vec: &mut V, x: u64) {
+        vec.insert(x as usize, x);
+    }
+
+    b.iter(|| {
+        let mut vec = V::new();
+        for x in 0..n {
+            insert_push_noinline(&mut vec, x);
+        }
+        vec
+    });
+}
+
+fn gen_insert<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+    #[inline(never)]
+    fn insert_noinline<V: Vector<u64>>(vec: &mut V, p: usize, x: u64) {
+        vec.insert(p, x)
+    }
+
+    b.iter(|| {
+        let mut vec = V::new();
+        // Always insert at position 0 so that we are subject to shifts of
+        // many different lengths.
+        vec.push(0);
+        for x in 0..n {
+            insert_noinline(&mut vec, 0, x);
+        }
+        vec
+    });
+}
+
+fn gen_remove<V: Vector<u64>>(n: usize, b: &mut Bencher) {
+    #[inline(never)]
+    fn remove_noinline<V: Vector<u64>>(vec: &mut V, p: usize) -> u64 {
+        vec.remove(p)
+    }
+
+    b.iter(|| {
+        let mut vec = V::from_elem(0, n as _);
+
+        for _ in 0..n {
+            remove_noinline(&mut vec, 0);
+        }
+    });
+}
+
+fn gen_extend<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+    b.iter(|| {
+        let mut vec = V::new();
+        vec.extend(0..n);
+        vec
+    });
+}
+
+fn gen_from_iter<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+    let v: Vec<u64> = (0..n).collect();
+    b.iter(|| {
+        let vec = V::from(&v);
+        vec
+    });
+}
+
+fn gen_from_slice<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+    let v: Vec<u64> = (0..n).collect();
+    b.iter(|| {
+        let vec = V::from_elems(&v);
+        vec
+    });
+}
+
+fn gen_extend_from_slice<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+    let v: Vec<u64> = (0..n).collect();
+    b.iter(|| {
+        let mut vec = V::new();
+        vec.extend_from_slice(&v);
+        vec
+    });
+}
+
+fn gen_pushpop<V: Vector<u64>>(b: &mut Bencher) {
+    #[inline(never)]
+    fn pushpop_noinline<V: Vector<u64>>(vec: &mut V, x: u64) -> Option<u64> {
+        vec.push(x);
+        vec.pop()
+    }
+
+    b.iter(|| {
+        let mut vec = V::new();
+        for x in 0..SPILLED_SIZE as _ {
+            pushpop_noinline(&mut vec, x);
+        }
+        vec
+    });
+}
+
+fn gen_from_elem<V: Vector<u64>>(n: usize, b: &mut Bencher) {
+    b.iter(|| {
+        let vec = V::from_elem(42, n);
+        vec
+    });
+}
+
+#[bench]
+fn bench_insert_many(b: &mut Bencher) {
+    #[inline(never)]
+    fn insert_many_noinline<I: IntoIterator<Item = u64>>(
+        vec: &mut SmallVec<[u64; VEC_SIZE]>,
+        index: usize,
+        iterable: I,
+    ) {
+        vec.insert_many(index, iterable)
+    }
+
+    b.iter(|| {
+        let mut vec = SmallVec::<[u64; VEC_SIZE]>::new();
+        insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _);
+        insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _);
+        vec
+    });
+}
+
+#[bench]
+fn bench_insert_from_slice(b: &mut Bencher) {
+    let v: Vec<u64> = (0..SPILLED_SIZE as _).collect();
+    b.iter(|| {
+        let mut vec = SmallVec::<[u64; VEC_SIZE]>::new();
+        vec.insert_from_slice(0, &v);
+        vec.insert_from_slice(0, &v);
+        vec
+    });
+}
+
+#[bench]
+fn bench_macro_from_list(b: &mut Bencher) {
+    b.iter(|| {
+        let vec: SmallVec<[u64; 16]> = smallvec![
+            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80,
+            0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000,
+            0x80000, 0x100000,
+        ];
+        vec
+    });
+}
+
+#[bench]
+fn bench_macro_from_list_vec(b: &mut Bencher) {
+    b.iter(|| {
+        let vec: Vec<u64> = vec![
+            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80,
+            0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000,
+            0x80000, 0x100000,
+        ];
+        vec
+    });
+}
diff --git a/crates/smallvec/cargo_embargo.json b/crates/smallvec/cargo_embargo.json
new file mode 100644
index 0000000..6fe4bdf
--- /dev/null
+++ b/crates/smallvec/cargo_embargo.json
@@ -0,0 +1,7 @@
+{
+  "min_sdk_version": "29",
+  "run_cargo": false,
+  "features": [
+    "write"
+  ]
+}
diff --git a/crates/smallvec/debug_metadata/README.md b/crates/smallvec/debug_metadata/README.md
new file mode 100644
index 0000000..9a5596b
--- /dev/null
+++ b/crates/smallvec/debug_metadata/README.md
@@ -0,0 +1,111 @@
+## Debugger Visualizers
+
+Many languages and debuggers enable developers to control how a type is
+displayed in a debugger. These are called "debugger visualizations" or "debugger
+views".
+
+The Windows debuggers (WinDbg\CDB) support defining custom debugger visualizations using
+the `Natvis` framework. To use Natvis, developers write XML documents with the `.natvis`
+extension, following the Natvis schema, that describe how debugger types should be displayed.
+(See: https://docs.microsoft.com/en-us/visualstudio/debugger/create-custom-views-of-native-objects?view=vs-2019)
+The Natvis files provide patterns, which match type names, and a description of how to
+display those types.
+
+The Natvis schema can be found either online (See: https://code.visualstudio.com/docs/cpp/natvis#_schema)
+or locally at `<VS Installation Folder>\Xml\Schemas\1033\natvis.xsd`.
+
+The GNU debugger (GDB) supports defining custom debugger views using Pretty Printers.
+Pretty printers are written as python scripts that describe how a type should be displayed
+when loaded up in GDB/LLDB. (See: https://sourceware.org/gdb/onlinedocs/gdb/Pretty-Printing.html#Pretty-Printing)
+The pretty printers provide patterns, which match type names, and for matching
+types, describe how to display those types. (For writing a pretty printer, see: https://sourceware.org/gdb/onlinedocs/gdb/Writing-a-Pretty_002dPrinter.html#Writing-a-Pretty_002dPrinter).
+
+### Embedding Visualizers
+
+Through the use of the currently unstable `#[debugger_visualizer]` attribute, the `smallvec`
+crate can embed debugger visualizers into the crate metadata.
+
+Currently the two types of visualizers supported are Natvis and Pretty printers.
+
+For Natvis files, when linking an executable with a crate that includes Natvis files,
+the MSVC linker will embed the contents of all Natvis files into the generated `PDB`.
+
+For pretty printers, the compiler will encode the contents of the pretty printer
+in the `.debug_gdb_scripts` section of the generated `ELF`.
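+
+For reference, this crate gates the Natvis embedding behind the `debugger_visualizer` feature via a
+crate-level `cfg_attr`, as can be seen in `src/lib.rs` later in this change:
+
+```rust
+#![cfg_attr(
+    feature = "debugger_visualizer",
+    feature(debugger_visualizer),
+    debugger_visualizer(natvis_file = "../debug_metadata/smallvec.natvis")
+)]
+```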
+
+### Testing Visualizers
+
+The `smallvec` crate supports testing debugger visualizers defined for this crate. The entry point for
+these tests is `tests/debugger_visualizer.rs`. These tests are defined using the `debugger_test` and
+`debugger_test_parser` crates. The `debugger_test` crate is a proc macro crate which defines a
+single proc macro attribute, `#[debugger_test]`. For more detailed information about this crate,
+see https://crates.io/crates/debugger_test. The CI pipeline for the `smallvec` crate has been updated
+to run the debugger visualizer tests to ensure debugger visualizers do not become broken/stale.
+
+The `#[debugger_test]` proc macro attribute may only be used on test functions and will run the
+function under the debugger specified by the `debugger` meta item.
+
+This proc macro attribute has 3 required values:
+
+1. The first required meta item, `debugger`, takes a string value which specifies the debugger to launch.
+2. The second required meta item, `commands`, takes a string containing a newline (`\n`) separated list of
+debugger commands to run.
+3. The third required meta item, `expected_statements`, takes a string containing a newline (`\n`) separated
+list of statements that must appear in the debugger output. Pattern matching through regular expressions is
+also supported by using the `pattern:` prefix for each expected statement.
+
+#### Example:
+
+```rust
+#[debugger_test(
+    debugger = "cdb",
+    commands = "command1\ncommand2\ncommand3",
+    expected_statements = "statement1\nstatement2\nstatement3")]
+fn test() {
+
+}
+```
+
+Using a multiline string is also supported, with a single debugger command/expected statement per line:
+
+```rust
+#[debugger_test(
+    debugger = "cdb",
+    commands = "
+command1
+command2
+command3",
+    expected_statements = "
+statement1
+pattern:statement[0-9]+
+statement3")]
+fn test() {
+    
+}
+```
+
+In the example above, the second expected statement uses pattern matching through a regular expression
+by using the `pattern:` prefix.
+
+#### Testing Locally
+
+Currently, only Natvis visualizations have been defined for the `smallvec` crate via `debug_metadata/smallvec.natvis`,
+which means the `tests/debugger_visualizer.rs` tests need to be run on Windows using the `*-pc-windows-msvc` targets.
+To run these tests locally, first ensure the debugging tools for Windows are installed or install them following
+the steps listed here, [Debugging Tools for Windows](https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/).
+Once the debugging tools have been installed, the tests can be run in the same manner as they are in the CI
+pipeline.
+
+#### Note
+
+The debugger visualizer tests in `tests/debugger_visualizer.rs` need to be run consecutively, not in
+parallel. This can be achieved by passing the flag `--test-threads=1` to the test binary. This is due to
+how the debugger tests are run. Each test marked with the `#[debugger_test]` attribute launches a debugger
+and attaches it to the current test process. If tests run in parallel, a test may try to attach a debugger
+to a process that already has one attached, causing the test to fail.
+
+For example:
+
+```
+cargo test --test debugger_visualizer --features debugger_visualizer -- --test-threads=1
+```
diff --git a/crates/smallvec/debug_metadata/smallvec.natvis b/crates/smallvec/debug_metadata/smallvec.natvis
new file mode 100644
index 0000000..8731860
--- /dev/null
+++ b/crates/smallvec/debug_metadata/smallvec.natvis
@@ -0,0 +1,35 @@
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+  <Type Name="smallvec::SmallVec&lt;array$&lt;*,*&gt;&gt;" Priority="Medium">
+    <Intrinsic Name="is_inline" Expression="$T2 &gt;= capacity" />
+    <Intrinsic Name="len" Expression="is_inline() ? capacity : data.variant1.value.len" />
+    <Intrinsic Name="data_ptr" Expression="is_inline() ? data.variant0.value.__0.value.value : data.variant1.value.ptr.pointer" />
+
+    <DisplayString>{{ len={len()} is_inline={is_inline()} }}</DisplayString>
+    <Expand>
+        <Item Name="[capacity]">is_inline() ? $T2 : capacity</Item>
+        <Item Name="[len]">len()</Item>
+        <Item Name="[data_ptr]">data_ptr()</Item>
+
+        <ArrayItems>
+            <Size>len()</Size>
+            <ValuePointer>data_ptr()</ValuePointer>
+        </ArrayItems>
+    </Expand>
+  </Type>
+
+  <Type Name="smallvec::SmallVec&lt;array$&lt;*,*&gt;&gt;" Priority="MediumLow">
+    <Intrinsic Name="is_inline" Expression="$T2 &gt;= capacity" />
+    <Intrinsic Name="len" Expression="is_inline() ? capacity : data.heap.__1" />
+    <Intrinsic Name="data_ptr" Expression="is_inline() ? data.inline.value.value.value : data.heap.__0.pointer" />
+    <DisplayString>{{ len={len()} is_inline={is_inline()} }}</DisplayString>
+    <Expand>
+        <Item Name="[capacity]">is_inline() ? $T2 : capacity</Item>
+        <Item Name="[len]">len()</Item>
+
+        <ArrayItems>
+            <Size>len()</Size>
+            <ValuePointer>data_ptr()</ValuePointer>
+        </ArrayItems>
+    </Expand>
+  </Type>
+</AutoVisualizer>
\ No newline at end of file
diff --git a/crates/smallvec/scripts/run_miri.sh b/crates/smallvec/scripts/run_miri.sh
new file mode 100644
index 0000000..010ceb0
--- /dev/null
+++ b/crates/smallvec/scripts/run_miri.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/bash
+
+set -ex
+
+# Clean out our target dir, which may have artifacts compiled by a version of
+# rust different from the one we're about to download.
+cargo clean
+
+# Install and run the latest version of nightly where miri built successfully.
+# Taken from: https://github.com/rust-lang/miri#running-miri-on-ci
+
+MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)
+echo "Installing latest nightly with Miri: $MIRI_NIGHTLY"
+rustup override unset
+rustup default "$MIRI_NIGHTLY"
+
+rustup component add miri
+cargo miri setup
+
+cargo miri test --verbose
+cargo miri test --verbose --features union
+cargo miri test --verbose --all-features
+
+rustup override set nightly
diff --git a/crates/smallvec/src/arbitrary.rs b/crates/smallvec/src/arbitrary.rs
new file mode 100644
index 0000000..cbdfcb0
--- /dev/null
+++ b/crates/smallvec/src/arbitrary.rs
@@ -0,0 +1,19 @@
+use crate::{Array, SmallVec};
+use arbitrary::{Arbitrary, Unstructured};
+
+impl<'a, A: Array> Arbitrary<'a> for SmallVec<A>
+where
+    <A as Array>::Item: Arbitrary<'a>,
+{
+    fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result<Self> {
+        u.arbitrary_iter()?.collect()
+    }
+
+    fn arbitrary_take_rest(u: Unstructured<'a>) -> arbitrary::Result<Self> {
+        u.arbitrary_take_rest_iter()?.collect()
+    }
+
+    fn size_hint(depth: usize) -> (usize, Option<usize>) {
+        arbitrary::size_hint::and(<usize as Arbitrary>::size_hint(depth), (0, None))
+    }
+}
diff --git a/crates/smallvec/src/lib.rs b/crates/smallvec/src/lib.rs
new file mode 100644
index 0000000..cadb5d8
--- /dev/null
+++ b/crates/smallvec/src/lib.rs
@@ -0,0 +1,2471 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Small vectors in various sizes. These store a certain number of elements inline, and fall back
+//! to the heap for larger allocations.  This can be a useful optimization for improving cache
+//! locality and reducing allocator traffic for workloads that fit within the inline buffer.
+//!
+//! ## `no_std` support
+//!
+//! By default, `smallvec` does not depend on `std`.  However, the optional
+//! `write` feature implements the `std::io::Write` trait for vectors of `u8`.
+//! When this feature is enabled, `smallvec` depends on `std`.
+//!
+//! ## Optional features
+//!
+//! ### `serde`
+//!
+//! When this optional dependency is enabled, `SmallVec` implements the `serde::Serialize` and
+//! `serde::Deserialize` traits.
+//!
+//! ### `write`
+//!
+//! When this feature is enabled, `SmallVec<[u8; _]>` implements the `std::io::Write` trait.
+//! This feature is not compatible with `#![no_std]` programs.
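+//!
+//! A minimal sketch, assuming the `write` feature is enabled (as it is in the Android.bp earlier in
+//! this change):
+//!
+//! ```ignore
+//! use std::io::Write;
+//! use smallvec::SmallVec;
+//!
+//! // Bytes are buffered in the vector, inline until it spills to the heap.
+//! let mut buf: SmallVec<[u8; 8]> = SmallVec::new();
+//! buf.write_all(b"hello").unwrap();
+//! assert_eq!(&buf[..], b"hello");
+//! ```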
+//!
+//! ### `union`
+//!
+//! **This feature requires Rust 1.49.**
+//!
+//! When the `union` feature is enabled `smallvec` will track its state (inline or spilled)
+//! without the use of an enum tag, reducing the size of the `smallvec` by one machine word.
+//! This means that there is potentially no space overhead compared to `Vec`.
+//! Note that `smallvec` can still be larger than `Vec` if the inline buffer is larger than two
+//! machine words.
+//!
+//! To use this feature add `features = ["union"]` in the `smallvec` section of Cargo.toml.
+//! Note that this feature requires Rust 1.49.
+//!
+//! Tracking issue: [rust-lang/rust#55149](https://github.com/rust-lang/rust/issues/55149)
+//!
+//! ### `const_generics`
+//!
+//! **This feature requires Rust 1.51.**
+//!
+//! When this feature is enabled, `SmallVec` works with arrays of any size, not just a fixed
+//! list of sizes.
+//!
+//! ### `const_new`
+//!
+//! **This feature requires Rust 1.51.**
+//!
+//! This feature exposes the functions [`SmallVec::new_const`], [`SmallVec::from_const`], and [`smallvec_inline`], which enable a `SmallVec` to be initialized in a const context.
+//! For details, see the
+//! [Rust Reference](https://doc.rust-lang.org/reference/const_eval.html#const-functions).
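+//!
+//! An illustrative sketch, assuming the `const_new` feature is enabled (not part of the upstream docs):
+//!
+//! ```ignore
+//! use smallvec::{smallvec_inline, SmallVec};
+//!
+//! // `smallvec_inline!` infers the inline capacity from the number of elements.
+//! const THREE: SmallVec<[i32; 3]> = smallvec_inline![1, 2, 3];
+//! const EMPTY: SmallVec<[i32; 4]> = SmallVec::new_const();
+//! ```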
+//!
+//! ### `drain_filter`
+//!
+//! **This feature is unstable.** It may change to match the unstable `drain_filter` method in libstd.
+//!
+//! Enables the `drain_filter` method, which produces an iterator that calls a user-provided
+//! closure to determine which elements of the vector to remove and yield from the iterator.
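+//!
+//! A sketch of the intended usage, assuming the method mirrors the unstable `Vec::drain_filter` API:
+//!
+//! ```ignore
+//! use smallvec::{smallvec, SmallVec};
+//!
+//! let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3, 4];
+//! // Elements for which the closure returns `true` are removed and yielded.
+//! let evens: SmallVec<[i32; 4]> = v.drain_filter(|x| *x % 2 == 0).collect();
+//! assert_eq!(&evens[..], &[2, 4]);
+//! assert_eq!(&v[..], &[1, 3]);
+//! ```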
+//!
+//! ### `drain_keep_rest`
+//!
+//! **This feature is unstable.** It may change to match the unstable `drain_keep_rest` method in libstd.
+//!
+//! Enables the `DrainFilter::keep_rest` method.
+//!
+//! ### `specialization`
+//!
+//! **This feature is unstable and requires a nightly build of the Rust toolchain.**
+//!
+//! When this feature is enabled, `SmallVec::from(slice)` has improved performance for slices
+//! of `Copy` types.  (Without this feature, you can use `SmallVec::from_slice` to get optimal
+//! performance for `Copy` types.)
+//!
+//! Tracking issue: [rust-lang/rust#31844](https://github.com/rust-lang/rust/issues/31844)
+//!
+//! ### `may_dangle`
+//!
+//! **This feature is unstable and requires a nightly build of the Rust toolchain.**
+//!
+//! This feature makes the Rust compiler less strict about use of vectors that contain borrowed
+//! references. For details, see the
+//! [Rustonomicon](https://doc.rust-lang.org/1.42.0/nomicon/dropck.html#an-escape-hatch).
+//!
+//! Tracking issue: [rust-lang/rust#34761](https://github.com/rust-lang/rust/issues/34761)
+
+#![no_std]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(feature = "specialization", allow(incomplete_features))]
+#![cfg_attr(feature = "specialization", feature(specialization))]
+#![cfg_attr(feature = "may_dangle", feature(dropck_eyepatch))]
+#![cfg_attr(
+    feature = "debugger_visualizer",
+    feature(debugger_visualizer),
+    debugger_visualizer(natvis_file = "../debug_metadata/smallvec.natvis")
+)]
+#![deny(missing_docs)]
+
+#[doc(hidden)]
+pub extern crate alloc;
+
+#[cfg(any(test, feature = "write"))]
+extern crate std;
+
+#[cfg(test)]
+mod tests;
+
+#[allow(deprecated)]
+use alloc::alloc::{Layout, LayoutErr};
+use alloc::boxed::Box;
+use alloc::{vec, vec::Vec};
+use core::borrow::{Borrow, BorrowMut};
+use core::cmp;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::hint::unreachable_unchecked;
+use core::iter::{repeat, FromIterator, FusedIterator, IntoIterator};
+use core::mem;
+use core::mem::MaybeUninit;
+use core::ops::{self, Range, RangeBounds};
+use core::ptr::{self, NonNull};
+use core::slice::{self, SliceIndex};
+
+#[cfg(feature = "serde")]
+use serde::{
+    de::{Deserialize, Deserializer, SeqAccess, Visitor},
+    ser::{Serialize, SerializeSeq, Serializer},
+};
+
+#[cfg(feature = "serde")]
+use core::marker::PhantomData;
+
+#[cfg(feature = "write")]
+use std::io;
+
+#[cfg(feature = "drain_keep_rest")]
+use core::mem::ManuallyDrop;
+
+/// Creates a [`SmallVec`] containing the arguments.
+///
+/// `smallvec!` allows `SmallVec`s to be defined with the same syntax as array expressions.
+/// There are two forms of this macro:
+///
+/// - Create a [`SmallVec`] containing a given list of elements:
+///
+/// ```
+/// # use smallvec::{smallvec, SmallVec};
+/// # fn main() {
+/// let v: SmallVec<[_; 128]> = smallvec![1, 2, 3];
+/// assert_eq!(v[0], 1);
+/// assert_eq!(v[1], 2);
+/// assert_eq!(v[2], 3);
+/// # }
+/// ```
+///
+/// - Create a [`SmallVec`] from a given element and size:
+///
+/// ```
+/// # use smallvec::{smallvec, SmallVec};
+/// # fn main() {
+/// let v: SmallVec<[_; 0x8000]> = smallvec![1; 3];
+/// assert_eq!(v, SmallVec::from_buf([1, 1, 1]));
+/// # }
+/// ```
+///
+/// Note that unlike array expressions this syntax supports all elements
+/// which implement [`Clone`] and the number of elements doesn't have to be
+/// a constant.
+///
+/// This will use `clone` to duplicate an expression, so one should be careful
+/// using this with types having a nonstandard `Clone` implementation. For
+/// example, `smallvec![Rc::new(1); 5]` will create a vector of five references
+/// to the same boxed integer value, not five references pointing to independently
+/// boxed integers.
+
+#[macro_export]
+macro_rules! smallvec {
+    // count helper: transform any expression into 1
+    (@one $x:expr) => (1usize);
+    ($elem:expr; $n:expr) => ({
+        $crate::SmallVec::from_elem($elem, $n)
+    });
+    ($($x:expr),*$(,)*) => ({
+        let count = 0usize $(+ $crate::smallvec!(@one $x))*;
+        #[allow(unused_mut)]
+        let mut vec = $crate::SmallVec::new();
+        if count <= vec.inline_size() {
+            $(vec.push($x);)*
+            vec
+        } else {
+            $crate::SmallVec::from_vec($crate::alloc::vec![$($x,)*])
+        }
+    });
+}
+
+/// Creates an inline [`SmallVec`] containing the arguments. This macro is enabled by the feature `const_new`.
+///
+/// `smallvec_inline!` allows `SmallVec`s to be defined with the same syntax as array expressions in `const` contexts.
+/// The inline storage `A` will always be an array of the size specified by the arguments.
+/// There are two forms of this macro:
+///
+/// - Create a [`SmallVec`] containing a given list of elements:
+///
+/// ```
+/// # use smallvec::{smallvec_inline, SmallVec};
+/// # fn main() {
+/// const V: SmallVec<[i32; 3]> = smallvec_inline![1, 2, 3];
+/// assert_eq!(V[0], 1);
+/// assert_eq!(V[1], 2);
+/// assert_eq!(V[2], 3);
+/// # }
+/// ```
+///
+/// - Create a [`SmallVec`] from a given element and size:
+///
+/// ```
+/// # use smallvec::{smallvec_inline, SmallVec};
+/// # fn main() {
+/// const V: SmallVec<[i32; 3]> = smallvec_inline![1; 3];
+/// assert_eq!(V, SmallVec::from_buf([1, 1, 1]));
+/// # }
+/// ```
+///
+/// Note that the behavior mimics that of array expressions, in contrast to [`smallvec`].
+#[cfg(feature = "const_new")]
+#[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+#[macro_export]
+macro_rules! smallvec_inline {
+    // count helper: transform any expression into 1
+    (@one $x:expr) => (1usize);
+    ($elem:expr; $n:expr) => ({
+        $crate::SmallVec::<[_; $n]>::from_const([$elem; $n])
+    });
+    ($($x:expr),+ $(,)?) => ({
+        const N: usize = 0usize $(+ $crate::smallvec_inline!(@one $x))*;
+        $crate::SmallVec::<[_; N]>::from_const([$($x,)*])
+    });
+}
+
+/// `panic!()` in debug builds, optimization hint in release.
+#[cfg(not(feature = "union"))]
+macro_rules! debug_unreachable {
+    () => {
+        debug_unreachable!("entered unreachable code")
+    };
+    ($e:expr) => {
+        if cfg!(debug_assertions) {
+            panic!($e);
+        } else {
+            unreachable_unchecked();
+        }
+    };
+}
+
+/// Trait to be implemented by a collection that can be extended from a slice
+///
+/// ## Example
+///
+/// ```rust
+/// use smallvec::{ExtendFromSlice, SmallVec};
+///
+/// fn initialize<V: ExtendFromSlice<u8>>(v: &mut V) {
+///     v.extend_from_slice(b"Test!");
+/// }
+///
+/// let mut vec = Vec::new();
+/// initialize(&mut vec);
+/// assert_eq!(&vec, b"Test!");
+///
+/// let mut small_vec = SmallVec::<[u8; 8]>::new();
+/// initialize(&mut small_vec);
+/// assert_eq!(&small_vec as &[_], b"Test!");
+/// ```
+#[doc(hidden)]
+#[deprecated]
+pub trait ExtendFromSlice<T> {
+    /// Extends a collection from a slice of its element type
+    fn extend_from_slice(&mut self, other: &[T]);
+}
+
+#[allow(deprecated)]
+impl<T: Clone> ExtendFromSlice<T> for Vec<T> {
+    fn extend_from_slice(&mut self, other: &[T]) {
+        Vec::extend_from_slice(self, other)
+    }
+}
+
+/// Error type for APIs with fallible heap allocation
+#[derive(Debug)]
+pub enum CollectionAllocErr {
+    /// Overflow of `usize::MAX` or another error during size computation
+    CapacityOverflow,
+    /// The allocator returned an error
+    AllocErr {
+        /// The layout that was passed to the allocator
+        layout: Layout,
+    },
+}
+
+impl fmt::Display for CollectionAllocErr {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Allocation error: {:?}", self)
+    }
+}
+
+#[allow(deprecated)]
+impl From<LayoutErr> for CollectionAllocErr {
+    fn from(_: LayoutErr) -> Self {
+        CollectionAllocErr::CapacityOverflow
+    }
+}
+
+fn infallible<T>(result: Result<T, CollectionAllocErr>) -> T {
+    match result {
+        Ok(x) => x,
+        Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
+        Err(CollectionAllocErr::AllocErr { layout }) => alloc::alloc::handle_alloc_error(layout),
+    }
+}
+
+/// FIXME: use `Layout::array` when we require a Rust version where it’s stable
+/// <https://github.com/rust-lang/rust/issues/55724>
+fn layout_array<T>(n: usize) -> Result<Layout, CollectionAllocErr> {
+    let size = mem::size_of::<T>()
+        .checked_mul(n)
+        .ok_or(CollectionAllocErr::CapacityOverflow)?;
+    let align = mem::align_of::<T>();
+    Layout::from_size_align(size, align).map_err(|_| CollectionAllocErr::CapacityOverflow)
+}
+
+unsafe fn deallocate<T>(ptr: NonNull<T>, capacity: usize) {
+    // This unwrap should succeed since the same computation succeeded when allocating.
+    let layout = layout_array::<T>(capacity).unwrap();
+    alloc::alloc::dealloc(ptr.as_ptr() as *mut u8, layout)
+}
+
+/// An iterator that removes the items from a `SmallVec` and yields them by value.
+///
+/// Returned from [`SmallVec::drain`][1].
+///
+/// [1]: struct.SmallVec.html#method.drain
+pub struct Drain<'a, T: 'a + Array> {
+    tail_start: usize,
+    tail_len: usize,
+    iter: slice::Iter<'a, T::Item>,
+    vec: NonNull<SmallVec<T>>,
+}
+
+impl<'a, T: 'a + Array> fmt::Debug for Drain<'a, T>
+where
+    T::Item: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
+    }
+}
+
+unsafe impl<'a, T: Sync + Array> Sync for Drain<'a, T> {}
+unsafe impl<'a, T: Send + Array> Send for Drain<'a, T> {}
+
+impl<'a, T: 'a + Array> Iterator for Drain<'a, T> {
+    type Item = T::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<T::Item> {
+        self.iter
+            .next()
+            .map(|reference| unsafe { ptr::read(reference) })
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
+
+impl<'a, T: 'a + Array> DoubleEndedIterator for Drain<'a, T> {
+    #[inline]
+    fn next_back(&mut self) -> Option<T::Item> {
+        self.iter
+            .next_back()
+            .map(|reference| unsafe { ptr::read(reference) })
+    }
+}
+
+impl<'a, T: Array> ExactSizeIterator for Drain<'a, T> {
+    #[inline]
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+}
+
+impl<'a, T: Array> FusedIterator for Drain<'a, T> {}
+
+impl<'a, T: 'a + Array> Drop for Drain<'a, T> {
+    fn drop(&mut self) {
+        self.for_each(drop);
+
+        if self.tail_len > 0 {
+            unsafe {
+                let source_vec = self.vec.as_mut();
+
+                // memmove back untouched tail, update to new length
+                let start = source_vec.len();
+                let tail = self.tail_start;
+                if tail != start {
+                    // as_mut_ptr creates a &mut, invalidating other pointers.
+                    // This pattern avoids calling it with a pointer already present.
+                    let ptr = source_vec.as_mut_ptr();
+                    let src = ptr.add(tail);
+                    let dst = ptr.add(start);
+                    ptr::copy(src, dst, self.tail_len);
+                }
+                source_vec.set_len(start + self.tail_len);
+            }
+        }
+    }
+}
+
+#[cfg(feature = "drain_filter")]
+/// An iterator which uses a closure to determine if an element should be removed.
+///
+/// Returned from [`SmallVec::drain_filter`][1].
+///
+/// [1]: struct.SmallVec.html#method.drain_filter
+pub struct DrainFilter<'a, T, F>
+where
+    F: FnMut(&mut T::Item) -> bool,
+    T: Array,
+{
+    vec: &'a mut SmallVec<T>,
+    /// The index of the item that will be inspected by the next call to `next`.
+    idx: usize,
+    /// The number of items that have been drained (removed) thus far.
+    del: usize,
+    /// The original length of `vec` prior to draining.
+    old_len: usize,
+    /// The filter test predicate.
+    pred: F,
+    /// A flag that indicates a panic has occurred in the filter test predicate.
+    /// This is used as a hint in the drop implementation to prevent consumption
+    /// of the remainder of the `DrainFilter`. Any unprocessed items will be
+    /// backshifted in the `vec`, but no further items will be dropped or
+    /// tested by the filter predicate.
+    panic_flag: bool,
+}
+
+#[cfg(feature = "drain_filter")]
+impl <T, F> fmt::Debug for DrainFilter<'_, T, F>
+where
+    F: FnMut(&mut T::Item) -> bool,
+    T: Array,
+    T::Item: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("DrainFilter").field(&self.vec.as_slice()).finish()
+    }
+}
+
+#[cfg(feature = "drain_filter")]
+impl <T, F> Iterator for DrainFilter<'_, T, F>
+where
+    F: FnMut(&mut T::Item) -> bool,
+    T: Array,
+{
+    type Item = T::Item;
+
+    fn next(&mut self) -> Option<T::Item>
+    {
+        unsafe {
+            while self.idx < self.old_len {
+                let i = self.idx;
+                let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
+                self.panic_flag = true;
+                let drained = (self.pred)(&mut v[i]);
+                self.panic_flag = false;
+                // Update the index *after* the predicate is called. If the index
+                // is updated prior and the predicate panics, the element at this
+                // index would be leaked.
+                self.idx += 1;
+                if drained {
+                    self.del += 1;
+                    return Some(ptr::read(&v[i]));
+                } else if self.del > 0 {
+                    let del = self.del;
+                    let src: *const Self::Item = &v[i];
+                    let dst: *mut Self::Item = &mut v[i - del];
+                    ptr::copy_nonoverlapping(src, dst, 1);
+                }
+            }
+            None
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (0, Some(self.old_len - self.idx))
+    }
+}
+
+#[cfg(feature = "drain_filter")]
+impl <T, F> Drop for DrainFilter<'_, T, F>
+where
+    F: FnMut(&mut T::Item) -> bool,
+    T: Array,
+{
+    fn drop(&mut self) {
+        struct BackshiftOnDrop<'a, 'b, T, F>
+        where
+            F: FnMut(&mut T::Item) -> bool,
+            T: Array
+        {
+            drain: &'b mut DrainFilter<'a, T, F>,
+        }
+
+        impl<'a, 'b, T, F> Drop for BackshiftOnDrop<'a, 'b, T, F>
+        where
+            F: FnMut(&mut T::Item) -> bool,
+            T: Array
+        {
+            fn drop(&mut self) {
+                unsafe {
+                    if self.drain.idx < self.drain.old_len && self.drain.del > 0 {
+                        // This is a pretty messed up state, and there isn't really an
+                        // obviously right thing to do. We don't want to keep trying
+                        // to execute `pred`, so we just backshift all the unprocessed
+                        // elements and tell the vec that they still exist. The backshift
+                        // is required to prevent a double-drop of the last successfully
+                        // drained item prior to a panic in the predicate.
+                        let ptr = self.drain.vec.as_mut_ptr();
+                        let src = ptr.add(self.drain.idx);
+                        let dst = src.sub(self.drain.del);
+                        let tail_len = self.drain.old_len - self.drain.idx;
+                        src.copy_to(dst, tail_len);
+                    }
+                    self.drain.vec.set_len(self.drain.old_len - self.drain.del);
+                }
+            }
+        }
+
+        let backshift = BackshiftOnDrop { drain: self };
+
+        // Attempt to consume any remaining elements if the filter predicate
+        // has not yet panicked. We'll backshift any remaining elements
+        // whether we've already panicked or if the consumption here panics.
+        if !backshift.drain.panic_flag {
+            backshift.drain.for_each(drop);
+        }
+    }
+}
+
+#[cfg(feature = "drain_keep_rest")]
+impl <T, F> DrainFilter<'_, T, F>
+where
+    F: FnMut(&mut T::Item) -> bool,
+    T: Array
+{
+    /// Keep unyielded elements in the source vector.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    ///
+    /// let mut vec: SmallVec<[char; 2]> = smallvec!['a', 'b', 'c'];
+    /// let mut drain = vec.drain_filter(|_| true);
+    ///
+    /// assert_eq!(drain.next().unwrap(), 'a');
+    ///
+    /// // This call keeps 'b' and 'c' in the vec.
+    /// drain.keep_rest();
+    ///
+    /// // If we had not called `keep_rest()`,
+    /// // `vec` would be empty.
+    /// assert_eq!(vec, SmallVec::<[char; 2]>::from_slice(&['b', 'c']));
+    /// ```
+    pub fn keep_rest(self)
+    {
+        // At this moment layout looks like this:
+        //
+        //  _____________________/-- old_len
+        // /                     \
+        // [kept] [yielded] [tail]
+        //        \_______/ ^-- idx
+        //                \-- del
+        //
+        // Normally `Drop` impl would drop [tail] (via .for_each(drop), ie still calling `pred`)
+        //
+        // 1. Move [tail] after [kept]
+        // 2. Update length of the original vec to `old_len - del`
+        //    a. In case of ZST, this is the only thing we want to do
+        // 3. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
+        let mut this = ManuallyDrop::new(self);
+
+        unsafe {
+            // ZSTs have no identity, so we don't need to move them around.
+            let needs_move = mem::size_of::<T>() != 0;
+
+            if needs_move && this.idx < this.old_len && this.del > 0 {
+                let ptr = this.vec.as_mut_ptr();
+                let src = ptr.add(this.idx);
+                let dst = src.sub(this.del);
+                let tail_len = this.old_len - this.idx;
+                src.copy_to(dst, tail_len);
+            }
+
+            let new_len = this.old_len - this.del;
+            this.vec.set_len(new_len);
+        }
+    }
+}
+
+#[cfg(feature = "union")]
+union SmallVecData<A: Array> {
+    inline: core::mem::ManuallyDrop<MaybeUninit<A>>,
+    heap: (NonNull<A::Item>, usize),
+}
+
+#[cfg(all(feature = "union", feature = "const_new"))]
+impl<T, const N: usize> SmallVecData<[T; N]> {
+    #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+    #[inline]
+    const fn from_const(inline: MaybeUninit<[T; N]>) -> Self {
+        SmallVecData {
+            inline: core::mem::ManuallyDrop::new(inline),
+        }
+    }
+}
+
+#[cfg(feature = "union")]
+impl<A: Array> SmallVecData<A> {
+    #[inline]
+    unsafe fn inline(&self) -> ConstNonNull<A::Item> {
+        ConstNonNull::new(self.inline.as_ptr() as *const A::Item).unwrap()
+    }
+    #[inline]
+    unsafe fn inline_mut(&mut self) -> NonNull<A::Item> {
+        NonNull::new(self.inline.as_mut_ptr() as *mut A::Item).unwrap()
+    }
+    #[inline]
+    fn from_inline(inline: MaybeUninit<A>) -> SmallVecData<A> {
+        SmallVecData {
+            inline: core::mem::ManuallyDrop::new(inline),
+        }
+    }
+    #[inline]
+    unsafe fn into_inline(self) -> MaybeUninit<A> {
+        core::mem::ManuallyDrop::into_inner(self.inline)
+    }
+    #[inline]
+    unsafe fn heap(&self) -> (ConstNonNull<A::Item>, usize) {
+        (ConstNonNull(self.heap.0), self.heap.1)
+    }
+    #[inline]
+    unsafe fn heap_mut(&mut self) -> (NonNull<A::Item>, &mut usize) {
+        let h = &mut self.heap;
+        (h.0, &mut h.1)
+    }
+    #[inline]
+    fn from_heap(ptr: NonNull<A::Item>, len: usize) -> SmallVecData<A> {
+        SmallVecData { heap: (ptr, len) }
+    }
+}
+
+#[cfg(not(feature = "union"))]
+enum SmallVecData<A: Array> {
+    Inline(MaybeUninit<A>),
+    // Using NonNull and NonZero here allows us to reduce the size of `SmallVec`.
+    Heap {
+        // Since we never allocate on the heap unless our capacity is bigger
+        // than the inline capacity, the heap capacity cannot be less than 1.
+        // Therefore, the pointer cannot be null either.
+        ptr: NonNull<A::Item>,
+        len: usize,
+    },
+}
+
+#[cfg(all(not(feature = "union"), feature = "const_new"))]
+impl<T, const N: usize> SmallVecData<[T; N]> {
+    #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+    #[inline]
+    const fn from_const(inline: MaybeUninit<[T; N]>) -> Self {
+        SmallVecData::Inline(inline)
+    }
+}
+
+#[cfg(not(feature = "union"))]
+impl<A: Array> SmallVecData<A> {
+    #[inline]
+    unsafe fn inline(&self) -> ConstNonNull<A::Item> {
+        match self {
+            SmallVecData::Inline(a) => ConstNonNull::new(a.as_ptr() as *const A::Item).unwrap(),
+            _ => debug_unreachable!(),
+        }
+    }
+    #[inline]
+    unsafe fn inline_mut(&mut self) -> NonNull<A::Item> {
+        match self {
+            SmallVecData::Inline(a) => NonNull::new(a.as_mut_ptr() as *mut A::Item).unwrap(),
+            _ => debug_unreachable!(),
+        }
+    }
+    #[inline]
+    fn from_inline(inline: MaybeUninit<A>) -> SmallVecData<A> {
+        SmallVecData::Inline(inline)
+    }
+    #[inline]
+    unsafe fn into_inline(self) -> MaybeUninit<A> {
+        match self {
+            SmallVecData::Inline(a) => a,
+            _ => debug_unreachable!(),
+        }
+    }
+    #[inline]
+    unsafe fn heap(&self) -> (ConstNonNull<A::Item>, usize) {
+        match self {
+            SmallVecData::Heap { ptr, len } => (ConstNonNull(*ptr), *len),
+            _ => debug_unreachable!(),
+        }
+    }
+    #[inline]
+    unsafe fn heap_mut(&mut self) -> (NonNull<A::Item>, &mut usize) {
+        match self {
+            SmallVecData::Heap { ptr, len } => (*ptr, len),
+            _ => debug_unreachable!(),
+        }
+    }
+    #[inline]
+    fn from_heap(ptr: NonNull<A::Item>, len: usize) -> SmallVecData<A> {
+        SmallVecData::Heap { ptr, len }
+    }
+}
+
+unsafe impl<A: Array + Send> Send for SmallVecData<A> {}
+unsafe impl<A: Array + Sync> Sync for SmallVecData<A> {}
+
+/// A `Vec`-like container that can store a small number of elements inline.
+///
+/// `SmallVec` acts like a vector, but can store a limited amount of data inline within the
+/// `SmallVec` struct rather than in a separate allocation.  If the data exceeds this limit, the
+/// `SmallVec` will "spill" its data onto the heap, allocating a new buffer to hold it.
+///
+/// The amount of data that a `SmallVec` can store inline depends on its backing store. The backing
+/// store can be any type that implements the `Array` trait; usually it is a small fixed-sized
+/// array.  For example a `SmallVec<[u64; 8]>` can hold up to eight 64-bit integers inline.
+///
+/// ## Example
+///
+/// ```rust
+/// use smallvec::SmallVec;
+/// let mut v = SmallVec::<[u8; 4]>::new(); // initialize an empty vector
+///
+/// // The vector can hold up to 4 items without spilling onto the heap.
+/// v.extend(0..4);
+/// assert_eq!(v.len(), 4);
+/// assert!(!v.spilled());
+///
+/// // Pushing another element will force the buffer to spill:
+/// v.push(4);
+/// assert_eq!(v.len(), 5);
+/// assert!(v.spilled());
+/// ```
+pub struct SmallVec<A: Array> {
+    // The capacity field is used to determine which of the storage variants is active:
+    // If capacity <= Self::inline_capacity() then the inline variant is used and capacity holds the current length of the vector (number of elements actually in use).
+    // If capacity > Self::inline_capacity() then the heap variant is used and capacity holds the size of the memory allocation.
+    capacity: usize,
+    data: SmallVecData<A>,
+}
+
+impl<A: Array> SmallVec<A> {
+    /// Construct an empty vector
+    #[inline]
+    pub fn new() -> SmallVec<A> {
+        // Try to detect invalid custom implementations of `Array`. Hopefully,
+        // this check should be optimized away entirely for valid ones.
+        assert!(
+            mem::size_of::<A>() == A::size() * mem::size_of::<A::Item>()
+                && mem::align_of::<A>() >= mem::align_of::<A::Item>()
+        );
+        SmallVec {
+            capacity: 0,
+            data: SmallVecData::from_inline(MaybeUninit::uninit()),
+        }
+    }
+
+    /// Construct an empty vector with enough capacity pre-allocated to store at least `n`
+    /// elements.
+    ///
+    /// Will create a heap allocation only if `n` is larger than the inline capacity.
+    ///
+    /// ```
+    /// # use smallvec::SmallVec;
+    ///
+    /// let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(100);
+    ///
+    /// assert!(v.is_empty());
+    /// assert!(v.capacity() >= 100);
+    /// ```
+    #[inline]
+    pub fn with_capacity(n: usize) -> Self {
+        let mut v = SmallVec::new();
+        v.reserve_exact(n);
+        v
+    }
+
+    /// Construct a new `SmallVec` from a `Vec<A::Item>`.
+    ///
+    /// Elements will be copied to the inline buffer if `vec.capacity() <= Self::inline_capacity()`.
+    ///
+    /// ```rust
+    /// use smallvec::SmallVec;
+    ///
+    /// let vec = vec![1, 2, 3, 4, 5];
+    /// let small_vec: SmallVec<[_; 3]> = SmallVec::from_vec(vec);
+    ///
+    /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    /// ```
+    #[inline]
+    pub fn from_vec(mut vec: Vec<A::Item>) -> SmallVec<A> {
+        if vec.capacity() <= Self::inline_capacity() {
+            // We cannot keep using the `Vec`'s buffer when its capacity fits inline,
+            // because the value of the `capacity` field is used as the storage-variant indicator.
+            unsafe {
+                let mut data = SmallVecData::<A>::from_inline(MaybeUninit::uninit());
+                let len = vec.len();
+                vec.set_len(0);
+                ptr::copy_nonoverlapping(vec.as_ptr(), data.inline_mut().as_ptr(), len);
+
+                SmallVec {
+                    capacity: len,
+                    data,
+                }
+            }
+        } else {
+            let (ptr, cap, len) = (vec.as_mut_ptr(), vec.capacity(), vec.len());
+            mem::forget(vec);
+            let ptr = NonNull::new(ptr)
+                // See docs: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.as_mut_ptr
+                .expect("Cannot be null by `Vec` invariant");
+
+            SmallVec {
+                capacity: cap,
+                data: SmallVecData::from_heap(ptr, len),
+            }
+        }
+    }
+
+    /// Constructs a new `SmallVec` on the stack from an `A` without
+    /// copying elements.
+    ///
+    /// ```rust
+    /// use smallvec::SmallVec;
+    ///
+    /// let buf = [1, 2, 3, 4, 5];
+    /// let small_vec: SmallVec<_> = SmallVec::from_buf(buf);
+    ///
+    /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    /// ```
+    #[inline]
+    pub fn from_buf(buf: A) -> SmallVec<A> {
+        SmallVec {
+            capacity: A::size(),
+            data: SmallVecData::from_inline(MaybeUninit::new(buf)),
+        }
+    }
+
+    /// Constructs a new `SmallVec` on the stack from an `A` without
+    /// copying elements. Also sets the length, which must be less than or
+    /// equal to the size of `buf`.
+    ///
+    /// ```rust
+    /// use smallvec::SmallVec;
+    ///
+    /// let buf = [1, 2, 3, 4, 5, 0, 0, 0];
+    /// let small_vec: SmallVec<_> = SmallVec::from_buf_and_len(buf, 5);
+    ///
+    /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    /// ```
+    #[inline]
+    pub fn from_buf_and_len(buf: A, len: usize) -> SmallVec<A> {
+        assert!(len <= A::size());
+        unsafe { SmallVec::from_buf_and_len_unchecked(MaybeUninit::new(buf), len) }
+    }
+
+    /// Constructs a new `SmallVec` on the stack from an `A` without
+    /// copying elements. Also sets the length. The user is responsible
+    /// for ensuring that `len <= A::size()`.
+    ///
+    /// ```rust
+    /// use smallvec::SmallVec;
+    /// use std::mem::MaybeUninit;
+    ///
+    /// let buf = [1, 2, 3, 4, 5, 0, 0, 0];
+    /// let small_vec: SmallVec<_> = unsafe {
+    ///     SmallVec::from_buf_and_len_unchecked(MaybeUninit::new(buf), 5)
+    /// };
+    ///
+    /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    /// ```
+    #[inline]
+    pub unsafe fn from_buf_and_len_unchecked(buf: MaybeUninit<A>, len: usize) -> SmallVec<A> {
+        SmallVec {
+            capacity: len,
+            data: SmallVecData::from_inline(buf),
+        }
+    }
+
+    /// Sets the length of a vector.
+    ///
+    /// This will explicitly set the size of the vector, without actually
+    /// modifying its buffers, so it is up to the caller to ensure that the
+    /// vector is actually the specified size.
+    pub unsafe fn set_len(&mut self, new_len: usize) {
+        let (_, len_ptr, _) = self.triple_mut();
+        *len_ptr = new_len;
+    }
+
+    /// The maximum number of elements this vector can hold inline
+    #[inline]
+    fn inline_capacity() -> usize {
+        if mem::size_of::<A::Item>() > 0 {
+            A::size()
+        } else {
+            // For zero-size items code like `ptr.add(offset)` always returns the same pointer.
+            // Therefore all items are at the same address,
+            // and any array size has capacity for infinitely many items.
+            // The capacity is limited by the bit width of the length field.
+            //
+            // `Vec` also does this:
+            // https://github.com/rust-lang/rust/blob/1.44.0/src/liballoc/raw_vec.rs#L186
+            //
+            // In our case, this also ensures that a smallvec of zero-size items never spills,
+            // and we never try to allocate zero bytes which `std::alloc::alloc` disallows.
+            core::usize::MAX
+        }
+    }
+
+    /// The maximum number of elements this vector can hold inline
+    #[inline]
+    pub fn inline_size(&self) -> usize {
+        Self::inline_capacity()
+    }
+
+    /// The number of elements stored in the vector
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.triple().1
+    }
+
+    /// Returns `true` if the vector is empty
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// The number of items the vector can hold without reallocating
+    #[inline]
+    pub fn capacity(&self) -> usize {
+        self.triple().2
+    }
+
+    /// Returns a tuple with (data ptr, len, capacity)
+    /// Useful to get all `SmallVec` properties with a single check of the current storage variant.
+    #[inline]
+    fn triple(&self) -> (ConstNonNull<A::Item>, usize, usize) {
+        unsafe {
+            if self.spilled() {
+                let (ptr, len) = self.data.heap();
+                (ptr, len, self.capacity)
+            } else {
+                (self.data.inline(), self.capacity, Self::inline_capacity())
+            }
+        }
+    }
+
+    /// Returns a tuple with (data ptr, len ptr, capacity)
+    #[inline]
+    fn triple_mut(&mut self) -> (NonNull<A::Item>, &mut usize, usize) {
+        unsafe {
+            if self.spilled() {
+                let (ptr, len_ptr) = self.data.heap_mut();
+                (ptr, len_ptr, self.capacity)
+            } else {
+                (
+                    self.data.inline_mut(),
+                    &mut self.capacity,
+                    Self::inline_capacity(),
+                )
+            }
+        }
+    }
+
+    /// Returns `true` if the data has spilled into a separate heap-allocated buffer.
+    #[inline]
+    pub fn spilled(&self) -> bool {
+        self.capacity > Self::inline_capacity()
+    }
+
+    /// Creates a draining iterator that removes the specified range in the vector
+    /// and yields the removed items.
+    ///
+    /// Note 1: The element range is removed even if the iterator is only
+    /// partially consumed or not consumed at all.
+    ///
+    /// Note 2: It is unspecified how many elements are removed from the vector
+    /// if the `Drain` value is leaked.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the starting point is greater than the end point or if
+    /// the end point is greater than the length of the vector.
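+    ///
+    /// For illustration, a minimal sketch:
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3, 4];
+    /// // Remove the middle elements and collect them into a new vector.
+    /// let drained: SmallVec<[i32; 4]> = v.drain(1..3).collect();
+    /// assert_eq!(&*drained, &[2, 3]);
+    /// assert_eq!(&*v, &[1, 4]);
+    /// ```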
+    pub fn drain<R>(&mut self, range: R) -> Drain<'_, A>
+    where
+        R: RangeBounds<usize>,
+    {
+        use core::ops::Bound::*;
+
+        let len = self.len();
+        let start = match range.start_bound() {
+            Included(&n) => n,
+            Excluded(&n) => n.checked_add(1).expect("Range start out of bounds"),
+            Unbounded => 0,
+        };
+        let end = match range.end_bound() {
+            Included(&n) => n.checked_add(1).expect("Range end out of bounds"),
+            Excluded(&n) => n,
+            Unbounded => len,
+        };
+
+        assert!(start <= end);
+        assert!(end <= len);
+
+        unsafe {
+            self.set_len(start);
+
+            let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start);
+
+            Drain {
+                tail_start: end,
+                tail_len: len - end,
+                iter: range_slice.iter(),
+                // Since self is a &mut, passing it to a function would invalidate the slice iterator.
+                vec: NonNull::new_unchecked(self as *mut _),
+            }
+        }
+    }
+
+    #[cfg(feature = "drain_filter")]
+    /// Creates an iterator which uses a closure to determine if an element should be removed.
+    ///
+    /// If the closure returns true, the element is removed and yielded. If the closure returns
+    /// false, the element will remain in the vector and will not be yielded by the iterator.
+    ///
+    /// Using this method is equivalent to the following code:
+    /// ```
+    /// # use smallvec::SmallVec;
+    /// # let some_predicate = |x: &mut i32| { *x == 2 || *x == 3 || *x == 6 };
+    /// # let mut vec: SmallVec<[i32; 8]> = SmallVec::from_slice(&[1i32, 2, 3, 4, 5, 6]);
+    /// let mut i = 0;
+    /// while i < vec.len() {
+    ///     if some_predicate(&mut vec[i]) {
+    ///         let val = vec.remove(i);
+    ///         // your code here
+    ///     } else {
+    ///         i += 1;
+    ///     }
+    /// }
+    ///
+    /// # assert_eq!(vec, SmallVec::<[i32; 8]>::from_slice(&[1i32, 4, 5]));
+    /// ```
+    /// But `drain_filter` is easier to use. `drain_filter` is also more efficient,
+    /// because it can backshift the elements of the array in bulk.
+    ///
+    /// Note that `drain_filter` also lets you mutate every element in the filter closure,
+    /// regardless of whether you choose to keep or remove it.
+    ///
+    /// # Examples
+    ///
+    /// Splitting an array into evens and odds, reusing the original allocation:
+    ///
+    /// ```
+    /// # use smallvec::SmallVec;
+    /// let mut numbers: SmallVec<[i32; 16]> = SmallVec::from_slice(&[1i32, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15]);
+    ///
+    /// let evens = numbers.drain_filter(|x| *x % 2 == 0).collect::<SmallVec<[i32; 16]>>();
+    /// let odds = numbers;
+    ///
+    /// assert_eq!(evens, SmallVec::<[i32; 16]>::from_slice(&[2i32, 4, 6, 8, 14]));
+    /// assert_eq!(odds, SmallVec::<[i32; 16]>::from_slice(&[1i32, 3, 5, 9, 11, 13, 15]));
+    /// ```
+    pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, A, F>
+    where
+        F: FnMut(&mut A::Item) -> bool,
+    {
+        let old_len = self.len();
+
+        // Guard against us getting leaked (leak amplification)
+        unsafe {
+            self.set_len(0);
+        }
+
+        DrainFilter { vec: self, idx: 0, del: 0, old_len, pred: filter, panic_flag: false }
+    }
+
+    /// Append an item to the vector.
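+    ///
+    /// For example, pushing past the inline capacity spills to the heap:
+    ///
+    /// ```
+    /// # use smallvec::SmallVec;
+    /// let mut v: SmallVec<[i32; 2]> = SmallVec::new();
+    /// v.push(1);
+    /// v.push(2);
+    /// // The third push exceeds the inline capacity of 2 and spills to the heap.
+    /// v.push(3);
+    /// assert_eq!(&*v, &[1, 2, 3]);
+    /// assert!(v.spilled());
+    /// ```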
+    #[inline]
+    pub fn push(&mut self, value: A::Item) {
+        unsafe {
+            let (mut ptr, mut len, cap) = self.triple_mut();
+            if *len == cap {
+                self.reserve_one_unchecked();
+                let (heap_ptr, heap_len) = self.data.heap_mut();
+                ptr = heap_ptr;
+                len = heap_len;
+            }
+            ptr::write(ptr.as_ptr().add(*len), value);
+            *len += 1;
+        }
+    }
+
+    /// Remove an item from the end of the vector and return it, or None if empty.
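+    ///
+    /// For example, popping past the last element yields `None`:
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let mut v: SmallVec<[i32; 4]> = smallvec![1, 2];
+    /// assert_eq!(v.pop(), Some(2));
+    /// assert_eq!(v.pop(), Some(1));
+    /// assert_eq!(v.pop(), None);
+    /// ```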
+    #[inline]
+    pub fn pop(&mut self) -> Option<A::Item> {
+        unsafe {
+            let (ptr, len_ptr, _) = self.triple_mut();
+            let ptr: *const _ = ptr.as_ptr();
+            if *len_ptr == 0 {
+                return None;
+            }
+            let last_index = *len_ptr - 1;
+            *len_ptr = last_index;
+            Some(ptr::read(ptr.add(last_index)))
+        }
+    }
+
+    /// Moves all the elements of `other` into `self`, leaving `other` empty.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use smallvec::{SmallVec, smallvec};
+    /// let mut v0: SmallVec<[u8; 16]> = smallvec![1, 2, 3];
+    /// let mut v1: SmallVec<[u8; 32]> = smallvec![4, 5, 6];
+    /// v0.append(&mut v1);
+    /// assert_eq!(*v0, [1, 2, 3, 4, 5, 6]);
+    /// assert_eq!(*v1, []);
+    /// ```
+    pub fn append<B>(&mut self, other: &mut SmallVec<B>)
+    where
+        B: Array<Item = A::Item>,
+    {
+        self.extend(other.drain(..))
+    }
+
+    /// Re-allocate to set the capacity to `max(new_cap, inline_size())`.
+    ///
+    /// Panics if `new_cap` is less than the vector's length
+    /// or if the capacity computation overflows `usize`.
+    pub fn grow(&mut self, new_cap: usize) {
+        infallible(self.try_grow(new_cap))
+    }
+
+    /// Re-allocate to set the capacity to `max(new_cap, inline_size())`.
+    ///
+    /// Panics if `new_cap` is less than the vector's length
+    pub fn try_grow(&mut self, new_cap: usize) -> Result<(), CollectionAllocErr> {
+        unsafe {
+            let unspilled = !self.spilled();
+            let (ptr, &mut len, cap) = self.triple_mut();
+            assert!(new_cap >= len);
+            if new_cap <= Self::inline_capacity() {
+                if unspilled {
+                    return Ok(());
+                }
+                self.data = SmallVecData::from_inline(MaybeUninit::uninit());
+                ptr::copy_nonoverlapping(ptr.as_ptr(), self.data.inline_mut().as_ptr(), len);
+                self.capacity = len;
+                deallocate(ptr, cap);
+            } else if new_cap != cap {
+                let layout = layout_array::<A::Item>(new_cap)?;
+                debug_assert!(layout.size() > 0);
+                let new_alloc;
+                if unspilled {
+                    new_alloc = NonNull::new(alloc::alloc::alloc(layout))
+                        .ok_or(CollectionAllocErr::AllocErr { layout })?
+                        .cast();
+                    ptr::copy_nonoverlapping(ptr.as_ptr(), new_alloc.as_ptr(), len);
+                } else {
+                    // This should never fail since the same succeeded
+                    // when previously allocating `ptr`.
+                    let old_layout = layout_array::<A::Item>(cap)?;
+
+                    let new_ptr =
+                        alloc::alloc::realloc(ptr.as_ptr() as *mut u8, old_layout, layout.size());
+                    new_alloc = NonNull::new(new_ptr)
+                        .ok_or(CollectionAllocErr::AllocErr { layout })?
+                        .cast();
+                }
+                self.data = SmallVecData::from_heap(new_alloc, len);
+                self.capacity = new_cap;
+            }
+            Ok(())
+        }
+    }
+
+    /// Reserve capacity for `additional` more elements to be inserted.
+    ///
+    /// May reserve more space to avoid frequent reallocations.
+    ///
+    /// Panics if the capacity computation overflows `usize`.
+    #[inline]
+    pub fn reserve(&mut self, additional: usize) {
+        infallible(self.try_reserve(additional))
+    }
+
+    /// Internal method used to grow in `push()` and `insert()`, where we already know we have to grow.
+    #[cold]
+    fn reserve_one_unchecked(&mut self) {
+        debug_assert_eq!(self.len(), self.capacity());
+        let new_cap = self.len()
+            .checked_add(1)
+            .and_then(usize::checked_next_power_of_two)
+            .expect("capacity overflow");
+        infallible(self.try_grow(new_cap))
+    }
+
+    /// Reserve capacity for `additional` more elements to be inserted.
+    ///
+    /// May reserve more space to avoid frequent reallocations.
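+    ///
+    /// For illustration, a minimal sketch of the fallible path:
+    ///
+    /// ```
+    /// # use smallvec::SmallVec;
+    /// let mut v: SmallVec<[u8; 4]> = SmallVec::new();
+    /// // On success the vector has room for at least 100 more elements.
+    /// assert!(v.try_reserve(100).is_ok());
+    /// assert!(v.capacity() >= 100);
+    /// ```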
+    pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
+        // prefer triple_mut() even if triple() would work so that the optimizer removes duplicated
+        // calls to it from callers.
+        let (_, &mut len, cap) = self.triple_mut();
+        if cap - len >= additional {
+            return Ok(());
+        }
+        let new_cap = len
+            .checked_add(additional)
+            .and_then(usize::checked_next_power_of_two)
+            .ok_or(CollectionAllocErr::CapacityOverflow)?;
+        self.try_grow(new_cap)
+    }
+
+    /// Reserve the minimum capacity for `additional` more elements to be inserted.
+    ///
+    /// Panics if the new capacity overflows `usize`.
+    pub fn reserve_exact(&mut self, additional: usize) {
+        infallible(self.try_reserve_exact(additional))
+    }
+
+    /// Reserve the minimum capacity for `additional` more elements to be inserted.
+    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
+        let (_, &mut len, cap) = self.triple_mut();
+        if cap - len >= additional {
+            return Ok(());
+        }
+        let new_cap = len
+            .checked_add(additional)
+            .ok_or(CollectionAllocErr::CapacityOverflow)?;
+        self.try_grow(new_cap)
+    }
+
+    /// Shrink the capacity of the vector as much as possible.
+    ///
+    /// When possible, this will move data from an external heap buffer to the vector's inline
+    /// storage.
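+    ///
+    /// For example:
+    ///
+    /// ```
+    /// # use smallvec::SmallVec;
+    /// let mut v: SmallVec<[i32; 4]> = SmallVec::with_capacity(16);
+    /// v.extend(0..3);
+    /// assert!(v.spilled());
+    /// // Three elements fit inline again after shrinking.
+    /// v.shrink_to_fit();
+    /// assert!(!v.spilled());
+    /// ```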
+    pub fn shrink_to_fit(&mut self) {
+        if !self.spilled() {
+            return;
+        }
+        let len = self.len();
+        if self.inline_size() >= len {
+            unsafe {
+                let (ptr, len) = self.data.heap();
+                self.data = SmallVecData::from_inline(MaybeUninit::uninit());
+                ptr::copy_nonoverlapping(ptr.as_ptr(), self.data.inline_mut().as_ptr(), len);
+                deallocate(ptr.0, self.capacity);
+                self.capacity = len;
+            }
+        } else if self.capacity() > len {
+            self.grow(len);
+        }
+    }
+
+    /// Shorten the vector, keeping the first `len` elements and dropping the rest.
+    ///
+    /// If `len` is greater than or equal to the vector's current length, this has no
+    /// effect.
+    ///
+    /// This does not re-allocate.  If you want the vector's capacity to shrink, call
+    /// `shrink_to_fit` after truncating.
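+    ///
+    /// For example:
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3, 4];
+    /// v.truncate(2);
+    /// assert_eq!(&*v, &[1, 2]);
+    /// ```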
+    pub fn truncate(&mut self, len: usize) {
+        unsafe {
+            let (ptr, len_ptr, _) = self.triple_mut();
+            let ptr = ptr.as_ptr();
+            while len < *len_ptr {
+                let last_index = *len_ptr - 1;
+                *len_ptr = last_index;
+                ptr::drop_in_place(ptr.add(last_index));
+            }
+        }
+    }
+
+    /// Extracts a slice containing the entire vector.
+    ///
+    /// Equivalent to `&s[..]`.
+    pub fn as_slice(&self) -> &[A::Item] {
+        self
+    }
+
+    /// Extracts a mutable slice of the entire vector.
+    ///
+    /// Equivalent to `&mut s[..]`.
+    pub fn as_mut_slice(&mut self) -> &mut [A::Item] {
+        self
+    }
+
+    /// Remove the element at position `index`, replacing it with the last element.
+    ///
+    /// This does not preserve ordering, but is O(1).
+    ///
+    /// Panics if `index` is out of bounds.
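+    ///
+    /// For example:
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3, 4];
+    /// assert_eq!(v.swap_remove(0), 1);
+    /// // The last element was moved into the vacated slot.
+    /// assert_eq!(&*v, &[4, 2, 3]);
+    /// ```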
+    #[inline]
+    pub fn swap_remove(&mut self, index: usize) -> A::Item {
+        let len = self.len();
+        self.swap(len - 1, index);
+        self.pop()
+            .unwrap_or_else(|| unsafe { unreachable_unchecked() })
+    }
+
+    /// Remove all elements from the vector.
+    #[inline]
+    pub fn clear(&mut self) {
+        self.truncate(0);
+    }
+
+    /// Remove and return the element at position `index`, shifting all elements after it to the
+    /// left.
+    ///
+    /// Panics if `index` is out of bounds.
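+    ///
+    /// For example:
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3];
+    /// assert_eq!(v.remove(1), 2);
+    /// assert_eq!(&*v, &[1, 3]);
+    /// ```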
+    pub fn remove(&mut self, index: usize) -> A::Item {
+        unsafe {
+            let (ptr, len_ptr, _) = self.triple_mut();
+            let len = *len_ptr;
+            assert!(index < len);
+            *len_ptr = len - 1;
+            let ptr = ptr.as_ptr().add(index);
+            let item = ptr::read(ptr);
+            ptr::copy(ptr.add(1), ptr, len - index - 1);
+            item
+        }
+    }
+
+    /// Insert an element at position `index`, shifting all elements after it to the right.
+    ///
+    /// Panics if `index > len`.
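+    ///
+    /// For example:
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let mut v: SmallVec<[i32; 4]> = smallvec![1, 3];
+    /// v.insert(1, 2);
+    /// assert_eq!(&*v, &[1, 2, 3]);
+    /// ```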
+    pub fn insert(&mut self, index: usize, element: A::Item) {
+        unsafe {
+            let (mut ptr, mut len_ptr, cap) = self.triple_mut();
+            if *len_ptr == cap {
+                self.reserve_one_unchecked();
+                let (heap_ptr, heap_len_ptr) = self.data.heap_mut();
+                ptr = heap_ptr;
+                len_ptr = heap_len_ptr;
+            }
+            let mut ptr = ptr.as_ptr();
+            let len = *len_ptr;
+            ptr = ptr.add(index);
+            if index < len {
+                ptr::copy(ptr, ptr.add(1), len - index);
+            } else if index == len {
+                // No elements need shifting.
+            } else {
+                panic!("index exceeds length");
+            }
+            *len_ptr = len + 1;
+            ptr::write(ptr, element);
+        }
+    }
+
+    /// Insert multiple elements at position `index`, shifting all following elements toward the
+    /// back.
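+    ///
+    /// For example:
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let mut v: SmallVec<[i32; 8]> = smallvec![1, 5];
+    /// v.insert_many(1, 2..5);
+    /// assert_eq!(&*v, &[1, 2, 3, 4, 5]);
+    /// ```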
+    pub fn insert_many<I: IntoIterator<Item = A::Item>>(&mut self, index: usize, iterable: I) {
+        let mut iter = iterable.into_iter();
+        if index == self.len() {
+            return self.extend(iter);
+        }
+
+        let (lower_size_bound, _) = iter.size_hint();
+        assert!(lower_size_bound <= core::isize::MAX as usize); // Ensure offset is indexable
+        assert!(index + lower_size_bound >= index); // Protect against overflow
+
+        let mut num_added = 0;
+        let old_len = self.len();
+        assert!(index <= old_len);
+
+        unsafe {
+            // Reserve space for `lower_size_bound` elements.
+            self.reserve(lower_size_bound);
+            let start = self.as_mut_ptr();
+            let ptr = start.add(index);
+
+            // Move the trailing elements.
+            ptr::copy(ptr, ptr.add(lower_size_bound), old_len - index);
+
+            // In case the iterator panics, don't double-drop the items we just copied above.
+            self.set_len(0);
+            let mut guard = DropOnPanic {
+                start,
+                skip: index..(index + lower_size_bound),
+                len: old_len + lower_size_bound,
+            };
+
+            // The set_len above invalidates the previous pointers, so we must re-create them.
+            let start = self.as_mut_ptr();
+            let ptr = start.add(index);
+
+            while num_added < lower_size_bound {
+                let element = match iter.next() {
+                    Some(x) => x,
+                    None => break,
+                };
+                let cur = ptr.add(num_added);
+                ptr::write(cur, element);
+                guard.skip.start += 1;
+                num_added += 1;
+            }
+
+            if num_added < lower_size_bound {
+                // Iterator provided fewer elements than the hint. Move the tail backward.
+                ptr::copy(
+                    ptr.add(lower_size_bound),
+                    ptr.add(num_added),
+                    old_len - index,
+                );
+            }
+            // There are no more duplicate or uninitialized slots, so the guard is not needed.
+            self.set_len(old_len + num_added);
+            mem::forget(guard);
+        }
+
+        // Insert any remaining elements one-by-one.
+        for element in iter {
+            self.insert(index + num_added, element);
+            num_added += 1;
+        }
+
+        struct DropOnPanic<T> {
+            start: *mut T,
+            skip: Range<usize>, // Space we copied-out-of, but haven't written-to yet.
+            len: usize,
+        }
+
+        impl<T> Drop for DropOnPanic<T> {
+            fn drop(&mut self) {
+                for i in 0..self.len {
+                    if !self.skip.contains(&i) {
+                        unsafe {
+                            ptr::drop_in_place(self.start.add(i));
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /// Convert a `SmallVec` to a `Vec`, without reallocating if the `SmallVec` has already spilled onto
+    /// the heap.
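+    ///
+    /// For example:
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let v: SmallVec<[i32; 2]> = smallvec![1, 2, 3];
+    /// let vec: Vec<i32> = v.into_vec();
+    /// assert_eq!(vec, vec![1, 2, 3]);
+    /// ```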
+    pub fn into_vec(mut self) -> Vec<A::Item> {
+        if self.spilled() {
+            unsafe {
+                let (ptr, &mut len) = self.data.heap_mut();
+                let v = Vec::from_raw_parts(ptr.as_ptr(), len, self.capacity);
+                mem::forget(self);
+                v
+            }
+        } else {
+            self.into_iter().collect()
+        }
+    }
+
+    /// Converts a `SmallVec` into a `Box<[T]>` without reallocating if the `SmallVec` has already spilled
+    /// onto the heap.
+    ///
+    /// Note that this will drop any excess capacity.
+    pub fn into_boxed_slice(self) -> Box<[A::Item]> {
+        self.into_vec().into_boxed_slice()
+    }
+
+    /// Convert the `SmallVec` into an `A` if possible. Otherwise return `Err(Self)`.
+    ///
+    /// This method returns `Err(Self)` if the `SmallVec` is too short (and the `A` contains uninitialized elements),
+    /// or if the `SmallVec` is too long (and all the elements were spilled to the heap).
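+    ///
+    /// For example:
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let v: SmallVec<[i32; 2]> = smallvec![1, 2];
+    /// // Exactly full and still inline, so the backing array can be extracted.
+    /// assert_eq!(v.into_inner().unwrap(), [1, 2]);
+    ///
+    /// let v: SmallVec<[i32; 2]> = smallvec![1];
+    /// // Too short: the array would contain an uninitialized element.
+    /// assert!(v.into_inner().is_err());
+    /// ```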
+    pub fn into_inner(self) -> Result<A, Self> {
+        if self.spilled() || self.len() != A::size() {
+            // Note: A::size, not Self::inline_capacity
+            Err(self)
+        } else {
+            unsafe {
+                let data = ptr::read(&self.data);
+                mem::forget(self);
+                Ok(data.into_inline().assume_init())
+            }
+        }
+    }
+
+    /// Retains only the elements specified by the predicate.
+    ///
+    /// In other words, remove all elements `e` such that `f(&e)` returns `false`.
+    /// This method operates in place and preserves the order of the retained
+    /// elements.
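+    ///
+    /// For example:
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3, 4];
+    /// v.retain(|x| *x % 2 == 0);
+    /// assert_eq!(&*v, &[2, 4]);
+    /// ```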
+    pub fn retain<F: FnMut(&mut A::Item) -> bool>(&mut self, mut f: F) {
+        let mut del = 0;
+        let len = self.len();
+        for i in 0..len {
+            if !f(&mut self[i]) {
+                del += 1;
+            } else if del > 0 {
+                self.swap(i - del, i);
+            }
+        }
+        self.truncate(len - del);
+    }
+
+    /// Retains only the elements specified by the predicate.
+    ///
+    /// This method is identical in behaviour to [`retain`]; it is included only
+    /// to maintain API compatibility with `std::vec::Vec`, where the methods are
+    /// separate for historical reasons.
+    pub fn retain_mut<F: FnMut(&mut A::Item) -> bool>(&mut self, f: F) {
+        self.retain(f)
+    }
+
+    /// Removes consecutive duplicate elements.
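+    ///
+    /// For example:
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let mut v: SmallVec<[i32; 8]> = smallvec![1, 1, 2, 2, 2, 3];
+    /// v.dedup();
+    /// assert_eq!(&*v, &[1, 2, 3]);
+    /// ```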
+    pub fn dedup(&mut self)
+    where
+        A::Item: PartialEq<A::Item>,
+    {
+        self.dedup_by(|a, b| a == b);
+    }
+
+    /// Removes consecutive duplicate elements using the given equality relation.
+    pub fn dedup_by<F>(&mut self, mut same_bucket: F)
+    where
+        F: FnMut(&mut A::Item, &mut A::Item) -> bool,
+    {
+        // See the implementation of Vec::dedup_by in the
+        // standard library for an explanation of this algorithm.
+        let len = self.len();
+        if len <= 1 {
+            return;
+        }
+
+        let ptr = self.as_mut_ptr();
+        let mut w: usize = 1;
+
+        unsafe {
+            for r in 1..len {
+                let p_r = ptr.add(r);
+                let p_wm1 = ptr.add(w - 1);
+                if !same_bucket(&mut *p_r, &mut *p_wm1) {
+                    if r != w {
+                        let p_w = p_wm1.add(1);
+                        mem::swap(&mut *p_r, &mut *p_w);
+                    }
+                    w += 1;
+                }
+            }
+        }
+
+        self.truncate(w);
+    }
+
+    /// Removes consecutive elements that map to the same key.
+    pub fn dedup_by_key<F, K>(&mut self, mut key: F)
+    where
+        F: FnMut(&mut A::Item) -> K,
+        K: PartialEq<K>,
+    {
+        self.dedup_by(|a, b| key(a) == key(b));
+    }
+
+    /// Resizes the `SmallVec` in-place so that `len` is equal to `new_len`.
+    ///
+    /// If `new_len` is greater than `len`, the `SmallVec` is extended by the difference, with each
+    /// additional slot filled with the result of calling the closure `f`. The return values from `f`
+    /// will end up in the `SmallVec` in the order they have been generated.
+    ///
+    /// If `new_len` is less than `len`, the `SmallVec` is simply truncated.
+    ///
+    /// This method uses a closure to create new values on every push. If you'd rather `Clone` a given
+    /// value, use `resize`. If you want to use the `Default` trait to generate values, you can pass
+    /// `Default::default()` as the second argument.
+    ///
+    /// Added for `std::vec::Vec` compatibility (added in Rust 1.33.0)
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let mut vec : SmallVec<[_; 4]> = smallvec![1, 2, 3];
+    /// vec.resize_with(5, Default::default);
+    /// assert_eq!(&*vec, &[1, 2, 3, 0, 0]);
+    ///
+    /// let mut vec : SmallVec<[_; 4]> = smallvec![];
+    /// let mut p = 1;
+    /// vec.resize_with(4, || { p *= 2; p });
+    /// assert_eq!(&*vec, &[2, 4, 8, 16]);
+    /// ```
+    pub fn resize_with<F>(&mut self, new_len: usize, f: F)
+    where
+        F: FnMut() -> A::Item,
+    {
+        let old_len = self.len();
+        if old_len < new_len {
+            let mut f = f;
+            let additional = new_len - old_len;
+            self.reserve(additional);
+            for _ in 0..additional {
+                self.push(f());
+            }
+        } else if old_len > new_len {
+            self.truncate(new_len);
+        }
+    }
+
+    /// Creates a `SmallVec` directly from the raw components of another
+    /// `SmallVec`.
+    ///
+    /// # Safety
+    ///
+    /// This is highly unsafe, due to the number of invariants that aren't
+    /// checked:
+    ///
+    /// * `ptr` needs to have been previously allocated via `SmallVec` for its
+    ///   spilled storage (at least, it's highly likely to be incorrect if it
+    ///   wasn't).
+    /// * `ptr`'s `A::Item` type needs to be the same size and alignment that
+    ///   it was allocated with
+    /// * `length` needs to be less than or equal to `capacity`.
+    /// * `capacity` needs to be the capacity that the pointer was allocated
+    ///   with.
+    ///
+    /// Violating these may cause problems like corrupting the allocator's
+    /// internal data structures.
+    ///
+    /// Additionally, `capacity` must be greater than the amount of inline
+    /// storage `A` has; that is, the new `SmallVec` must need to spill over
+    /// into heap-allocated storage. This condition is checked with an assertion.
+    ///
+    /// The ownership of `ptr` is effectively transferred to the
+    /// `SmallVec` which may then deallocate, reallocate or change the
+    /// contents of memory pointed to by the pointer at will. Ensure
+    /// that nothing else uses the pointer after calling this
+    /// function.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// use std::mem;
+    /// use std::ptr;
+    ///
+    /// fn main() {
+    ///     let mut v: SmallVec<[_; 1]> = smallvec![1, 2, 3];
+    ///
+    ///     // Pull out the important parts of `v`.
+    ///     let p = v.as_mut_ptr();
+    ///     let len = v.len();
+    ///     let cap = v.capacity();
+    ///     let spilled = v.spilled();
+    ///
+    ///     unsafe {
+    ///         // Forget all about `v`. The heap allocation that stored the
+    ///         // three values won't be deallocated.
+    ///         mem::forget(v);
+    ///
+    ///         // Overwrite memory with [4, 5, 6].
+    ///         //
+    ///         // This is only safe if `spilled` is true! Otherwise, we are
+    ///         // writing into the old `SmallVec`'s inline storage on the
+    ///         // stack.
+    ///         assert!(spilled);
+    ///         for i in 0..len {
+    ///             ptr::write(p.add(i), 4 + i);
+    ///         }
+    ///
+    ///         // Put everything back together into a SmallVec with a different
+    ///         // amount of inline storage, but which is still less than `cap`.
+    ///         let rebuilt = SmallVec::<[_; 2]>::from_raw_parts(p, len, cap);
+    ///         assert_eq!(&*rebuilt, &[4, 5, 6]);
+    ///     }
+    /// }
+    /// ```
+    #[inline]
+    pub unsafe fn from_raw_parts(ptr: *mut A::Item, length: usize, capacity: usize) -> SmallVec<A> {
+        // SAFETY: We require caller to provide same ptr as we alloc
+        // and we never alloc null pointer.
+        let ptr = unsafe {
+            debug_assert!(!ptr.is_null(), "Called `from_raw_parts` with null pointer.");
+            NonNull::new_unchecked(ptr)
+        };
+        assert!(capacity > Self::inline_capacity());
+        SmallVec {
+            capacity,
+            data: SmallVecData::from_heap(ptr, length),
+        }
+    }
+
+    /// Returns a raw pointer to the vector's buffer.
+    pub fn as_ptr(&self) -> *const A::Item {
+        // We shadow the slice method of the same name to avoid going through
+        // `deref`, which creates an intermediate reference that may place
+        // additional safety constraints on the contents of the slice.
+        self.triple().0.as_ptr()
+    }
+
+    /// Returns a raw mutable pointer to the vector's buffer.
+    pub fn as_mut_ptr(&mut self) -> *mut A::Item {
+        // We shadow the slice method of the same name to avoid going through
+        // `deref_mut`, which creates an intermediate reference that may place
+        // additional safety constraints on the contents of the slice.
+        self.triple_mut().0.as_ptr()
+    }
+}
+
+impl<A: Array> SmallVec<A>
+where
+    A::Item: Copy,
+{
+    /// Copy the elements from a slice into a new `SmallVec`.
+    ///
+    /// For slices of `Copy` types, this is more efficient than `SmallVec::from(slice)`.
+    pub fn from_slice(slice: &[A::Item]) -> Self {
+        let len = slice.len();
+        if len <= Self::inline_capacity() {
+            SmallVec {
+                capacity: len,
+                data: SmallVecData::from_inline(unsafe {
+                    let mut data: MaybeUninit<A> = MaybeUninit::uninit();
+                    ptr::copy_nonoverlapping(
+                        slice.as_ptr(),
+                        data.as_mut_ptr() as *mut A::Item,
+                        len,
+                    );
+                    data
+                }),
+            }
+        } else {
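+            // Too long for inline storage: copy the slice into a `Vec`, then
+            // take over its heap buffer without a further reallocation.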
+            let mut b = slice.to_vec();
+            let cap = b.capacity();
+            let ptr = NonNull::new(b.as_mut_ptr()).expect("Vec always contains non-null pointers.");
+            mem::forget(b);
+            SmallVec {
+                capacity: cap,
+                data: SmallVecData::from_heap(ptr, len),
+            }
+        }
+    }
+
+    /// Copy elements from a slice into the vector at position `index`, shifting any following
+    /// elements toward the back.
+    ///
+    /// For slices of `Copy` types, this is more efficient than `insert`.
+    #[inline]
+    pub fn insert_from_slice(&mut self, index: usize, slice: &[A::Item]) {
+        self.reserve(slice.len());
+
+        let len = self.len();
+        assert!(index <= len);
+
+        unsafe {
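+            // Shift the existing tail right by `slice.len()` (the ranges may
+            // overlap, hence `ptr::copy`), then fill the gap with the new
+            // elements and update the length.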
+            let slice_ptr = slice.as_ptr();
+            let ptr = self.as_mut_ptr().add(index);
+            ptr::copy(ptr, ptr.add(slice.len()), len - index);
+            ptr::copy_nonoverlapping(slice_ptr, ptr, slice.len());
+            self.set_len(len + slice.len());
+        }
+    }
+
+    /// Copy elements from a slice and append them to the vector.
+    ///
+    /// For slices of `Copy` types, this is more efficient than `extend`.
+    #[inline]
+    pub fn extend_from_slice(&mut self, slice: &[A::Item]) {
+        let len = self.len();
+        self.insert_from_slice(len, slice);
+    }
+}
+
+impl<A: Array> SmallVec<A>
+where
+    A::Item: Clone,
+{
+    /// Resizes the vector so that its length is equal to `len`.
+    ///
+    /// If `len` is less than the current length, the vector is simply truncated.
+    ///
+    /// If `len` is greater than the current length, `value` is appended to the
+    /// vector until its length equals `len`.
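+    ///
+    /// ```
+    /// # use smallvec::{smallvec, SmallVec};
+    /// let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3];
+    /// v.resize(5, 0);
+    /// assert_eq!(&*v, &[1, 2, 3, 0, 0]);
+    ///
+    /// v.resize(2, 0);
+    /// assert_eq!(&*v, &[1, 2]);
+    /// ```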
+    pub fn resize(&mut self, len: usize, value: A::Item) {
+        let old_len = self.len();
+
+        if len > old_len {
+            self.extend(repeat(value).take(len - old_len));
+        } else {
+            self.truncate(len);
+        }
+    }
+
+    /// Creates a `SmallVec` with `n` copies of `elem`.
+    /// ```
+    /// use smallvec::SmallVec;
+    ///
+    /// let v = SmallVec::<[char; 128]>::from_elem('d', 2);
+    /// assert_eq!(v, SmallVec::from_buf(['d', 'd']));
+    /// ```
+    pub fn from_elem(elem: A::Item, n: usize) -> Self {
+        if n > Self::inline_capacity() {
+            vec![elem; n].into()
+        } else {
+            let mut v = SmallVec::<A>::new();
+            unsafe {
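+                // Write the clones directly into the inline buffer, tracking
+                // the length with `SetLenOnDrop` so it is still updated if a
+                // `clone()` call panics.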
+                let (ptr, len_ptr, _) = v.triple_mut();
+                let ptr = ptr.as_ptr();
+                let mut local_len = SetLenOnDrop::new(len_ptr);
+
+                for i in 0..n {
+                    ::core::ptr::write(ptr.add(i), elem.clone());
+                    local_len.increment_len(1);
+                }
+            }
+            v
+        }
+    }
+}
+
+impl<A: Array> ops::Deref for SmallVec<A> {
+    type Target = [A::Item];
+    #[inline]
+    fn deref(&self) -> &[A::Item] {
+        unsafe {
+            let (ptr, len, _) = self.triple();
+            slice::from_raw_parts(ptr.as_ptr(), len)
+        }
+    }
+}
+
+impl<A: Array> ops::DerefMut for SmallVec<A> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut [A::Item] {
+        unsafe {
+            let (ptr, &mut len, _) = self.triple_mut();
+            slice::from_raw_parts_mut(ptr.as_ptr(), len)
+        }
+    }
+}
+
+impl<A: Array> AsRef<[A::Item]> for SmallVec<A> {
+    #[inline]
+    fn as_ref(&self) -> &[A::Item] {
+        self
+    }
+}
+
+impl<A: Array> AsMut<[A::Item]> for SmallVec<A> {
+    #[inline]
+    fn as_mut(&mut self) -> &mut [A::Item] {
+        self
+    }
+}
+
+impl<A: Array> Borrow<[A::Item]> for SmallVec<A> {
+    #[inline]
+    fn borrow(&self) -> &[A::Item] {
+        self
+    }
+}
+
+impl<A: Array> BorrowMut<[A::Item]> for SmallVec<A> {
+    #[inline]
+    fn borrow_mut(&mut self) -> &mut [A::Item] {
+        self
+    }
+}
+
+#[cfg(feature = "write")]
+#[cfg_attr(docsrs, doc(cfg(feature = "write")))]
+impl<A: Array<Item = u8>> io::Write for SmallVec<A> {
+    #[inline]
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.extend_from_slice(buf);
+        Ok(buf.len())
+    }
+
+    #[inline]
+    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+        self.extend_from_slice(buf);
+        Ok(())
+    }
+
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+}
+
+#[cfg(feature = "serde")]
+#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
+impl<A: Array> Serialize for SmallVec<A>
+where
+    A::Item: Serialize,
+{
+    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+        let mut state = serializer.serialize_seq(Some(self.len()))?;
+        for item in self {
+            state.serialize_element(&item)?;
+        }
+        state.end()
+    }
+}
+
+#[cfg(feature = "serde")]
+#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
+impl<'de, A: Array> Deserialize<'de> for SmallVec<A>
+where
+    A::Item: Deserialize<'de>,
+{
+    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+        deserializer.deserialize_seq(SmallVecVisitor {
+            phantom: PhantomData,
+        })
+    }
+}
+
+#[cfg(feature = "serde")]
+struct SmallVecVisitor<A> {
+    phantom: PhantomData<A>,
+}
+
+#[cfg(feature = "serde")]
+impl<'de, A: Array> Visitor<'de> for SmallVecVisitor<A>
+where
+    A::Item: Deserialize<'de>,
+{
+    type Value = SmallVec<A>;
+
+    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        formatter.write_str("a sequence")
+    }
+
+    fn visit_seq<B>(self, mut seq: B) -> Result<Self::Value, B::Error>
+    where
+        B: SeqAccess<'de>,
+    {
+        use serde::de::Error;
+        let len = seq.size_hint().unwrap_or(0);
+        let mut values = SmallVec::new();
+        values.try_reserve(len).map_err(B::Error::custom)?;
+
+        while let Some(value) = seq.next_element()? {
+            values.push(value);
+        }
+
+        Ok(values)
+    }
+}
+
+#[cfg(feature = "specialization")]
+trait SpecFrom<A: Array, S> {
+    fn spec_from(slice: S) -> SmallVec<A>;
+}
+
+#[cfg(feature = "specialization")]
+mod specialization;
+
+#[cfg(feature = "arbitrary")]
+mod arbitrary;
+
+#[cfg(feature = "specialization")]
+impl<'a, A: Array> SpecFrom<A, &'a [A::Item]> for SmallVec<A>
+where
+    A::Item: Copy,
+{
+    #[inline]
+    fn spec_from(slice: &'a [A::Item]) -> SmallVec<A> {
+        SmallVec::from_slice(slice)
+    }
+}
+
+impl<'a, A: Array> From<&'a [A::Item]> for SmallVec<A>
+where
+    A::Item: Clone,
+{
+    #[cfg(not(feature = "specialization"))]
+    #[inline]
+    fn from(slice: &'a [A::Item]) -> SmallVec<A> {
+        slice.iter().cloned().collect()
+    }
+
+    #[cfg(feature = "specialization")]
+    #[inline]
+    fn from(slice: &'a [A::Item]) -> SmallVec<A> {
+        SmallVec::spec_from(slice)
+    }
+}
+
+impl<A: Array> From<Vec<A::Item>> for SmallVec<A> {
+    #[inline]
+    fn from(vec: Vec<A::Item>) -> SmallVec<A> {
+        SmallVec::from_vec(vec)
+    }
+}
+
+impl<A: Array> From<A> for SmallVec<A> {
+    #[inline]
+    fn from(array: A) -> SmallVec<A> {
+        SmallVec::from_buf(array)
+    }
+}
+
+impl<A: Array, I: SliceIndex<[A::Item]>> ops::Index<I> for SmallVec<A> {
+    type Output = I::Output;
+
+    fn index(&self, index: I) -> &I::Output {
+        &(**self)[index]
+    }
+}
+
+impl<A: Array, I: SliceIndex<[A::Item]>> ops::IndexMut<I> for SmallVec<A> {
+    fn index_mut(&mut self, index: I) -> &mut I::Output {
+        &mut (&mut **self)[index]
+    }
+}
+
+#[allow(deprecated)]
+impl<A: Array> ExtendFromSlice<A::Item> for SmallVec<A>
+where
+    A::Item: Copy,
+{
+    fn extend_from_slice(&mut self, other: &[A::Item]) {
+        SmallVec::extend_from_slice(self, other)
+    }
+}
+
+impl<A: Array> FromIterator<A::Item> for SmallVec<A> {
+    #[inline]
+    fn from_iter<I: IntoIterator<Item = A::Item>>(iterable: I) -> SmallVec<A> {
+        let mut v = SmallVec::new();
+        v.extend(iterable);
+        v
+    }
+}
+
+impl<A: Array> Extend<A::Item> for SmallVec<A> {
+    fn extend<I: IntoIterator<Item = A::Item>>(&mut self, iterable: I) {
+        let mut iter = iterable.into_iter();
+        let (lower_size_bound, _) = iter.size_hint();
+        self.reserve(lower_size_bound);
+
+        unsafe {
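+            // Fast path: write into the capacity reserved above, with
+            // `SetLenOnDrop` keeping the length correct even if the iterator
+            // panics partway through.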
+            let (ptr, len_ptr, cap) = self.triple_mut();
+            let ptr = ptr.as_ptr();
+            let mut len = SetLenOnDrop::new(len_ptr);
+            while len.get() < cap {
+                if let Some(out) = iter.next() {
+                    ptr::write(ptr.add(len.get()), out);
+                    len.increment_len(1);
+                } else {
+                    return;
+                }
+            }
+        }
+
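+        // Capacity is exhausted but the iterator may not be; push the
+        // remaining items, growing the vector as needed.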
+        for elem in iter {
+            self.push(elem);
+        }
+    }
+}
+
+impl<A: Array> fmt::Debug for SmallVec<A>
+where
+    A::Item: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.iter()).finish()
+    }
+}
+
+impl<A: Array> Default for SmallVec<A> {
+    #[inline]
+    fn default() -> SmallVec<A> {
+        SmallVec::new()
+    }
+}
+
+#[cfg(feature = "may_dangle")]
+unsafe impl<#[may_dangle] A: Array> Drop for SmallVec<A> {
+    fn drop(&mut self) {
+        unsafe {
+            if self.spilled() {
+                let (ptr, &mut len) = self.data.heap_mut();
+                Vec::from_raw_parts(ptr.as_ptr(), len, self.capacity);
+            } else {
+                ptr::drop_in_place(&mut self[..]);
+            }
+        }
+    }
+}
+
+#[cfg(not(feature = "may_dangle"))]
+impl<A: Array> Drop for SmallVec<A> {
+    fn drop(&mut self) {
+        unsafe {
+            if self.spilled() {
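+                // Reconstruct a `Vec` over the heap buffer so that dropping
+                // it frees both the elements and the allocation.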
+                let (ptr, &mut len) = self.data.heap_mut();
+                drop(Vec::from_raw_parts(ptr.as_ptr(), len, self.capacity));
+            } else {
+                ptr::drop_in_place(&mut self[..]);
+            }
+        }
+    }
+}
+
+impl<A: Array> Clone for SmallVec<A>
+where
+    A::Item: Clone,
+{
+    #[inline]
+    fn clone(&self) -> SmallVec<A> {
+        SmallVec::from(self.as_slice())
+    }
+
+    fn clone_from(&mut self, source: &Self) {
+        // Inspired from `impl Clone for Vec`.
+
+        // drop anything that will not be overwritten
+        self.truncate(source.len());
+
+        // self.len <= other.len due to the truncate above, so the
+        // slices here are always in-bounds.
+        let (init, tail) = source.split_at(self.len());
+
+        // reuse the contained values' allocations/resources.
+        self.clone_from_slice(init);
+        self.extend(tail.iter().cloned());
+    }
+}
+
+impl<A: Array, B: Array> PartialEq<SmallVec<B>> for SmallVec<A>
+where
+    A::Item: PartialEq<B::Item>,
+{
+    #[inline]
+    fn eq(&self, other: &SmallVec<B>) -> bool {
+        self[..] == other[..]
+    }
+}
+
+impl<A: Array> Eq for SmallVec<A> where A::Item: Eq {}
+
+impl<A: Array> PartialOrd for SmallVec<A>
+where
+    A::Item: PartialOrd,
+{
+    #[inline]
+    fn partial_cmp(&self, other: &SmallVec<A>) -> Option<cmp::Ordering> {
+        PartialOrd::partial_cmp(&**self, &**other)
+    }
+}
+
+impl<A: Array> Ord for SmallVec<A>
+where
+    A::Item: Ord,
+{
+    #[inline]
+    fn cmp(&self, other: &SmallVec<A>) -> cmp::Ordering {
+        Ord::cmp(&**self, &**other)
+    }
+}
+
+impl<A: Array> Hash for SmallVec<A>
+where
+    A::Item: Hash,
+{
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        (**self).hash(state)
+    }
+}
+
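+// SAFETY: `SmallVec` owns its elements (inline or spilled to the heap), so it
+// can be sent to another thread whenever `A::Item` is itself `Send`.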
+unsafe impl<A: Array> Send for SmallVec<A> where A::Item: Send {}
+
+/// An iterator that consumes a `SmallVec` and yields its items by value.
+///
+/// Returned from [`SmallVec::into_iter`][1].
+///
+/// [1]: struct.SmallVec.html#method.into_iter
+pub struct IntoIter<A: Array> {
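+    // Invariant: `data` has its length set to 0; the elements still owned by
+    // the iterator are exactly those at indices `current..end` of its buffer.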
+    data: SmallVec<A>,
+    current: usize,
+    end: usize,
+}
+
+impl<A: Array> fmt::Debug for IntoIter<A>
+where
+    A::Item: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
+    }
+}
+
+impl<A: Array + Clone> Clone for IntoIter<A>
+where
+    A::Item: Clone,
+{
+    fn clone(&self) -> IntoIter<A> {
+        SmallVec::from(self.as_slice()).into_iter()
+    }
+}
+
+impl<A: Array> Drop for IntoIter<A> {
+    fn drop(&mut self) {
+        for _ in self {}
+    }
+}
+
+impl<A: Array> Iterator for IntoIter<A> {
+    type Item = A::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<A::Item> {
+        if self.current == self.end {
+            None
+        } else {
+            unsafe {
+                let current = self.current;
+                self.current += 1;
+                Some(ptr::read(self.data.as_ptr().add(current)))
+            }
+        }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let size = self.end - self.current;
+        (size, Some(size))
+    }
+}
+
+impl<A: Array> DoubleEndedIterator for IntoIter<A> {
+    #[inline]
+    fn next_back(&mut self) -> Option<A::Item> {
+        if self.current == self.end {
+            None
+        } else {
+            unsafe {
+                self.end -= 1;
+                Some(ptr::read(self.data.as_ptr().add(self.end)))
+            }
+        }
+    }
+}
+
+impl<A: Array> ExactSizeIterator for IntoIter<A> {}
+impl<A: Array> FusedIterator for IntoIter<A> {}
+
+impl<A: Array> IntoIter<A> {
+    /// Returns the remaining items of this iterator as a slice.
+    pub fn as_slice(&self) -> &[A::Item] {
+        let len = self.end - self.current;
+        unsafe { core::slice::from_raw_parts(self.data.as_ptr().add(self.current), len) }
+    }
+
+    /// Returns the remaining items of this iterator as a mutable slice.
+    pub fn as_mut_slice(&mut self) -> &mut [A::Item] {
+        let len = self.end - self.current;
+        unsafe { core::slice::from_raw_parts_mut(self.data.as_mut_ptr().add(self.current), len) }
+    }
+}
+
+impl<A: Array> IntoIterator for SmallVec<A> {
+    type IntoIter = IntoIter<A>;
+    type Item = A::Item;
+    fn into_iter(mut self) -> Self::IntoIter {
+        unsafe {
+            // Set SmallVec len to zero as `IntoIter` drop handles dropping of the elements
+            let len = self.len();
+            self.set_len(0);
+            IntoIter {
+                data: self,
+                current: 0,
+                end: len,
+            }
+        }
+    }
+}
+
+impl<'a, A: Array> IntoIterator for &'a SmallVec<A> {
+    type IntoIter = slice::Iter<'a, A::Item>;
+    type Item = &'a A::Item;
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<'a, A: Array> IntoIterator for &'a mut SmallVec<A> {
+    type IntoIter = slice::IterMut<'a, A::Item>;
+    type Item = &'a mut A::Item;
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter_mut()
+    }
+}
+
+/// Types that can be used as the backing store for a [`SmallVec`].
+pub unsafe trait Array {
+    /// The type of the array's elements.
+    type Item;
+    /// Returns the number of items the array can hold.
+    fn size() -> usize;
+}
+
+/// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
+///
+/// Copied from <https://github.com/rust-lang/rust/pull/36355>
+struct SetLenOnDrop<'a> {
+    len: &'a mut usize,
+    local_len: usize,
+}
+
+impl<'a> SetLenOnDrop<'a> {
+    #[inline]
+    fn new(len: &'a mut usize) -> Self {
+        SetLenOnDrop {
+            local_len: *len,
+            len,
+        }
+    }
+
+    #[inline]
+    fn get(&self) -> usize {
+        self.local_len
+    }
+
+    #[inline]
+    fn increment_len(&mut self, increment: usize) {
+        self.local_len += increment;
+    }
+}
+
+impl<'a> Drop for SetLenOnDrop<'a> {
+    #[inline]
+    fn drop(&mut self) {
+        *self.len = self.local_len;
+    }
+}
+
+#[cfg(feature = "const_new")]
+impl<T, const N: usize> SmallVec<[T; N]> {
+    /// Construct an empty vector.
+    ///
+    /// This is a `const` version of [`SmallVec::new`] that is enabled by the feature `const_new`, with the limitation that it only works for arrays.
+    #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+    #[inline]
+    pub const fn new_const() -> Self {
+        SmallVec {
+            capacity: 0,
+            data: SmallVecData::from_const(MaybeUninit::uninit()),
+        }
+    }
+
+    /// The array passed as an argument is moved to be an inline version of `SmallVec`.
+    ///
+    /// This is a `const` version of [`SmallVec::from_buf`] that is enabled by the feature `const_new`, with the limitation that it only works for arrays.
+    #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+    #[inline]
+    pub const fn from_const(items: [T; N]) -> Self {
+        SmallVec {
+            capacity: N,
+            data: SmallVecData::from_const(MaybeUninit::new(items)),
+        }
+    }
+
+    /// Constructs a new `SmallVec` on the stack from an array without
+    /// copying elements. Also sets the length. The user is responsible
+    /// for ensuring that `len <= N`.
+    /// 
+    /// This is a `const` version of [`SmallVec::from_buf_and_len_unchecked`] that is enabled by the feature `const_new`, with the limitation that it only works for arrays.
+    #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+    #[inline]
+    pub const unsafe fn from_const_with_len_unchecked(items: [T; N], len: usize) -> Self {
+        SmallVec {
+            capacity: len,
+            data: SmallVecData::from_const(MaybeUninit::new(items)),
+        }
+    }
+}
+
+#[cfg(feature = "const_generics")]
+#[cfg_attr(docsrs, doc(cfg(feature = "const_generics")))]
+unsafe impl<T, const N: usize> Array for [T; N] {
+    type Item = T;
+    #[inline]
+    fn size() -> usize {
+        N
+    }
+}
+
+#[cfg(not(feature = "const_generics"))]
+macro_rules! impl_array(
+    ($($size:expr),+) => {
+        $(
+            unsafe impl<T> Array for [T; $size] {
+                type Item = T;
+                #[inline]
+                fn size() -> usize { $size }
+            }
+        )+
+    }
+);
+
+#[cfg(not(feature = "const_generics"))]
+impl_array!(
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+    26, 27, 28, 29, 30, 31, 32, 36, 0x40, 0x60, 0x80, 0x100, 0x200, 0x400, 0x600, 0x800, 0x1000,
+    0x2000, 0x4000, 0x6000, 0x8000, 0x10000, 0x20000, 0x40000, 0x60000, 0x80000, 0x10_0000
+);
+
+/// Convenience trait for constructing a `SmallVec`
+pub trait ToSmallVec<A: Array> {
+    /// Construct a new `SmallVec` from a slice.
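+    ///
+    /// ```
+    /// # use smallvec::{SmallVec, ToSmallVec};
+    /// let v: SmallVec<[u8; 4]> = [1u8, 2, 3].to_smallvec();
+    /// assert_eq!(&*v, &[1, 2, 3]);
+    /// ```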
+    fn to_smallvec(&self) -> SmallVec<A>;
+}
+
+impl<A: Array> ToSmallVec<A> for [A::Item]
+where
+    A::Item: Copy,
+{
+    #[inline]
+    fn to_smallvec(&self) -> SmallVec<A> {
+        SmallVec::from_slice(self)
+    }
+}
+
+// Immutable counterpart for `NonNull<T>`.
+#[repr(transparent)]
+struct ConstNonNull<T>(NonNull<T>);
+
+impl<T> ConstNonNull<T> {
+    #[inline]
+    fn new(ptr: *const T) -> Option<Self> {
+        NonNull::new(ptr as *mut T).map(Self)
+    }
+    #[inline]
+    fn as_ptr(self) -> *const T {
+        self.0.as_ptr()
+    }
+}
+
+impl<T> Clone for ConstNonNull<T> {
+    #[inline]
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<T> Copy for ConstNonNull<T> {}
diff --git a/crates/smallvec/src/specialization.rs b/crates/smallvec/src/specialization.rs
new file mode 100644
index 0000000..658fa77
--- /dev/null
+++ b/crates/smallvec/src/specialization.rs
@@ -0,0 +1,19 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementations that require `default fn`.
+
+use super::{Array, SmallVec, SpecFrom};
+
+impl<'a, A: Array> SpecFrom<A, &'a [A::Item]> for SmallVec<A>
+where
+    A::Item: Clone,
+{
+    #[inline]
+    default fn spec_from(slice: &'a [A::Item]) -> SmallVec<A> {
+        slice.into_iter().cloned().collect()
+    }
+}
diff --git a/crates/smallvec/src/tests.rs b/crates/smallvec/src/tests.rs
new file mode 100644
index 0000000..3eab846
--- /dev/null
+++ b/crates/smallvec/src/tests.rs
@@ -0,0 +1,1025 @@
+use crate::{smallvec, SmallVec};
+
+use std::iter::FromIterator;
+
+use alloc::borrow::ToOwned;
+use alloc::boxed::Box;
+use alloc::rc::Rc;
+use alloc::{vec, vec::Vec};
+
+#[test]
+pub fn test_zero() {
+    let mut v = SmallVec::<[_; 0]>::new();
+    assert!(!v.spilled());
+    v.push(0usize);
+    assert!(v.spilled());
+    assert_eq!(&*v, &[0]);
+}
+
+// We heap allocate all these strings so that double frees will show up under valgrind.
+
+#[test]
+pub fn test_inline() {
+    let mut v = SmallVec::<[_; 16]>::new();
+    v.push("hello".to_owned());
+    v.push("there".to_owned());
+    assert_eq!(&*v, &["hello".to_owned(), "there".to_owned(),][..]);
+}
+
+#[test]
+pub fn test_spill() {
+    let mut v = SmallVec::<[_; 2]>::new();
+    v.push("hello".to_owned());
+    assert_eq!(v[0], "hello");
+    v.push("there".to_owned());
+    v.push("burma".to_owned());
+    assert_eq!(v[0], "hello");
+    v.push("shave".to_owned());
+    assert_eq!(
+        &*v,
+        &[
+            "hello".to_owned(),
+            "there".to_owned(),
+            "burma".to_owned(),
+            "shave".to_owned(),
+        ][..]
+    );
+}
+
+#[test]
+pub fn test_double_spill() {
+    let mut v = SmallVec::<[_; 2]>::new();
+    v.push("hello".to_owned());
+    v.push("there".to_owned());
+    v.push("burma".to_owned());
+    v.push("shave".to_owned());
+    v.push("hello".to_owned());
+    v.push("there".to_owned());
+    v.push("burma".to_owned());
+    v.push("shave".to_owned());
+    assert_eq!(
+        &*v,
+        &[
+            "hello".to_owned(),
+            "there".to_owned(),
+            "burma".to_owned(),
+            "shave".to_owned(),
+            "hello".to_owned(),
+            "there".to_owned(),
+            "burma".to_owned(),
+            "shave".to_owned(),
+        ][..]
+    );
+}
+
+// https://github.com/servo/rust-smallvec/issues/4
+#[test]
+fn issue_4() {
+    SmallVec::<[Box<u32>; 2]>::new();
+}
+
+// https://github.com/servo/rust-smallvec/issues/5
+#[test]
+fn issue_5() {
+    assert!(Some(SmallVec::<[&u32; 2]>::new()).is_some());
+}
+
+#[test]
+fn test_with_capacity() {
+    let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(1);
+    assert!(v.is_empty());
+    assert!(!v.spilled());
+    assert_eq!(v.capacity(), 3);
+
+    let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(10);
+    assert!(v.is_empty());
+    assert!(v.spilled());
+    assert_eq!(v.capacity(), 10);
+}
+
+#[test]
+fn drain() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    assert_eq!(v.drain(..).collect::<Vec<_>>(), &[3]);
+
+    // spilling the vec
+    v.push(3);
+    v.push(4);
+    v.push(5);
+    let old_capacity = v.capacity();
+    assert_eq!(v.drain(1..).collect::<Vec<_>>(), &[4, 5]);
+    // drain should not change the capacity
+    assert_eq!(v.capacity(), old_capacity);
+
+    // Exercise the tail-shifting code when in the inline state
+    // This has the potential to produce UB due to aliasing
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(1);
+    v.push(2);
+    assert_eq!(v.drain(..1).collect::<Vec<_>>(), &[1]);
+}
+
+#[test]
+fn drain_rev() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    assert_eq!(v.drain(..).rev().collect::<Vec<_>>(), &[3]);
+
+    // spilling the vec
+    v.push(3);
+    v.push(4);
+    v.push(5);
+    assert_eq!(v.drain(..).rev().collect::<Vec<_>>(), &[5, 4, 3]);
+}
+
+#[test]
+fn drain_forget() {
+    let mut v: SmallVec<[u8; 1]> = smallvec![0, 1, 2, 3, 4, 5, 6, 7];
+    std::mem::forget(v.drain(2..5));
+    assert_eq!(v.len(), 2);
+}
+
+#[test]
+fn into_iter() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    assert_eq!(v.into_iter().collect::<Vec<_>>(), &[3]);
+
+    // spilling the vec
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    v.push(4);
+    v.push(5);
+    assert_eq!(v.into_iter().collect::<Vec<_>>(), &[3, 4, 5]);
+}
+
+#[test]
+fn into_iter_rev() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    assert_eq!(v.into_iter().rev().collect::<Vec<_>>(), &[3]);
+
+    // spilling the vec
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    v.push(4);
+    v.push(5);
+    assert_eq!(v.into_iter().rev().collect::<Vec<_>>(), &[5, 4, 3]);
+}
+
+#[test]
+fn into_iter_drop() {
+    use std::cell::Cell;
+
+    struct DropCounter<'a>(&'a Cell<i32>);
+
+    impl<'a> Drop for DropCounter<'a> {
+        fn drop(&mut self) {
+            self.0.set(self.0.get() + 1);
+        }
+    }
+
+    {
+        let cell = Cell::new(0);
+        let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new();
+        v.push(DropCounter(&cell));
+        v.into_iter();
+        assert_eq!(cell.get(), 1);
+    }
+
+    {
+        let cell = Cell::new(0);
+        let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new();
+        v.push(DropCounter(&cell));
+        v.push(DropCounter(&cell));
+        assert!(v.into_iter().next().is_some());
+        assert_eq!(cell.get(), 2);
+    }
+
+    {
+        let cell = Cell::new(0);
+        let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new();
+        v.push(DropCounter(&cell));
+        v.push(DropCounter(&cell));
+        v.push(DropCounter(&cell));
+        assert!(v.into_iter().next().is_some());
+        assert_eq!(cell.get(), 3);
+    }
+    {
+        let cell = Cell::new(0);
+        let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new();
+        v.push(DropCounter(&cell));
+        v.push(DropCounter(&cell));
+        v.push(DropCounter(&cell));
+        {
+            let mut it = v.into_iter();
+            assert!(it.next().is_some());
+            assert!(it.next_back().is_some());
+        }
+        assert_eq!(cell.get(), 3);
+    }
+}
+
+#[test]
+fn test_capacity() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.reserve(1);
+    assert_eq!(v.capacity(), 2);
+    assert!(!v.spilled());
+
+    v.reserve_exact(0x100);
+    assert!(v.capacity() >= 0x100);
+
+    v.push(0);
+    v.push(1);
+    v.push(2);
+    v.push(3);
+
+    v.shrink_to_fit();
+    assert!(v.capacity() < 0x100);
+}
+
+#[test]
+fn test_truncate() {
+    let mut v: SmallVec<[Box<u8>; 8]> = SmallVec::new();
+
+    for x in 0..8 {
+        v.push(Box::new(x));
+    }
+    v.truncate(4);
+
+    assert_eq!(v.len(), 4);
+    assert!(!v.spilled());
+
+    assert_eq!(*v.swap_remove(1), 1);
+    assert_eq!(*v.remove(1), 3);
+    v.insert(1, Box::new(3));
+
+    assert_eq!(&v.iter().map(|v| **v).collect::<Vec<_>>(), &[0, 3, 2]);
+}
+
+#[test]
+fn test_insert_many() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    for x in 0..4 {
+        v.push(x);
+    }
+    assert_eq!(v.len(), 4);
+    v.insert_many(1, [5, 6].iter().cloned());
+    assert_eq!(
+        &v.iter().map(|v| *v).collect::<Vec<_>>(),
+        &[0, 5, 6, 1, 2, 3]
+    );
+}
+
+struct MockHintIter<T: Iterator> {
+    x: T,
+    hint: usize,
+}
+impl<T: Iterator> Iterator for MockHintIter<T> {
+    type Item = T::Item;
+    fn next(&mut self) -> Option<Self::Item> {
+        self.x.next()
+    }
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.hint, None)
+    }
+}
+
+#[test]
+fn test_insert_many_short_hint() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    for x in 0..4 {
+        v.push(x);
+    }
+    assert_eq!(v.len(), 4);
+    v.insert_many(
+        1,
+        MockHintIter {
+            x: [5, 6].iter().cloned(),
+            hint: 5,
+        },
+    );
+    assert_eq!(
+        &v.iter().map(|v| *v).collect::<Vec<_>>(),
+        &[0, 5, 6, 1, 2, 3]
+    );
+}
+
+#[test]
+fn test_insert_many_long_hint() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    for x in 0..4 {
+        v.push(x);
+    }
+    assert_eq!(v.len(), 4);
+    v.insert_many(
+        1,
+        MockHintIter {
+            x: [5, 6].iter().cloned(),
+            hint: 1,
+        },
+    );
+    assert_eq!(
+        &v.iter().map(|v| *v).collect::<Vec<_>>(),
+        &[0, 5, 6, 1, 2, 3]
+    );
+}
+
+// https://github.com/servo/rust-smallvec/issues/96
+mod insert_many_panic {
+    use crate::{smallvec, SmallVec};
+    use alloc::boxed::Box;
+
+    struct PanicOnDoubleDrop {
+        dropped: Box<bool>,
+    }
+
+    impl PanicOnDoubleDrop {
+        fn new() -> Self {
+            Self {
+                dropped: Box::new(false),
+            }
+        }
+    }
+
+    impl Drop for PanicOnDoubleDrop {
+        fn drop(&mut self) {
+            assert!(!*self.dropped, "already dropped");
+            *self.dropped = true;
+        }
+    }
+
+    /// Claims to yield `hint` items, but actually yields `count`, then panics.
+    struct BadIter {
+        hint: usize,
+        count: usize,
+    }
+
+    impl Iterator for BadIter {
+        type Item = PanicOnDoubleDrop;
+        fn size_hint(&self) -> (usize, Option<usize>) {
+            (self.hint, None)
+        }
+        fn next(&mut self) -> Option<Self::Item> {
+            if self.count == 0 {
+                panic!()
+            }
+            self.count -= 1;
+            Some(PanicOnDoubleDrop::new())
+        }
+    }
+
+    #[test]
+    fn panic_early_at_start() {
+        let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> =
+            smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),];
+        let result = ::std::panic::catch_unwind(move || {
+            vec.insert_many(0, BadIter { hint: 1, count: 0 });
+        });
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn panic_early_in_middle() {
+        let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> =
+            smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),];
+        let result = ::std::panic::catch_unwind(move || {
+            vec.insert_many(1, BadIter { hint: 4, count: 2 });
+        });
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn panic_early_at_end() {
+        let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> =
+            smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),];
+        let result = ::std::panic::catch_unwind(move || {
+            vec.insert_many(2, BadIter { hint: 3, count: 1 });
+        });
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn panic_late_at_start() {
+        let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> =
+            smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),];
+        let result = ::std::panic::catch_unwind(move || {
+            vec.insert_many(0, BadIter { hint: 3, count: 5 });
+        });
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn panic_late_at_end() {
+        let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> =
+            smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),];
+        let result = ::std::panic::catch_unwind(move || {
+            vec.insert_many(2, BadIter { hint: 3, count: 5 });
+        });
+        assert!(result.is_err());
+    }
+}
+
+#[test]
+#[should_panic]
+fn test_invalid_grow() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    v.extend(0..8);
+    v.grow(5);
+}
+
+#[test]
+#[should_panic]
+fn drain_overflow() {
+    let mut v: SmallVec<[u8; 8]> = smallvec![0];
+    v.drain(..=std::usize::MAX);
+}
+
+#[test]
+fn test_insert_from_slice() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    for x in 0..4 {
+        v.push(x);
+    }
+    assert_eq!(v.len(), 4);
+    v.insert_from_slice(1, &[5, 6]);
+    assert_eq!(
+        &v.iter().map(|v| *v).collect::<Vec<_>>(),
+        &[0, 5, 6, 1, 2, 3]
+    );
+}
+
+#[test]
+fn test_extend_from_slice() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    for x in 0..4 {
+        v.push(x);
+    }
+    assert_eq!(v.len(), 4);
+    v.extend_from_slice(&[5, 6]);
+    assert_eq!(
+        &v.iter().map(|v| *v).collect::<Vec<_>>(),
+        &[0, 1, 2, 3, 5, 6]
+    );
+}
+
+#[test]
+#[should_panic]
+fn test_drop_panic_smallvec() {
+    // This test should only panic once, and not double panic,
+    // which would mean a double drop
+    struct DropPanic;
+
+    impl Drop for DropPanic {
+        fn drop(&mut self) {
+            panic!("drop");
+        }
+    }
+
+    let mut v = SmallVec::<[_; 1]>::new();
+    v.push(DropPanic);
+}
+
+#[test]
+fn test_eq() {
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    let mut b: SmallVec<[u32; 2]> = SmallVec::new();
+    let mut c: SmallVec<[u32; 2]> = SmallVec::new();
+    // a = [1, 2]
+    a.push(1);
+    a.push(2);
+    // b = [1, 2]
+    b.push(1);
+    b.push(2);
+    // c = [3, 4]
+    c.push(3);
+    c.push(4);
+
+    assert!(a == b);
+    assert!(a != c);
+}
+
+#[test]
+fn test_ord() {
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    let mut b: SmallVec<[u32; 2]> = SmallVec::new();
+    let mut c: SmallVec<[u32; 2]> = SmallVec::new();
+    // a = [1]
+    a.push(1);
+    // b = [1, 1]
+    b.push(1);
+    b.push(1);
+    // c = [1, 2]
+    c.push(1);
+    c.push(2);
+
+    assert!(a < b);
+    assert!(b > a);
+    assert!(b < c);
+    assert!(c > b);
+}
+
+#[test]
+fn test_hash() {
+    use std::collections::hash_map::DefaultHasher;
+    use std::hash::Hash;
+
+    {
+        let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+        let b = [1, 2];
+        a.extend(b.iter().cloned());
+        let mut hasher = DefaultHasher::new();
+        assert_eq!(a.hash(&mut hasher), b.hash(&mut hasher));
+    }
+    {
+        let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+        let b = [1, 2, 11, 12];
+        a.extend(b.iter().cloned());
+        let mut hasher = DefaultHasher::new();
+        assert_eq!(a.hash(&mut hasher), b.hash(&mut hasher));
+    }
+}
+
+#[test]
+fn test_as_ref() {
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    a.push(1);
+    assert_eq!(a.as_ref(), [1]);
+    a.push(2);
+    assert_eq!(a.as_ref(), [1, 2]);
+    a.push(3);
+    assert_eq!(a.as_ref(), [1, 2, 3]);
+}
+
+#[test]
+fn test_as_mut() {
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    a.push(1);
+    assert_eq!(a.as_mut(), [1]);
+    a.push(2);
+    assert_eq!(a.as_mut(), [1, 2]);
+    a.push(3);
+    assert_eq!(a.as_mut(), [1, 2, 3]);
+    a.as_mut()[1] = 4;
+    assert_eq!(a.as_mut(), [1, 4, 3]);
+}
+
+#[test]
+fn test_borrow() {
+    use std::borrow::Borrow;
+
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    a.push(1);
+    assert_eq!(a.borrow(), [1]);
+    a.push(2);
+    assert_eq!(a.borrow(), [1, 2]);
+    a.push(3);
+    assert_eq!(a.borrow(), [1, 2, 3]);
+}
+
+#[test]
+fn test_borrow_mut() {
+    use std::borrow::BorrowMut;
+
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    a.push(1);
+    assert_eq!(a.borrow_mut(), [1]);
+    a.push(2);
+    assert_eq!(a.borrow_mut(), [1, 2]);
+    a.push(3);
+    assert_eq!(a.borrow_mut(), [1, 2, 3]);
+    BorrowMut::<[u32]>::borrow_mut(&mut a)[1] = 4;
+    assert_eq!(a.borrow_mut(), [1, 4, 3]);
+}
+
+#[test]
+fn test_from() {
+    assert_eq!(&SmallVec::<[u32; 2]>::from(&[1][..])[..], [1]);
+    assert_eq!(&SmallVec::<[u32; 2]>::from(&[1, 2, 3][..])[..], [1, 2, 3]);
+
+    let vec = vec![];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec);
+    assert_eq!(&*small_vec, &[]);
+    drop(small_vec);
+
+    let vec = vec![1, 2, 3, 4, 5];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec);
+    assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    drop(small_vec);
+
+    let vec = vec![1, 2, 3, 4, 5];
+    let small_vec: SmallVec<[u8; 1]> = SmallVec::from(vec);
+    assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    drop(small_vec);
+
+    let array = [1];
+    let small_vec: SmallVec<[u8; 1]> = SmallVec::from(array);
+    assert_eq!(&*small_vec, &[1]);
+    drop(small_vec);
+
+    let array = [99; 128];
+    let small_vec: SmallVec<[u8; 128]> = SmallVec::from(array);
+    assert_eq!(&*small_vec, vec![99u8; 128].as_slice());
+    drop(small_vec);
+}
+
+#[test]
+fn test_from_slice() {
+    assert_eq!(&SmallVec::<[u32; 2]>::from_slice(&[1][..])[..], [1]);
+    assert_eq!(
+        &SmallVec::<[u32; 2]>::from_slice(&[1, 2, 3][..])[..],
+        [1, 2, 3]
+    );
+}
+
+#[test]
+fn test_exact_size_iterator() {
+    let mut vec = SmallVec::<[u32; 2]>::from(&[1, 2, 3][..]);
+    assert_eq!(vec.clone().into_iter().len(), 3);
+    assert_eq!(vec.drain(..2).len(), 2);
+    assert_eq!(vec.into_iter().len(), 1);
+}
+
+#[test]
+fn test_into_iter_as_slice() {
+    let vec = SmallVec::<[u32; 2]>::from(&[1, 2, 3][..]);
+    let mut iter = vec.clone().into_iter();
+    assert_eq!(iter.as_slice(), &[1, 2, 3]);
+    assert_eq!(iter.as_mut_slice(), &[1, 2, 3]);
+    iter.next();
+    assert_eq!(iter.as_slice(), &[2, 3]);
+    assert_eq!(iter.as_mut_slice(), &[2, 3]);
+    iter.next_back();
+    assert_eq!(iter.as_slice(), &[2]);
+    assert_eq!(iter.as_mut_slice(), &[2]);
+}
+
+#[test]
+fn test_into_iter_clone() {
+    // Test that the cloned iterator yields identical elements and that it owns its own copy
+    // (i.e. no use after move errors).
+    let mut iter = SmallVec::<[u8; 2]>::from_iter(0..3).into_iter();
+    let mut clone_iter = iter.clone();
+    while let Some(x) = iter.next() {
+        assert_eq!(x, clone_iter.next().unwrap());
+    }
+    assert_eq!(clone_iter.next(), None);
+}
+
+#[test]
+fn test_into_iter_clone_partially_consumed_iterator() {
+    // Test that the cloned iterator only contains the remaining elements of the original iterator.
+    let mut iter = SmallVec::<[u8; 2]>::from_iter(0..3).into_iter().skip(1);
+    let mut clone_iter = iter.clone();
+    while let Some(x) = iter.next() {
+        assert_eq!(x, clone_iter.next().unwrap());
+    }
+    assert_eq!(clone_iter.next(), None);
+}
+
+#[test]
+fn test_into_iter_clone_empty_smallvec() {
+    let mut iter = SmallVec::<[u8; 2]>::new().into_iter();
+    let mut clone_iter = iter.clone();
+    assert_eq!(iter.next(), None);
+    assert_eq!(clone_iter.next(), None);
+}
+
+#[test]
+fn shrink_to_fit_unspill() {
+    let mut vec = SmallVec::<[u8; 2]>::from_iter(0..3);
+    vec.pop();
+    assert!(vec.spilled());
+    vec.shrink_to_fit();
+    assert!(!vec.spilled(), "shrink_to_fit will un-spill if possible");
+}
+
+#[test]
+fn test_into_vec() {
+    let vec = SmallVec::<[u8; 2]>::from_iter(0..2);
+    assert_eq!(vec.into_vec(), vec![0, 1]);
+
+    let vec = SmallVec::<[u8; 2]>::from_iter(0..3);
+    assert_eq!(vec.into_vec(), vec![0, 1, 2]);
+}
+
+#[test]
+fn test_into_inner() {
+    let vec = SmallVec::<[u8; 2]>::from_iter(0..2);
+    assert_eq!(vec.into_inner(), Ok([0, 1]));
+
+    let vec = SmallVec::<[u8; 2]>::from_iter(0..1);
+    assert_eq!(vec.clone().into_inner(), Err(vec));
+
+    let vec = SmallVec::<[u8; 2]>::from_iter(0..3);
+    assert_eq!(vec.clone().into_inner(), Err(vec));
+}
+
+#[test]
+fn test_from_vec() {
+    let vec = vec![];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[]);
+    drop(small_vec);
+
+    let vec = vec![];
+    let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[]);
+    drop(small_vec);
+
+    let vec = vec![1];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[1]);
+    drop(small_vec);
+
+    let vec = vec![1, 2, 3];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[1, 2, 3]);
+    drop(small_vec);
+
+    let vec = vec![1, 2, 3, 4, 5];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    drop(small_vec);
+
+    let vec = vec![1, 2, 3, 4, 5];
+    let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    drop(small_vec);
+}
+
+#[test]
+fn test_retain() {
+    // Test inline data storage
+    let mut sv: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]);
+    sv.retain(|&mut i| i != 3);
+    assert_eq!(sv.pop(), Some(4));
+    assert_eq!(sv.pop(), Some(2));
+    assert_eq!(sv.pop(), Some(1));
+    assert_eq!(sv.pop(), None);
+
+    // Test spilled data storage
+    let mut sv: SmallVec<[i32; 3]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]);
+    sv.retain(|&mut i| i != 3);
+    assert_eq!(sv.pop(), Some(4));
+    assert_eq!(sv.pop(), Some(2));
+    assert_eq!(sv.pop(), Some(1));
+    assert_eq!(sv.pop(), None);
+
+    // Test that drop implementations are called for inline.
+    let one = Rc::new(1);
+    let mut sv: SmallVec<[Rc<i32>; 3]> = SmallVec::new();
+    sv.push(Rc::clone(&one));
+    assert_eq!(Rc::strong_count(&one), 2);
+    sv.retain(|_| false);
+    assert_eq!(Rc::strong_count(&one), 1);
+
+    // Test that drop implementations are called for spilled data.
+    let mut sv: SmallVec<[Rc<i32>; 1]> = SmallVec::new();
+    sv.push(Rc::clone(&one));
+    sv.push(Rc::new(2));
+    assert_eq!(Rc::strong_count(&one), 2);
+    sv.retain(|_| false);
+    assert_eq!(Rc::strong_count(&one), 1);
+}
+
+#[test]
+fn test_dedup() {
+    let mut dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 2, 3, 3]);
+    dupes.dedup();
+    assert_eq!(&*dupes, &[1, 2, 3]);
+
+    let mut empty: SmallVec<[i32; 5]> = SmallVec::new();
+    empty.dedup();
+    assert!(empty.is_empty());
+
+    let mut all_ones: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 1, 1, 1]);
+    all_ones.dedup();
+    assert_eq!(all_ones.len(), 1);
+
+    let mut no_dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 4, 5]);
+    no_dupes.dedup();
+    assert_eq!(no_dupes.len(), 5);
+}
+
+#[test]
+fn test_resize() {
+    let mut v: SmallVec<[i32; 8]> = SmallVec::new();
+    v.push(1);
+    v.resize(5, 0);
+    assert_eq!(v[..], [1, 0, 0, 0, 0][..]);
+
+    v.resize(2, -1);
+    assert_eq!(v[..], [1, 0][..]);
+}
+
+#[cfg(feature = "write")]
+#[test]
+fn test_write() {
+    use std::io::Write;
+
+    let data = [1, 2, 3, 4, 5];
+
+    let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new();
+    let len = small_vec.write(&data[..]).unwrap();
+    assert_eq!(len, 5);
+    assert_eq!(small_vec.as_ref(), data.as_ref());
+
+    let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new();
+    small_vec.write_all(&data[..]).unwrap();
+    assert_eq!(small_vec.as_ref(), data.as_ref());
+}
+
+#[cfg(feature = "serde")]
+#[test]
+fn test_serde() {
+    use bincode::{config, deserialize};
+    let mut small_vec: SmallVec<[i32; 2]> = SmallVec::new();
+    small_vec.push(1);
+    let encoded = config().limit(100).serialize(&small_vec).unwrap();
+    let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap();
+    assert_eq!(small_vec, decoded);
+    small_vec.push(2);
+    // Spill the vec
+    small_vec.push(3);
+    small_vec.push(4);
+    // Check again after spilling.
+    let encoded = config().limit(100).serialize(&small_vec).unwrap();
+    let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap();
+    assert_eq!(small_vec, decoded);
+}
+
+#[test]
+fn grow_to_shrink() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(1);
+    v.push(2);
+    v.push(3);
+    assert!(v.spilled());
+    v.clear();
+    // Shrink to inline.
+    v.grow(2);
+    assert!(!v.spilled());
+    assert_eq!(v.capacity(), 2);
+    assert_eq!(v.len(), 0);
+    v.push(4);
+    assert_eq!(v[..], [4]);
+}
+
+#[test]
+fn resumable_extend() {
+    let s = "a b c";
+    // This iterator yields: (Some('a'), None, Some('b'), None, Some('c')), None
+    let it = s
+        .chars()
+        .scan(0, |_, ch| if ch.is_whitespace() { None } else { Some(ch) });
+    let mut v: SmallVec<[char; 4]> = SmallVec::new();
+    v.extend(it);
+    assert_eq!(v[..], ['a']);
+}
+
+// #139
+#[test]
+fn uninhabited() {
+    enum Void {}
+    let _sv = SmallVec::<[Void; 8]>::new();
+}
+
+#[test]
+fn grow_spilled_same_size() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(0);
+    v.push(1);
+    v.push(2);
+    assert!(v.spilled());
+    assert_eq!(v.capacity(), 4);
+    // grow with the same capacity
+    v.grow(4);
+    assert_eq!(v.capacity(), 4);
+    assert_eq!(v[..], [0, 1, 2]);
+}
+
+#[cfg(feature = "const_generics")]
+#[test]
+fn const_generics() {
+    let _v = SmallVec::<[i32; 987]>::default();
+}
+
+#[cfg(feature = "const_new")]
+#[test]
+fn const_new() {
+    let v = const_new_inner();
+    assert_eq!(v.capacity(), 4);
+    assert_eq!(v.len(), 0);
+    let v = const_new_inline_sized();
+    assert_eq!(v.capacity(), 4);
+    assert_eq!(v.len(), 4);
+    assert_eq!(v[0], 1);
+    let v = const_new_inline_args();
+    assert_eq!(v.capacity(), 2);
+    assert_eq!(v.len(), 2);
+    assert_eq!(v[0], 1);
+    assert_eq!(v[1], 4);
+    let v = const_new_with_len();
+    assert_eq!(v.capacity(), 4);
+    assert_eq!(v.len(), 3);
+    assert_eq!(v[0], 2);
+    assert_eq!(v[1], 5);
+    assert_eq!(v[2], 7);
+}
+#[cfg(feature = "const_new")]
+const fn const_new_inner() -> SmallVec<[i32; 4]> {
+    SmallVec::<[i32; 4]>::new_const()
+}
+#[cfg(feature = "const_new")]
+const fn const_new_inline_sized() -> SmallVec<[i32; 4]> {
+    crate::smallvec_inline![1; 4]
+}
+#[cfg(feature = "const_new")]
+const fn const_new_inline_args() -> SmallVec<[i32; 2]> {
+    crate::smallvec_inline![1, 4]
+}
+#[cfg(feature = "const_new")]
+const fn const_new_with_len() -> SmallVec<[i32; 4]> {
+    unsafe {
+        SmallVec::<[i32; 4]>::from_const_with_len_unchecked([2, 5, 7, 0], 3)
+    }
+}
+
+#[test]
+fn empty_macro() {
+    let _v: SmallVec<[u8; 1]> = smallvec![];
+}
+
+#[test]
+fn zero_size_items() {
+    SmallVec::<[(); 0]>::new().push(());
+}
+
+#[test]
+fn test_insert_many_overflow() {
+    let mut v: SmallVec<[u8; 1]> = SmallVec::new();
+    v.push(123);
+
+    // Prepare an iterator with small lower bound
+    let iter = (0u8..5).filter(|n| n % 2 == 0);
+    assert_eq!(iter.size_hint().0, 0);
+
+    v.insert_many(0, iter);
+    assert_eq!(&*v, &[0, 2, 4, 123]);
+}
+
+#[test]
+fn test_clone_from() {
+    let mut a: SmallVec<[u8; 2]> = SmallVec::new();
+    a.push(1);
+    a.push(2);
+    a.push(3);
+
+    let mut b: SmallVec<[u8; 2]> = SmallVec::new();
+    b.push(10);
+
+    let mut c: SmallVec<[u8; 2]> = SmallVec::new();
+    c.push(20);
+    c.push(21);
+    c.push(22);
+
+    a.clone_from(&b);
+    assert_eq!(&*a, &[10]);
+
+    b.clone_from(&c);
+    assert_eq!(&*b, &[20, 21, 22]);
+}
+
+#[test]
+fn test_size() {
+    use core::mem::size_of;
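+    // On 64-bit targets, `SmallVec<[u8; 8]>` is `capacity` (a `usize`, 8 bytes)
+    // plus the inline/heap data union (16 bytes, sized by its pointer + length
+    // heap variant).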
+    assert_eq!(24, size_of::<SmallVec<[u8; 8]>>());
+}
+
+#[cfg(feature = "drain_filter")]
+#[test]
+fn drain_filter() {
+    let mut a: SmallVec<[u8; 2]> = smallvec![1u8, 2, 3, 4, 5, 6, 7, 8];
+
+    let b: SmallVec<[u8; 2]> = a.drain_filter(|x| *x % 3 == 0).collect();
+
+    assert_eq!(a, SmallVec::<[u8; 2]>::from_slice(&[1u8, 2, 4, 5, 7, 8]));
+    assert_eq!(b, SmallVec::<[u8; 2]>::from_slice(&[3u8, 6]));
+}
+
+#[cfg(feature = "drain_keep_rest")]
+#[test]
+fn drain_keep_rest() {
+    let mut a: SmallVec<[i32; 3]> = smallvec![1i32, 2, 3, 4, 5, 6, 7, 8];
+    let mut df = a.drain_filter(|x| *x % 2 == 0);
+
+    assert_eq!(df.next().unwrap(), 2);
+    assert_eq!(df.next().unwrap(), 4);
+
+    df.keep_rest();
+
+    assert_eq!(a, SmallVec::<[i32; 3]>::from_slice(&[1i32, 3, 5, 6, 7, 8]));
+}
diff --git a/crates/smallvec/tests/debugger_visualizer.rs b/crates/smallvec/tests/debugger_visualizer.rs
new file mode 100644
index 0000000..b39aa9d
--- /dev/null
+++ b/crates/smallvec/tests/debugger_visualizer.rs
@@ -0,0 +1,68 @@
+use debugger_test::debugger_test;
+use smallvec::{smallvec, SmallVec};
+
+#[inline(never)]
+fn __break() {}
+
+#[debugger_test(
+    debugger = "cdb",
+    commands = r#"
+.nvlist
+dx sv
+
+g
+
+dx sv
+
+g
+
+dx sv
+"#,
+    expected_statements = r#"
+sv               : { len=0x2 is_inline=true } [Type: smallvec::SmallVec<array$<i32,4> >]
+    [<Raw View>]     [Type: smallvec::SmallVec<array$<i32,4> >]
+    [capacity]       : 4
+    [len]            : 0x2 [Type: unsigned __int64]
+    [0]              : 1 [Type: int]
+    [1]              : 2 [Type: int]
+
+sv               : { len=0x5 is_inline=false } [Type: smallvec::SmallVec<array$<i32,4> >]
+    [<Raw View>]     [Type: smallvec::SmallVec<array$<i32,4> >]
+    [capacity]       : 0x8 [Type: unsigned __int64]
+    [len]            : 0x5 [Type: unsigned __int64]
+    [0]              : 5 [Type: int]
+    [1]              : 2 [Type: int]
+    [2]              : 3 [Type: int]
+    [3]              : 4 [Type: int]
+    [4]              : 5 [Type: int]
+
+sv               : { len=0x5 is_inline=false } [Type: smallvec::SmallVec<array$<i32,4> >]
+    [<Raw View>]     [Type: smallvec::SmallVec<array$<i32,4> >]
+    [capacity]       : 0x8 [Type: unsigned __int64]
+    [len]            : 0x5 [Type: unsigned __int64]
+    [0]              : 2 [Type: int]
+    [1]              : 3 [Type: int]
+    [2]              : 4 [Type: int]
+    [3]              : 5 [Type: int]
+    [4]              : 5 [Type: int]
+"#
+)]
+#[inline(never)]
+fn test_debugger_visualizer() {
+    // This SmallVec can hold up to 4 items on the stack:
+    let mut sv: SmallVec<[i32; 4]> = smallvec![1, 2];
+    __break();
+
+    // Overfill the SmallVec to move its contents to the heap
+    for i in 3..6 {
+        sv.push(i);
+    }
+
+    // Update the contents of the first value of the SmallVec.
+    sv[0] = sv[1] + sv[2];
+    __break();
+
+    // Sort the SmallVec in place.
+    sv.sort();
+    __break();
+}
diff --git a/crates/smallvec/tests/macro.rs b/crates/smallvec/tests/macro.rs
new file mode 100644
index 0000000..fa52e79
--- /dev/null
+++ b/crates/smallvec/tests/macro.rs
@@ -0,0 +1,24 @@
+/// This file tests `smallvec!` without actually having the macro in scope.
+/// This forces any recursion to use a `$crate` prefix to reliably find itself.
+
+#[test]
+fn smallvec() {
+    let mut vec: smallvec::SmallVec<[i32; 2]>;
+
+    macro_rules! check {
+        ($init:tt) => {
+            vec = smallvec::smallvec! $init;
+            assert_eq!(*vec, *vec! $init);
+        }
+    }
+
+    check!([0; 0]);
+    check!([1; 1]);
+    check!([2; 2]);
+    check!([3; 3]);
+
+    check!([]);
+    check!([1]);
+    check!([1, 2]);
+    check!([1, 2, 3]);
+}
diff --git a/crates/smccc/.cargo-checksum.json b/crates/smccc/.cargo-checksum.json
new file mode 100644
index 0000000..65e2b06
--- /dev/null
+++ b/crates/smccc/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"AUTHORS":"98037126c2907fb9618d3340c36ee4e349d01c53b0157050a7497c9cffd24130","CHANGELOG.md":"2792e94842cc1feb8ce14ab1662a3d3f583acd0f9d56fab31843d43e2747a946","CONTRIBUTING.md":"6d22dc292d5e1a5211a9a46d84ca7fd3723baee38b3f41e33365594e399704c4","Cargo.toml":"7a63cb0e1467d14d5e60bc20beda0382e2a46bce8f0744190bfd98cce0c8fd23","LICENSE":"7045ecf48472d9af941299dc6bade713e41910b09e23377d34716fee3cd0fbb4","LICENSE-APACHE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","LICENSE-MIT":"de191bbfa18d69d1825e70df6687e7dda9701e6c79896a19a0d3d5e8ea445cd9","README.md":"2c4501c70dc21db4f87c16ad2aee943d51a57c0ad428e5dc7285a5631cdb7b1f","rust-toolchain.toml":"529098119eee4cf9ec887f5de38a3a3787cea3f050e241c03c19c132bb05b224","src/arch.rs":"4ac517d4b5f0e73c0c5a8214762bc37d45fd0a632147fd09925b0eea1dc5e6bb","src/arch/calls.rs":"9afd9a3935e1195e2b6d5dd8e381ec4839c260e45c4d17489e1f9f052c1ce59f","src/arch/error.rs":"d02b15ba01e1d2d33771b267131346fcf4326721f8a065b165f1e61e4cc4942b","src/error.rs":"9ed5719081c4b6e25728c114b66d4f08c7c72242c2aed46b6f9183af1da91e90","src/lib.rs":"08369d208b3e7b88c0a3f2604c0c450edb47bfad42772e07c50bc6f77798b0a3","src/psci.rs":"cb8eea756944b5a86a56b2b42f0209f671bb2f2b330bee1bf988a4bfc59253d4","src/psci/calls.rs":"98d1dc62acc41d836a32c687b5b569434f1c0d7ebc9ae18f6786dcf929e6daa3","src/psci/error.rs":"3a0c8e9421680c6ac1d2488ba9a7c83cdcaecc38544ac93f89f097b4ca5530bf"},"package":"617d17f088ec733e5a6b86da6ce4cce1414e6e856d6061c16dda51cceae6f68c"}
\ No newline at end of file
diff --git a/crates/smccc/AUTHORS b/crates/smccc/AUTHORS
new file mode 100644
index 0000000..cf7af2f
--- /dev/null
+++ b/crates/smccc/AUTHORS
@@ -0,0 +1,7 @@
+# This is the list of smccc's significant contributors.
+#
+# This does not necessarily list everyone who has contributed code,
+# especially since many employees of one corporation may be contributing.
+# To see the full list of contributors, see the revision history in
+# source control.
+Google LLC
diff --git a/crates/smccc/Android.bp b/crates/smccc/Android.bp
new file mode 100644
index 0000000..4fe4006
--- /dev/null
+++ b/crates/smccc/Android.bp
@@ -0,0 +1,46 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_smccc_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_smccc_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library_rlib {
+    name: "libsmccc",
+    crate_name: "smccc",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.1.1",
+    crate_root: "src/lib.rs",
+    edition: "2021",
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    prefer_rlib: true,
+    no_stdlibs: true,
+    stdlibs: [
+        "libcompiler_builtins.rust_sysroot",
+        "libcore.rust_sysroot",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
+
+rust_test {
+    name: "smccc_test_src_lib",
+    crate_name: "smccc",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.1.1",
+    crate_root: "src/lib.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    edition: "2021",
+}
diff --git a/crates/smccc/CHANGELOG.md b/crates/smccc/CHANGELOG.md
new file mode 100644
index 0000000..d7d52e7
--- /dev/null
+++ b/crates/smccc/CHANGELOG.md
@@ -0,0 +1,42 @@
+# Changelog
+
+## 0.1.1
+
+### Bugfixes
+
+- Fixed docs.rs to build for aarch64.
+
+## 0.1.0
+
+Renamed crate to `smccc`.
+
+### Breaking changes
+
+- Moved PSCI code to the `psci` module, moved other modules up one level.
+- Use type parameters rather than features to specify HVC vs. SMC for PSCI and arch calls.
+- Changed `error::Error::Unknown` to contain an `i64` rather than an `i32`.
+
+## `psci` 0.1.3
+
+### Bugfixes
+
+- Fixed type of `smccc::error::success_or_error_64`. This is a breaking change relative to 0.1.2 but
+  it was yanked.
+
+## `psci` 0.1.2 (yanked)
+
+### New features
+
+- Added constants, types and functions for standard Arm architecture SMCCC calls, in `smccc::arch`
+  module.
+- Added helpers in `smccc::error` module for handling negative return values as errors.
+
+## `psci` 0.1.1
+
+### New features
+
+- Exposed functions for SMC and HVC calls for use outside of PSCI.
+
+## `psci` 0.1.0
+
+Initial release with PSCI constants and functions.
diff --git a/crates/smccc/CONTRIBUTING.md b/crates/smccc/CONTRIBUTING.md
new file mode 100644
index 0000000..c88469f
--- /dev/null
+++ b/crates/smccc/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are just a few small
+guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License Agreement (CLA). You (or
+your employer) retain the copyright to your contribution; this simply gives us permission to use and
+redistribute your contributions as part of the project. Head over to
+<https://cla.developers.google.com/> to see your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one (even if it was for
+a different project), you probably don't need to do it again.
+
+## Code Reviews
+
+All submissions, including submissions by project members, require review. We use GitHub pull
+requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using
+pull requests.
+
+## Community Guidelines
+
+This project follows
+[Google's Open Source Community Guidelines](https://opensource.google/conduct/).
diff --git a/crates/smccc/Cargo.lock b/crates/smccc/Cargo.lock
new file mode 100644
index 0000000..f97102f
--- /dev/null
+++ b/crates/smccc/Cargo.lock
@@ -0,0 +1,7 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "smccc"
+version = "0.1.1"
diff --git a/crates/smccc/Cargo.toml b/crates/smccc/Cargo.toml
new file mode 100644
index 0000000..795596d
--- /dev/null
+++ b/crates/smccc/Cargo.toml
@@ -0,0 +1,37 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+name = "smccc"
+version = "0.1.1"
+authors = ["Andrew Walbran <qwandor@google.com>"]
+description = "Functions and constants for the Arm SMC Calling Convention (SMCCC) 1.4 and Arm Power State Coordination Interface (PSCI) 1.1 on aarch64."
+readme = "README.md"
+keywords = [
+    "arm",
+    "aarch64",
+    "cortex-a",
+    "smccc",
+    "psci",
+]
+categories = [
+    "embedded",
+    "no-std",
+    "hardware-support",
+]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/google/smccc"
+
+[package.metadata.docs.rs]
+default-target = "aarch64-unknown-none"
+
+[dependencies]
diff --git a/crates/smccc/LICENSE b/crates/smccc/LICENSE
new file mode 100644
index 0000000..a0de226
--- /dev/null
+++ b/crates/smccc/LICENSE
@@ -0,0 +1,229 @@
+This project is dual-licensed under Apache 2.0 and MIT terms.
+
+====
+
+MIT License
+
+Copyright (c) 2020 The cloudbbq authors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+====
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/crates/smccc/LICENSE-APACHE b/crates/smccc/LICENSE-APACHE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/crates/smccc/LICENSE-APACHE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/crates/smccc/LICENSE-MIT b/crates/smccc/LICENSE-MIT
new file mode 100644
index 0000000..e5fc1d8
--- /dev/null
+++ b/crates/smccc/LICENSE-MIT
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 The cloudbbq authors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/crates/smccc/METADATA b/crates/smccc/METADATA
new file mode 100644
index 0000000..c4d7f0b
--- /dev/null
+++ b/crates/smccc/METADATA
@@ -0,0 +1,20 @@
+name: "smccc"
+description: "Functions and constants for the Arm SMC Calling Convention (SMCCC) 1.4 and Arm Power State Coordination Interface (PSCI) 1.1 on aarch64."
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/smccc"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/smccc/smccc-0.1.1.crate"
+  }
+  version: "0.1.1"
+  # Dual-licensed, using the least restrictive per go/thirdpartylicenses#same.
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2023
+    month: 5
+    day: 10
+  }
+}
diff --git a/crates/smccc/MODULE_LICENSE_APACHE2 b/crates/smccc/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/smccc/MODULE_LICENSE_APACHE2
diff --git a/crates/smccc/README.md b/crates/smccc/README.md
new file mode 100644
index 0000000..4d8af68
--- /dev/null
+++ b/crates/smccc/README.md
@@ -0,0 +1,32 @@
+# SMCCC and PSCI functions for bare-metal Rust on aarch64
+
+[![crates.io page](https://img.shields.io/crates/v/smccc.svg)](https://crates.io/crates/smccc)
+[![docs.rs page](https://docs.rs/smccc/badge.svg)](https://docs.rs/smccc)
+
+This crate provides support for the Arm SMC Calling Convention version 1.4, including standard Arm
+Architecture Calls constants, and version 1.1 of the Arm Power State Coordination Interface (PSCI).
+It includes constants, functions to make the calls (on aarch64 targets), and error types.
+
+Note that the PSCI and SMCCC arch calls may be made via either HVC or SMC. You can choose which one
+to use by passing either `Hvc` or `Smc` as a type parameter to the relevant function.
+
+This crate currently only supports aarch64 and the SMC64 versions of the PSCI calls, in the cases
+that both SMC32 and SMC64 versions exist.
+
+This is not an officially supported Google product.
+
+## License
+
+Licensed under either of
+
+- Apache License, Version 2.0
+  ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+- MIT license
+  ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+## Contributing
+
+If you want to contribute to the project, see details of
+[how we accept contributions](CONTRIBUTING.md).
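The type-parameter mechanism the README describes is easiest to see from a caller's perspective. A minimal, aarch64-only sketch (the shut_down helper and its error handling are illustrative, not part of the crate):

    use smccc::{psci, Hvc};

    /// Requests SYSTEM_OFF via a hypervisor call; swapping `Hvc` for `Smc` is the
    /// only change needed to route the same request through a secure monitor call.
    pub fn shut_down() -> Result<(), psci::Error> {
        psci::system_off::<Hvc>()
    }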
diff --git a/crates/smccc/cargo_embargo.json b/crates/smccc/cargo_embargo.json
new file mode 100644
index 0000000..e3c9cb8
--- /dev/null
+++ b/crates/smccc/cargo_embargo.json
@@ -0,0 +1,11 @@
+{
+  "package": {
+    "smccc": {
+      "force_rlib": true,
+      "host_supported": false,
+      "no_std": true
+    }
+  },
+  "run_cargo": false,
+  "tests": true
+}
diff --git a/crates/smccc/rust-toolchain.toml b/crates/smccc/rust-toolchain.toml
new file mode 100644
index 0000000..3bd233d
--- /dev/null
+++ b/crates/smccc/rust-toolchain.toml
@@ -0,0 +1,2 @@
+[toolchain]
+targets = ["aarch64-unknown-none"]
diff --git a/crates/smccc/src/arch.rs b/crates/smccc/src/arch.rs
new file mode 100644
index 0000000..e3673f7
--- /dev/null
+++ b/crates/smccc/src/arch.rs
@@ -0,0 +1,88 @@
+// Copyright 2023 the authors.
+// This project is dual-licensed under Apache 2.0 and MIT terms.
+// See LICENSE-APACHE and LICENSE-MIT for details.
+
+//! Standard Arm architecture calls.
+
+mod calls;
+pub mod error;
+
+pub use calls::{
+    arch_workaround_1, arch_workaround_2, arch_workaround_3, features, soc_id, version,
+};
+use core::fmt::{self, Debug, Display, Formatter};
+pub use error::Error;
+
+pub const SMCCC_VERSION: u32 = 0x8000_0000;
+pub const SMCCC_ARCH_FEATURES: u32 = 0x8000_0001;
+pub const SMCCC_ARCH_SOC_ID: u32 = 0x8000_0002;
+pub const SMCCC_ARCH_WORKAROUND_1: u32 = 0x8000_8000;
+pub const SMCCC_ARCH_WORKAROUND_2: u32 = 0x8000_7FFF;
+pub const SMCCC_ARCH_WORKAROUND_3: u32 = 0x8000_3FFF;
+
+/// A version of the SMC Calling Convention.
+#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
+pub struct Version {
+    pub major: u16,
+    pub minor: u16,
+}
+
+impl Display for Version {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "{}.{}", self.major, self.minor)
+    }
+}
+
+impl Debug for Version {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        Display::fmt(self, f)
+    }
+}
+
+impl TryFrom<i32> for Version {
+    type Error = Error;
+
+    fn try_from(value: i32) -> Result<Self, Error> {
+        if value < 0 {
+            Err(value.into())
+        } else {
+            Ok(Self {
+                major: (value >> 16) as u16,
+                minor: value as u16,
+            })
+        }
+    }
+}
+
+impl From<Version> for u32 {
+    fn from(version: Version) -> Self {
+        u32::from(version.major) << 16 | u32::from(version.minor)
+    }
+}
+
+#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
+#[repr(u32)]
+pub enum SocIdType {
+    /// The SoC version.
+    Version,
+    /// The SoC revision.
+    Revision,
+}
+
+impl From<SocIdType> for u32 {
+    fn from(id_type: SocIdType) -> Self {
+        id_type as Self
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn convert_version() {
+        let version = Version { major: 1, minor: 2 };
+        assert_eq!(u32::from(version), 0x0001_0002);
+        assert_eq!(0x0001_0002.try_into(), Ok(version));
+    }
+}
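Because `Version` derives `Ord` and converts to and from the raw register value, callers can gate behaviour on a minimum SMCCC version. A small host-side sketch (the at_least helper is illustrative):

    use smccc::arch::Version;

    /// Decodes a raw SMCCC_VERSION return value and checks it against a required minimum.
    /// Negative raw values are SMCCC error codes and are treated as "not sufficient".
    fn at_least(raw: i32, required: Version) -> bool {
        Version::try_from(raw).map_or(false, |version| version >= required)
    }

    fn main() {
        // 0x0001_0001 encodes SMCCC 1.1.
        assert!(at_least(0x0001_0001, Version { major: 1, minor: 1 }));
        assert!(!at_least(0x0001_0001, Version { major: 1, minor: 2 }));
        assert!(!at_least(-1, Version { major: 1, minor: 0 })); // NOT_SUPPORTED
    }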
diff --git a/crates/smccc/src/arch/calls.rs b/crates/smccc/src/arch/calls.rs
new file mode 100644
index 0000000..a06511d
--- /dev/null
+++ b/crates/smccc/src/arch/calls.rs
@@ -0,0 +1,43 @@
+// Copyright 2023 the authors.
+// This project is dual-licensed under Apache 2.0 and MIT terms.
+// See LICENSE-APACHE and LICENSE-MIT for details.
+
+use super::{
+    error::Error, SocIdType, Version, SMCCC_ARCH_FEATURES, SMCCC_ARCH_SOC_ID,
+    SMCCC_ARCH_WORKAROUND_1, SMCCC_ARCH_WORKAROUND_2, SMCCC_ARCH_WORKAROUND_3, SMCCC_VERSION,
+};
+use crate::{
+    error::{positive_or_error_32, success_or_error_32},
+    Call,
+};
+
+/// Returns the implemented version of the SMC Calling Convention.
+pub fn version<C: Call>() -> Result<Version, Error> {
+    (C::call32(SMCCC_VERSION, [0; 7])[0] as i32).try_into()
+}
+
+/// Returns whether the given Arm Architecture Service function is implemented, and any feature
+/// flags specific to the function.
+pub fn features<C: Call>(arch_func_id: u32) -> Result<u32, Error> {
+    positive_or_error_32(C::call32(SMCCC_ARCH_FEATURES, [arch_func_id, 0, 0, 0, 0, 0, 0])[0])
+}
+
+/// Returns the SiP defined SoC identification details.
+pub fn soc_id<C: Call>(soc_id_type: SocIdType) -> Result<u32, Error> {
+    positive_or_error_32(C::call32(SMCCC_ARCH_SOC_ID, [soc_id_type.into(), 0, 0, 0, 0, 0, 0])[0])
+}
+
+/// Executes a firmware workaround to mitigate CVE-2017-5715.
+pub fn arch_workaround_1<C: Call>() -> Result<(), Error> {
+    success_or_error_32(C::call32(SMCCC_ARCH_WORKAROUND_1, [0; 7])[0])
+}
+
+/// Enables or disables the mitigation for CVE-2018-3639.
+pub fn arch_workaround_2<C: Call>(enable: bool) -> Result<(), Error> {
+    success_or_error_32(C::call32(SMCCC_ARCH_WORKAROUND_2, [enable.into(), 0, 0, 0, 0, 0, 0])[0])
+}
+
+/// Executes a firmware workaround to mitigate CVE-2017-5715 and CVE-2022-23960.
+pub fn arch_workaround_3<C: Call>() -> Result<(), Error> {
+    success_or_error_32(C::call32(SMCCC_ARCH_WORKAROUND_3, [0; 7])[0])
+}
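A typical use of the helpers above is to probe SMCCC_ARCH_FEATURES before invoking a workaround. A sketch, generic over the call mechanism; treating NotSupported and NotRequired as "nothing to do" is an assumption of this example, not crate policy:

    use smccc::arch::{self, Error, SMCCC_ARCH_WORKAROUND_1};
    use smccc::Call;

    /// Applies the CVE-2017-5715 firmware workaround only if the firmware advertises it.
    fn mitigate_spectre_v2<C: Call>() -> Result<(), Error> {
        match arch::features::<C>(SMCCC_ARCH_WORKAROUND_1) {
            // A non-negative return value means the workaround call is implemented.
            Ok(_) => arch::arch_workaround_1::<C>(),
            // Nothing to apply on this platform.
            Err(Error::NotSupported) | Err(Error::NotRequired) => Ok(()),
            Err(other) => Err(other),
        }
    }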
diff --git a/crates/smccc/src/arch/error.rs b/crates/smccc/src/arch/error.rs
new file mode 100644
index 0000000..79bb094
--- /dev/null
+++ b/crates/smccc/src/arch/error.rs
@@ -0,0 +1,58 @@
+// Copyright 2023 the authors.
+// This project is dual-licensed under Apache 2.0 and MIT terms.
+// See LICENSE-APACHE and LICENSE-MIT for details.
+
+//! Error codes for standard Arm Architecture SMCCC calls.
+
+pub use crate::error::SUCCESS;
+use core::fmt::{self, Display, Formatter};
+
+pub const NOT_SUPPORTED: i32 = -1;
+pub const NOT_REQUIRED: i32 = -2;
+pub const INVALID_PARAMETER: i32 = -3;
+
+/// Errors for standard Arm Architecture calls.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum Error {
+    /// The call is not supported by the implementation.
+    NotSupported,
+    /// The call is deemed not required by the implementation.
+    NotRequired,
+    /// One of the call parameters has a non-supported value.
+    InvalidParameter,
+    /// There was an unexpected return value.
+    Unknown(i32),
+}
+
+impl From<Error> for i32 {
+    fn from(error: Error) -> i32 {
+        match error {
+            Error::NotSupported => NOT_SUPPORTED,
+            Error::NotRequired => NOT_REQUIRED,
+            Error::InvalidParameter => INVALID_PARAMETER,
+            Error::Unknown(value) => value,
+        }
+    }
+}
+
+impl From<i32> for Error {
+    fn from(value: i32) -> Self {
+        match value {
+            NOT_SUPPORTED => Error::NotSupported,
+            NOT_REQUIRED => Error::NotRequired,
+            INVALID_PARAMETER => Error::InvalidParameter,
+            _ => Error::Unknown(value),
+        }
+    }
+}
+
+impl Display for Error {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        match self {
+            Self::NotSupported => write!(f, "SMCCC call not supported"),
+            Self::NotRequired => write!(f, "SMCCC call not required"),
+            Self::InvalidParameter => write!(f, "SMCCC call received non-supported value"),
+            Self::Unknown(e) => write!(f, "Unknown SMCCC return value {} ({0:#x})", e),
+        }
+    }
+}
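The conversions above round-trip the raw SMCCC return codes, and unrecognised codes are preserved rather than dropped; for example:

    use smccc::arch::error::{Error, NOT_SUPPORTED};

    fn main() {
        assert_eq!(Error::from(NOT_SUPPORTED), Error::NotSupported);
        assert_eq!(i32::from(Error::NotSupported), NOT_SUPPORTED);
        // Codes outside the defined set are kept verbatim in Error::Unknown.
        assert_eq!(Error::from(-17), Error::Unknown(-17));
        assert_eq!(i32::from(Error::Unknown(-17)), -17);
    }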
diff --git a/crates/smccc/src/error.rs b/crates/smccc/src/error.rs
new file mode 100644
index 0000000..0267411
--- /dev/null
+++ b/crates/smccc/src/error.rs
@@ -0,0 +1,78 @@
+// Copyright 2023 the authors.
+// This project is dual-licensed under Apache 2.0 and MIT terms.
+// See LICENSE-APACHE and LICENSE-MIT for details.
+
+//! Utility functions for error handling.
+//!
+//! These functions can be combined with the appropriate HVC or SMC functions to wrap calls which
+//! return a single value where negative values indicate an error.
+//!
+//! For example, the [`system_off`](crate::psci::system_off) function is implemented approximately
+//! as:
+//!
+//! ```
+//! # #[cfg(target_arch = "aarch64")]
+//! use smccc::{
+//!     error::success_or_error_32,
+//!     psci::{error::Error, PSCI_SYSTEM_OFF},
+//!     smc32,
+//! };
+//!
+//! # #[cfg(target_arch = "aarch64")]
+//! pub fn system_off() -> Result<(), Error> {
+//!     success_or_error_32(smc32(PSCI_SYSTEM_OFF, [0; 7])[0])
+//! }
+//! ```
+
+/// A value commonly returned to indicate a successful SMCCC call.
+pub const SUCCESS: i32 = 0;
+
+/// Converts the given value (returned from an HVC32 or SMC32 call) either to `Ok(())` if it is
+/// equal to [`SUCCESS`], or else an error of the given type.
+pub fn success_or_error_32<E: From<i32>>(value: u32) -> Result<(), E> {
+    let value = value as i32;
+    if value == SUCCESS {
+        Ok(())
+    } else {
+        Err(value.into())
+    }
+}
+
+/// Converts the given value (returned from an HVC64 or SMC64 call) either to `Ok(())` if it is
+/// equal to [`SUCCESS`], or else an error of the given type.
+pub fn success_or_error_64<E: From<i64>>(value: u64) -> Result<(), E> {
+    let value = value as i64;
+    if value == SUCCESS.into() {
+        Ok(())
+    } else {
+        Err(value.into())
+    }
+}
+
+/// Returns `Ok(value)` if the given value has its high bit unset (i.e. would be non-negative when
+/// treated as a signed value), or an error of the given type if the high bit is set.
+///
+/// This is intended to be used with the return value of [`hvc32`](super::hvc32) or
+/// [`smc32`](super::smc32).
+pub fn positive_or_error_32<E: From<i32>>(value: u32) -> Result<u32, E> {
+    let signed = value as i32;
+    if signed < 0 {
+        Err(signed.into())
+    } else {
+        Ok(value)
+    }
+}
+
+/// Returns `Ok(value)` if the given value has its high bit unset (i.e. would be non-negative when
+/// treated as a signed value), or an error of the given type if the high bit is set.
+///
+/// This is intended to be used with the return value of [`hvc64`](super::hvc64) or
+/// [`smc64`](super::smc64).
+pub fn positive_or_error_64<E: From<i64>>(value: u64) -> Result<u64, E> {
+    let signed = value as i64;
+    if signed < 0 {
+        Err(signed.into())
+    } else {
+        Ok(value)
+    }
+}
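The `E: From<i32>` / `E: From<i64>` bounds mean these helpers also work for caller-defined error types, e.g. for a vendor-specific SMC interface. A sketch; VendorError and its -2 code are hypothetical, not defined by this crate:

    use smccc::error::success_or_error_64;

    #[derive(Debug, PartialEq)]
    enum VendorError {
        Busy,
        Other(i64),
    }

    impl From<i64> for VendorError {
        fn from(value: i64) -> Self {
            match value {
                -2 => VendorError::Busy,
                other => VendorError::Other(other),
            }
        }
    }

    fn main() {
        // 0 is SUCCESS; any other value is converted through From<i64>.
        assert_eq!(success_or_error_64::<VendorError>(0), Ok(()));
        assert_eq!(success_or_error_64::<VendorError>(-2i64 as u64), Err(VendorError::Busy));
    }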
diff --git a/crates/smccc/src/lib.rs b/crates/smccc/src/lib.rs
new file mode 100644
index 0000000..2857ebf
--- /dev/null
+++ b/crates/smccc/src/lib.rs
@@ -0,0 +1,172 @@
+// Copyright 2022 the authors.
+// This project is dual-licensed under Apache 2.0 and MIT terms.
+// See LICENSE-APACHE and LICENSE-MIT for details.
+
+//! Functions for version 1.4 of the Arm SMC Calling Convention and version 1.1 of the Arm Power
+//! State Coordination Interface (PSCI), and relevant constants.
+//!
+//! Note that the PSCI and SMCCC arch calls may be made via either HVC or SMC. You can choose which
+//! one to use by passing either [`Hvc`] or [`Smc`] as a type parameter to the relevant function.
+//!
+//! This crate currently only supports aarch64 and the SMC64 versions of the PSCI calls, in the
+//! cases that both SMC32 and SMC64 versions exist.
+
+#![no_std]
+
+pub mod arch;
+pub mod error;
+pub mod psci;
+
+/// Use a Hypervisor Call (HVC).
+#[cfg(target_arch = "aarch64")]
+pub struct Hvc;
+
+/// Use a Secure Monitor Call (SMC).
+#[cfg(target_arch = "aarch64")]
+pub struct Smc;
+
+/// Functions to make an HVC or SMC call.
+pub trait Call {
+    /// Makes a call using the 32-bit calling convention.
+    fn call32(function: u32, args: [u32; 7]) -> [u32; 8];
+    /// Makes a call using the 64-bit calling convention.
+    fn call64(function: u32, args: [u64; 17]) -> [u64; 18];
+}
+
+#[cfg(target_arch = "aarch64")]
+impl Call for Hvc {
+    fn call32(function: u32, args: [u32; 7]) -> [u32; 8] {
+        hvc32(function, args)
+    }
+
+    fn call64(function: u32, args: [u64; 17]) -> [u64; 18] {
+        hvc64(function, args)
+    }
+}
+
+#[cfg(target_arch = "aarch64")]
+impl Call for Smc {
+    fn call32(function: u32, args: [u32; 7]) -> [u32; 8] {
+        smc32(function, args)
+    }
+
+    fn call64(function: u32, args: [u64; 17]) -> [u64; 18] {
+        smc64(function, args)
+    }
+}
+
+/// Makes an HVC32 call to the hypervisor, following the SMC Calling Convention version 1.3.
+#[cfg(target_arch = "aarch64")]
+#[inline(always)]
+pub fn hvc32(function: u32, args: [u32; 7]) -> [u32; 8] {
+    unsafe {
+        let mut ret = [0; 8];
+
+        core::arch::asm!(
+            "hvc #0",
+            inout("w0") function => ret[0],
+            inout("w1") args[0] => ret[1],
+            inout("w2") args[1] => ret[2],
+            inout("w3") args[2] => ret[3],
+            inout("w4") args[3] => ret[4],
+            inout("w5") args[4] => ret[5],
+            inout("w6") args[5] => ret[6],
+            inout("w7") args[6] => ret[7],
+            options(nomem, nostack)
+        );
+
+        ret
+    }
+}
+
+/// Makes an SMC32 call to the firmware, following the SMC Calling Convention version 1.3.
+#[cfg(target_arch = "aarch64")]
+#[inline(always)]
+pub fn smc32(function: u32, args: [u32; 7]) -> [u32; 8] {
+    unsafe {
+        let mut ret = [0; 8];
+
+        core::arch::asm!(
+            "smc #0",
+            inout("w0") function => ret[0],
+            inout("w1") args[0] => ret[1],
+            inout("w2") args[1] => ret[2],
+            inout("w3") args[2] => ret[3],
+            inout("w4") args[3] => ret[4],
+            inout("w5") args[4] => ret[5],
+            inout("w6") args[5] => ret[6],
+            inout("w7") args[6] => ret[7],
+            options(nomem, nostack)
+        );
+
+        ret
+    }
+}
+
+/// Makes an HVC64 call to the hypervisor, following the SMC Calling Convention version 1.3.
+#[cfg(target_arch = "aarch64")]
+#[inline(always)]
+pub fn hvc64(function: u32, args: [u64; 17]) -> [u64; 18] {
+    unsafe {
+        let mut ret = [0; 18];
+
+        core::arch::asm!(
+            "hvc #0",
+            inout("x0") function as u64 => ret[0],
+            inout("x1") args[0] => ret[1],
+            inout("x2") args[1] => ret[2],
+            inout("x3") args[2] => ret[3],
+            inout("x4") args[3] => ret[4],
+            inout("x5") args[4] => ret[5],
+            inout("x6") args[5] => ret[6],
+            inout("x7") args[6] => ret[7],
+            inout("x8") args[7] => ret[8],
+            inout("x9") args[8] => ret[9],
+            inout("x10") args[9] => ret[10],
+            inout("x11") args[10] => ret[11],
+            inout("x12") args[11] => ret[12],
+            inout("x13") args[12] => ret[13],
+            inout("x14") args[13] => ret[14],
+            inout("x15") args[14] => ret[15],
+            inout("x16") args[15] => ret[16],
+            inout("x17") args[16] => ret[17],
+            options(nomem, nostack)
+        );
+
+        ret
+    }
+}
+
+/// Makes an SMC64 call to the firmware, following the SMC Calling Convention version 1.3.
+#[cfg(target_arch = "aarch64")]
+#[inline(always)]
+pub fn smc64(function: u32, args: [u64; 17]) -> [u64; 18] {
+    unsafe {
+        let mut ret = [0; 18];
+
+        core::arch::asm!(
+            "smc #0",
+            inout("x0") function as u64 => ret[0],
+            inout("x1") args[0] => ret[1],
+            inout("x2") args[1] => ret[2],
+            inout("x3") args[2] => ret[3],
+            inout("x4") args[3] => ret[4],
+            inout("x5") args[4] => ret[5],
+            inout("x6") args[5] => ret[6],
+            inout("x7") args[6] => ret[7],
+            inout("x8") args[7] => ret[8],
+            inout("x9") args[8] => ret[9],
+            inout("x10") args[9] => ret[10],
+            inout("x11") args[10] => ret[11],
+            inout("x12") args[11] => ret[12],
+            inout("x13") args[12] => ret[13],
+            inout("x14") args[13] => ret[14],
+            inout("x15") args[14] => ret[15],
+            inout("x16") args[15] => ret[16],
+            inout("x17") args[16] => ret[17],
+            options(nomem, nostack)
+        );
+
+        ret
+    }
+}
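Because the call mechanism is a type parameter implementing `Call`, code built on this crate can be unit-tested on the host with a fake transport in place of `Hvc`/`Smc`. A sketch; the AlwaysSucceeds type is illustrative and returns all zeroes, i.e. SUCCESS, for every call:

    use smccc::Call;

    /// A fake SMCCC transport for host-side tests of code that is generic over `Call`.
    struct AlwaysSucceeds;

    impl Call for AlwaysSucceeds {
        fn call32(_function: u32, _args: [u32; 7]) -> [u32; 8] {
            [0; 8]
        }

        fn call64(_function: u32, _args: [u64; 17]) -> [u64; 18] {
            [0; 18]
        }
    }

    fn main() {
        // With the fake transport, PSCI wrappers report success without any firmware.
        assert!(smccc::psci::system_off::<AlwaysSucceeds>().is_ok());
        assert!(smccc::psci::cpu_off::<AlwaysSucceeds>().is_ok());
    }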
diff --git a/crates/smccc/src/psci.rs b/crates/smccc/src/psci.rs
new file mode 100644
index 0000000..87dbecd
--- /dev/null
+++ b/crates/smccc/src/psci.rs
@@ -0,0 +1,166 @@
+// Copyright 2022 the authors.
+// This project is dual-licensed under Apache 2.0 and MIT terms.
+// See LICENSE-APACHE and LICENSE-MIT for details.
+
+//! Constants for version 1.1 of the Arm Power State Coordination Interface (PSCI), and functions
+//! to call them.
+//!
+//! Note that PSCI and other SMCCC calls may be made via either HVC or SMC. You can choose which one
+//! to use by passing either [`Hvc`](crate::Hvc) or [`Smc`](crate::Smc) as a type parameter to the
+//! relevant function. On targets other than aarch64 the functions to make calls are not
+//! available, but the constants and types are still provided.
+//!
+//! This crate currently only supports aarch64 and the SMC64 versions of the various calls, in the
+//! cases that both SMC32 and SMC64 versions exist.
+
+mod calls;
+pub mod error;
+
+pub use calls::{
+    affinity_info, cpu_default_suspend, cpu_freeze, cpu_off, cpu_on, cpu_suspend, mem_protect,
+    mem_protect_check_range, migrate, migrate_info_type, migrate_info_up_cpu, node_hw_state,
+    psci_features, set_suspend_mode, stat_count, stat_residency, system_off, system_reset,
+    system_reset2, system_suspend, version,
+};
+pub use error::Error;
+
+pub const PSCI_VERSION: u32 = 0x84000000;
+pub const PSCI_CPU_SUSPEND_32: u32 = 0x84000001;
+pub const PSCI_CPU_SUSPEND_64: u32 = 0xC4000001;
+pub const PSCI_CPU_OFF: u32 = 0x84000002;
+pub const PSCI_CPU_ON_32: u32 = 0x84000003;
+pub const PSCI_CPU_ON_64: u32 = 0xC4000003;
+pub const PSCI_AFFINITY_INFO_32: u32 = 0x84000004;
+pub const PSCI_AFFINITY_INFO_64: u32 = 0xC4000004;
+pub const PSCI_MIGRATE_32: u32 = 0x84000005;
+pub const PSCI_MIGRATE_64: u32 = 0xC4000005;
+pub const PSCI_MIGRATE_INFO_TYPE: u32 = 0x84000006;
+pub const PSCI_MIGRATE_INFO_UP_CPU_32: u32 = 0x84000007;
+pub const PSCI_MIGRATE_INFO_UP_CPU_64: u32 = 0xC4000007;
+pub const PSCI_SYSTEM_OFF: u32 = 0x84000008;
+pub const PSCI_SYSTEM_RESET: u32 = 0x84000009;
+pub const PSCI_SYSTEM_RESET2_32: u32 = 0x84000012;
+pub const PSCI_SYSTEM_RESET2_64: u32 = 0xC4000012;
+pub const PSCI_MEM_PROTECT: u32 = 0x84000013;
+pub const PSCI_MEM_PROTECT_CHECK_RANGE_32: u32 = 0x84000014;
+pub const PSCI_MEM_PROTECT_CHECK_RANGE_64: u32 = 0xC4000014;
+pub const PSCI_FEATURES: u32 = 0x8400000A;
+pub const PSCI_CPU_FREEZE: u32 = 0x8400000B;
+pub const PSCI_CPU_DEFAULT_SUSPEND_32: u32 = 0x8400000C;
+pub const PSCI_CPU_DEFAULT_SUSPEND_64: u32 = 0xC400000C;
+pub const PSCI_NODE_HW_STATE_32: u32 = 0x8400000D;
+pub const PSCI_NODE_HW_STATE_64: u32 = 0xC400000D;
+pub const PSCI_SYSTEM_SUSPEND_32: u32 = 0x8400000E;
+pub const PSCI_SYSTEM_SUSPEND_64: u32 = 0xC400000E;
+pub const PSCI_SET_SUSPEND_MODE: u32 = 0x8400000F;
+pub const PSCI_STAT_RESIDENCY_32: u32 = 0x84000010;
+pub const PSCI_STAT_RESIDENCY_64: u32 = 0xC4000010;
+pub const PSCI_STAT_COUNT_32: u32 = 0x84000011;
+pub const PSCI_STAT_COUNT_64: u32 = 0xC4000011;
+
+/// Selects which affinity level fields are valid in the `target_affinity` parameter to
+/// `AFFINITY_INFO`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum LowestAffinityLevel {
+    /// All affinity level fields are valid.
+    All = 0,
+    /// The `Aff0` field is ignored.
+    Aff0Ignored = 1,
+    /// The `Aff0` and `Aff1` fields are ignored.
+    Aff0Aff1Ignored = 2,
+    /// The `Aff0`, `Aff1` and `Aff2` fields are ignored.
+    Aff0Aff1Aff2Ignored = 3,
+}
+
+impl From<LowestAffinityLevel> for u64 {
+    fn from(lowest_affinity_level: LowestAffinityLevel) -> u64 {
+        (lowest_affinity_level as u32).into()
+    }
+}
+
+/// Affinity state values returned by `AFFINITY_INFO`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum AffinityState {
+    /// At least one core in the affinity instance is on.
+    On = 0,
+    /// All cores in the affinity instance are off.
+    Off = 1,
+    /// The affinity instance is transitioning to the on state.
+    OnPending = 2,
+}
+
+impl TryFrom<i32> for AffinityState {
+    type Error = Error;
+
+    fn try_from(value: i32) -> Result<Self, Error> {
+        match value {
+            0 => Ok(Self::On),
+            1 => Ok(Self::Off),
+            2 => Ok(Self::OnPending),
+            _ => Err(value.into()),
+        }
+    }
+}
+
+/// The level of multicore support in the Trusted OS, as returned by `MIGRATE_INFO_TYPE`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum MigrateType {
+    /// The Trusted OS will only run on one core, and supports the `MIGRATE` function.
+    MigrateCapable = 0,
+    /// The Trusted OS does not support the `MIGRATE` function.
+    NotMigrateCapable = 1,
+    /// Either there is no Trusted OS, or it doesn't require migration.
+    MigrationNotRequired = 2,
+}
+
+impl TryFrom<i32> for MigrateType {
+    type Error = Error;
+
+    fn try_from(value: i32) -> Result<Self, Error> {
+        match value {
+            0 => Ok(Self::MigrateCapable),
+            1 => Ok(Self::NotMigrateCapable),
+            2 => Ok(Self::MigrationNotRequired),
+            _ => Err(value.into()),
+        }
+    }
+}
+
+/// The power state of a node in the power domain topology, as returned by `NODE_HW_STATE`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum PowerState {
+    /// The node is in the run state.
+    HwOn = 0,
+    /// The node is fully powered down.
+    HwOff = 1,
+    /// The node is in a standby or retention power state.
+    HwStandby = 2,
+}
+
+impl TryFrom<i32> for PowerState {
+    type Error = Error;
+
+    fn try_from(value: i32) -> Result<Self, Error> {
+        match value {
+            0 => Ok(Self::HwOn),
+            1 => Ok(Self::HwOff),
+            2 => Ok(Self::HwStandby),
+            _ => Err(value.into()),
+        }
+    }
+}
+
+/// The mode to be used by `CPU_SUSPEND`, as set by `PSCI_SET_SUSPEND_MODE`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum SuspendMode {
+    /// Platform-coordinated mode.
+    PlatformCoordinated = 0,
+    /// OS-initiated mode.
+    OsInitiated = 1,
+}
+
+impl From<SuspendMode> for u32 {
+    fn from(suspend_mode: SuspendMode) -> u32 {
+        suspend_mode as u32
+    }
+}
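The enums above are plain value mappings: raw PSCI return values convert via `TryFrom<i32>`, and parameters convert into the integer the call expects. For example:

    use smccc::psci::{AffinityState, LowestAffinityLevel};

    fn main() {
        // AFFINITY_INFO return values 0..=2 map onto the enum; anything else is an error.
        assert!(matches!(AffinityState::try_from(0), Ok(AffinityState::On)));
        assert!(matches!(AffinityState::try_from(2), Ok(AffinityState::OnPending)));
        assert!(AffinityState::try_from(-1).is_err());

        // The lowest-affinity-level selector is passed to AFFINITY_INFO as a plain u64.
        assert_eq!(u64::from(LowestAffinityLevel::Aff0Ignored), 1);
    }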
diff --git a/crates/smccc/src/psci/calls.rs b/crates/smccc/src/psci/calls.rs
new file mode 100644
index 0000000..afc629e
--- /dev/null
+++ b/crates/smccc/src/psci/calls.rs
@@ -0,0 +1,354 @@
+// Copyright 2022 the authors.
+// This project is dual-licensed under Apache 2.0 and MIT terms.
+// See LICENSE-APACHE and LICENSE-MIT for details.
+
+//! Functions to make PSCI calls.
+
+use super::{
+    error::Error, AffinityState, LowestAffinityLevel, MigrateType, PowerState, SuspendMode,
+    PSCI_AFFINITY_INFO_64, PSCI_CPU_DEFAULT_SUSPEND_64, PSCI_CPU_FREEZE, PSCI_CPU_OFF,
+    PSCI_CPU_ON_64, PSCI_CPU_SUSPEND_64, PSCI_FEATURES, PSCI_MEM_PROTECT,
+    PSCI_MEM_PROTECT_CHECK_RANGE_64, PSCI_MIGRATE_64, PSCI_MIGRATE_INFO_TYPE,
+    PSCI_MIGRATE_INFO_UP_CPU_64, PSCI_NODE_HW_STATE_64, PSCI_SET_SUSPEND_MODE, PSCI_STAT_COUNT_64,
+    PSCI_STAT_RESIDENCY_64, PSCI_SYSTEM_OFF, PSCI_SYSTEM_RESET, PSCI_SYSTEM_RESET2_64,
+    PSCI_SYSTEM_SUSPEND_64, PSCI_VERSION,
+};
+use crate::{
+    error::{positive_or_error_32, success_or_error_32, success_or_error_64},
+    Call,
+};
+
+/// Returns the version of PSCI implemented.
+pub fn version<C: Call>() -> u32 {
+    C::call32(PSCI_VERSION, [0; 7])[0]
+}
+
+/// Suspends execution of a core or topology node.
+pub fn cpu_suspend<C: Call>(
+    power_state: u32,
+    entry_point_address: u64,
+    context_id: u64,
+) -> Result<(), Error> {
+    success_or_error_64(
+        C::call64(
+            PSCI_CPU_SUSPEND_64,
+            [
+                power_state.into(),
+                entry_point_address,
+                context_id,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+            ],
+        )[0],
+    )
+}
+
+/// Powers down the current core.
+pub fn cpu_off<C: Call>() -> Result<(), Error> {
+    success_or_error_32(C::call32(PSCI_CPU_OFF, [0; 7])[0])
+}
+
+/// Powers up a core.
+pub fn cpu_on<C: Call>(
+    target_cpu: u64,
+    entry_point_address: u64,
+    context_id: u64,
+) -> Result<(), Error> {
+    success_or_error_64(
+        C::call64(
+            PSCI_CPU_ON_64,
+            [
+                target_cpu,
+                entry_point_address,
+                context_id,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+            ],
+        )[0],
+    )
+}
+
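As a usage sketch for the cpu_on wrapper above together with affinity_info below: a kernel typically starts a secondary core and then polls until it reports on. The bring_up_core helper, its MPIDR/entry/context inputs, and the unbounded busy-wait are illustrative assumptions, not part of this crate:

    use smccc::psci::{self, AffinityState, Error, LowestAffinityLevel};
    use smccc::Call;

    /// Starts a secondary core and busy-waits until AFFINITY_INFO reports it on.
    fn bring_up_core<C: Call>(mpidr: u64, entry: u64, context: u64) -> Result<(), Error> {
        psci::cpu_on::<C>(mpidr, entry, context)?;
        loop {
            match psci::affinity_info::<C>(mpidr, LowestAffinityLevel::All)? {
                AffinityState::On => return Ok(()),
                // Still coming up (or reported off); keep polling.
                AffinityState::Off | AffinityState::OnPending => core::hint::spin_loop(),
            }
        }
    }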
+/// Gets the status of an affinity instance.
+pub fn affinity_info<C: Call>(
+    target_affinity: u64,
+    lowest_affinity_level: LowestAffinityLevel,
+) -> Result<AffinityState, Error> {
+    (C::call64(
+        PSCI_AFFINITY_INFO_64,
+        [
+            target_affinity,
+            lowest_affinity_level as u64,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+        ],
+    )[0] as i32)
+        .try_into()
+}
+
+/// Asks the Trusted OS to migrate its context to a specific core.
+pub fn migrate<C: Call>(target_cpu: u64) -> Result<(), Error> {
+    success_or_error_64(
+        C::call64(
+            PSCI_MIGRATE_64,
+            [target_cpu, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+        )[0],
+    )
+}
+
+/// Identifies the level of multicore support in the Trusted OS.
+pub fn migrate_info_type<C: Call>() -> Result<MigrateType, Error> {
+    (C::call32(PSCI_MIGRATE_INFO_TYPE, [0; 7])[0] as i32).try_into()
+}
+
+/// Returns the MPIDR value of the current resident core of the Trusted OS.
+pub fn migrate_info_up_cpu<C: Call>() -> u64 {
+    C::call64(PSCI_MIGRATE_INFO_UP_CPU_64, [0; 17])[0]
+}
+
+/// Shuts down the system.
+pub fn system_off<C: Call>() -> Result<(), Error> {
+    success_or_error_32(C::call32(PSCI_SYSTEM_OFF, [0; 7])[0])
+}
+
+/// Resets the system.
+pub fn system_reset<C: Call>() -> Result<(), Error> {
+    success_or_error_32(C::call32(PSCI_SYSTEM_RESET, [0; 7])[0])
+}
+
+/// Resets the system in an architectural or vendor-specific way.
+pub fn system_reset2<C: Call>(reset_type: u32, cookie: u64) -> Result<(), Error> {
+    success_or_error_64(
+        C::call64(
+            PSCI_SYSTEM_RESET2_64,
+            [
+                reset_type.into(),
+                cookie,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+            ],
+        )[0],
+    )
+}
+
+/// Enables or disables memory protection.
+pub fn mem_protect<C: Call>(enable: bool) -> Result<bool, Error> {
+    match C::call32(PSCI_MEM_PROTECT, [enable as u32, 0, 0, 0, 0, 0, 0])[0] as i32 {
+        0 => Ok(false),
+        1 => Ok(true),
+        error => Err(error.into()),
+    }
+}
+
+/// Checks whether a memory range is protected by `MEM_PROTECT`.
+pub fn mem_protect_check_range<C: Call>(base: u64, length: u64) -> Result<(), Error> {
+    success_or_error_64(
+        C::call64(
+            PSCI_MEM_PROTECT_CHECK_RANGE_64,
+            [base, length, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+        )[0],
+    )
+}
+
+/// Queries whether `SMCCC_VERSION` or a specific PSCI function is implemented, and what features
+/// are supported.
+pub fn psci_features<C: Call>(psci_function_id: u32) -> Result<u32, Error> {
+    positive_or_error_32(C::call32(PSCI_FEATURES, [psci_function_id, 0, 0, 0, 0, 0, 0])[0])
+}
+
+/// Puts the current core into an implementation-defined low power state.
+pub fn cpu_freeze<C: Call>() -> Result<(), Error> {
+    success_or_error_32(C::call32(PSCI_CPU_FREEZE, [0; 7])[0])
+}
+
+/// Puts the current core into an implementation-defined low power state.
+pub fn cpu_default_suspend<C: Call>(
+    entry_point_address: u64,
+    context_id: u64,
+) -> Result<(), Error> {
+    success_or_error_64(
+        C::call64(
+            PSCI_CPU_DEFAULT_SUSPEND_64,
+            [
+                entry_point_address,
+                context_id,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+            ],
+        )[0],
+    )
+}
+
+/// Returns the true hardware state of a node in the power domain topology.
+pub fn node_hw_state<C: Call>(target_cpu: u64, power_level: u32) -> Result<PowerState, Error> {
+    (C::call64(
+        PSCI_NODE_HW_STATE_64,
+        [
+            target_cpu,
+            power_level.into(),
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+        ],
+    )[0] as i32)
+        .try_into()
+}
+
+/// Suspends the system to RAM.
+pub fn system_suspend<C: Call>(entry_point_address: u64, context_id: u64) -> Result<(), Error> {
+    success_or_error_64(
+        C::call64(
+            PSCI_SYSTEM_SUSPEND_64,
+            [
+                entry_point_address,
+                context_id,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+                0,
+            ],
+        )[0],
+    )
+}
+
+/// Sets the mode used by `CPU_SUSPEND`.
+pub fn set_suspend_mode<C: Call>(mode: SuspendMode) -> Result<(), Error> {
+    success_or_error_32(C::call32(PSCI_SET_SUSPEND_MODE, [mode.into(), 0, 0, 0, 0, 0, 0])[0])
+}
+
+/// Returns the amount of time the platform has spent in the given power state since cold boot.
+pub fn stat_residency<C: Call>(target_cpu: u64, power_state: u32) -> u64 {
+    C::call64(
+        PSCI_STAT_RESIDENCY_64,
+        [
+            target_cpu,
+            power_state.into(),
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+        ],
+    )[0]
+}
+
+/// Returns the number of times the platform has used the given power state since cold boot.
+pub fn stat_count<C: Call>(target_cpu: u64, power_state: u32) -> u64 {
+    C::call64(
+        PSCI_STAT_COUNT_64,
+        [
+            target_cpu,
+            power_state.into(),
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+        ],
+    )[0]
+}
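
// Editor's sketch (not part of the patch): invoking a few of the functions
// above through a conduit type implementing `Call`. `Hvc` is assumed to be the
// HVC conduit exported by the crate root, as in upstream smccc; this only
// makes sense in bare-metal aarch64 guest or firmware code.
use smccc::{psci, Hvc};

fn report_and_power_off() -> ! {
    // PSCI_VERSION packs the major version in the upper 16 bits.
    let version = psci::version::<Hvc>();
    let _major = version >> 16;
    let _minor = version & 0xffff;
    // SYSTEM_OFF does not return on success; an Err here is unexpected.
    psci::system_off::<Hvc>().expect("PSCI SYSTEM_OFF failed");
    unreachable!("still running after SYSTEM_OFF");
}
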
diff --git a/crates/smccc/src/psci/error.rs b/crates/smccc/src/psci/error.rs
new file mode 100644
index 0000000..9051eca
--- /dev/null
+++ b/crates/smccc/src/psci/error.rs
@@ -0,0 +1,104 @@
+// Copyright 2022 the authors.
+// This project is dual-licensed under Apache 2.0 and MIT terms.
+// See LICENSE-APACHE and LICENSE-MIT for details.
+
+//! PSCI error codes.
+
+pub use crate::error::SUCCESS;
+use core::fmt::{self, Display, Formatter};
+
+pub const NOT_SUPPORTED: i32 = -1;
+pub const INVALID_PARAMETERS: i32 = -2;
+pub const DENIED: i32 = -3;
+pub const ALREADY_ON: i32 = -4;
+pub const ON_PENDING: i32 = -5;
+pub const INTERNAL_FAILURE: i32 = -6;
+pub const NOT_PRESENT: i32 = -7;
+pub const DISABLED: i32 = -8;
+pub const INVALID_ADDRESS: i32 = -9;
+
+/// Standard PSCI errors.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum Error {
+    /// PSCI call not supported.
+    NotSupported,
+    /// Invalid parameters to PSCI call.
+    InvalidParameters,
+    /// PSCI call denied.
+    Denied,
+    /// Core already on.
+    AlreadyOn,
+    /// Core already being turned on.
+    OnPending,
+    /// Internal failure in PSCI call.
+    InternalFailure,
+    /// Trusted OS not present on target core.
+    NotPresent,
+    /// Core disabled.
+    Disabled,
+    /// Invalid address passed to PSCI call.
+    InvalidAddress,
+    /// An unexpected return value from a PSCI function.
+    Unknown(i64),
+}
+
+impl From<Error> for i64 {
+    fn from(error: Error) -> i64 {
+        match error {
+            Error::NotSupported => NOT_SUPPORTED.into(),
+            Error::InvalidParameters => INVALID_PARAMETERS.into(),
+            Error::Denied => DENIED.into(),
+            Error::AlreadyOn => ALREADY_ON.into(),
+            Error::OnPending => ON_PENDING.into(),
+            Error::InternalFailure => INTERNAL_FAILURE.into(),
+            Error::NotPresent => NOT_PRESENT.into(),
+            Error::Disabled => DISABLED.into(),
+            Error::InvalidAddress => INVALID_ADDRESS.into(),
+            Error::Unknown(value) => value,
+        }
+    }
+}
+
+impl From<i32> for Error {
+    fn from(value: i32) -> Self {
+        match value {
+            NOT_SUPPORTED => Error::NotSupported,
+            INVALID_PARAMETERS => Error::InvalidParameters,
+            DENIED => Error::Denied,
+            ALREADY_ON => Error::AlreadyOn,
+            ON_PENDING => Error::OnPending,
+            INTERNAL_FAILURE => Error::InternalFailure,
+            NOT_PRESENT => Error::NotPresent,
+            DISABLED => Error::Disabled,
+            INVALID_ADDRESS => Error::InvalidAddress,
+            _ => Error::Unknown(value.into()),
+        }
+    }
+}
+
+impl From<i64> for Error {
+    fn from(value: i64) -> Self {
+        if let Ok(value) = i32::try_from(value) {
+            value.into()
+        } else {
+            Error::Unknown(value)
+        }
+    }
+}
+
+impl Display for Error {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        match self {
+            Self::NotSupported => write!(f, "PSCI call not supported"),
+            Self::InvalidParameters => write!(f, "Invalid parameters to PSCI call"),
+            Self::Denied => write!(f, "PSCI call denied"),
+            Self::AlreadyOn => write!(f, "Core already on"),
+            Self::OnPending => write!(f, "Core already being turned on"),
+            Self::InternalFailure => write!(f, "Internal failure in PSCI call"),
+            Self::NotPresent => write!(f, "Trusted OS not present on target core"),
+            Self::Disabled => write!(f, "Core disabled"),
+            Self::InvalidAddress => write!(f, "Invalid address passed to PSCI call"),
+            Self::Unknown(e) => write!(f, "Unknown PSCI return value {} ({0:#x})", e),
+        }
+    }
+}
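
// Editor's sketch (not part of the patch): the conversions above map raw PSCI
// return codes to the typed error and back. The module path is assumed to be
// `smccc::psci::error` as laid out in this patch.
use smccc::psci::error::{Error, DENIED, NOT_SUPPORTED};

fn error_round_trip() {
    // Raw negative return values become the matching variant...
    assert_eq!(Error::from(NOT_SUPPORTED), Error::NotSupported);
    // ...and variants convert back to the spec-defined codes.
    assert_eq!(i64::from(Error::Denied), i64::from(DENIED));
    // Anything outside the known range is preserved as Unknown.
    assert_eq!(Error::from(-42_i64), Error::Unknown(-42));
}
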
diff --git a/crates/socket2/.cargo-checksum.json b/crates/socket2/.cargo-checksum.json
new file mode 100644
index 0000000..57d2b35
--- /dev/null
+++ b/crates/socket2/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"b90237dd0094556fa5f270e8d6612dd8d6b4d35244c397c60d6f191b5a8442d7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"5b9d3a55525334c4abdb992a49f8dd4f9853f3c061e905334c56ab1dba1bb3ff","src/lib.rs":"b4757798ca38b3f9ec9e99aeec3918080f8b52a45d677382ad91c7049b209683","src/sockaddr.rs":"1e3a30766beb175a509ed295148de57a6ce6d39382e68c066a2bc4565fa1f033","src/socket.rs":"a4ef50a384867a2278c5f1bb15d4689394926c8b2fb3f840d25ae4f6000e54cf","src/sockref.rs":"7e16a5300cdd003e591486cb3bd384bdb92044e940e3ddf327da27935dc51e22","src/sys/unix.rs":"40c7ac6af023844b74f0824a9fe4df5b3a9b37c9288aed97c10529e4fbd1e2c8","src/sys/windows.rs":"ab92ae1e961413673c58bcad42a28eedebc0b2e7f82f38908839d13cc5bbca7f"},"package":"7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9"}
\ No newline at end of file
diff --git a/crates/socket2/Android.bp b/crates/socket2/Android.bp
new file mode 100644
index 0000000..2a69a5c
--- /dev/null
+++ b/crates/socket2/Android.bp
@@ -0,0 +1,33 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_socket2_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_socket2_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libsocket2",
+    host_supported: true,
+    crate_name: "socket2",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.5.5",
+    crate_root: "src/lib.rs",
+    edition: "2021",
+    features: ["all"],
+    rustlibs: ["liblibc"],
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+    min_sdk_version: "29",
+}
diff --git a/crates/socket2/Cargo.lock b/crates/socket2/Cargo.lock
new file mode 100644
index 0000000..20b6ece
--- /dev/null
+++ b/crates/socket2/Cargo.lock
@@ -0,0 +1,83 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "libc"
+version = "0.2.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+
+[[package]]
+name = "socket2"
+version = "0.5.5"
+dependencies = [
+ "libc",
+ "windows-sys",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
diff --git a/crates/socket2/Cargo.toml b/crates/socket2/Cargo.toml
new file mode 100644
index 0000000..9250ab4
--- /dev/null
+++ b/crates/socket2/Cargo.toml
@@ -0,0 +1,87 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.63"
+name = "socket2"
+version = "0.5.5"
+authors = [
+    "Alex Crichton <alex@alexcrichton.com>",
+    "Thomas de Zeeuw <thomasdezeeuw@gmail.com>",
+]
+include = [
+    "Cargo.toml",
+    "LICENSE-APACHE",
+    "LICENSE-MIT",
+    "README.md",
+    "src/**/*.rs",
+]
+description = """
+Utilities for handling networking sockets with a maximal amount of configuration
+possible intended.
+"""
+homepage = "https://github.com/rust-lang/socket2"
+documentation = "https://docs.rs/socket2"
+readme = "README.md"
+keywords = [
+    "io",
+    "socket",
+    "network",
+]
+categories = [
+    "api-bindings",
+    "network-programming",
+]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/socket2"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = [
+    "--cfg",
+    "docsrs",
+]
+targets = [
+    "aarch64-apple-ios",
+    "aarch64-linux-android",
+    "x86_64-apple-darwin",
+    "x86_64-unknown-fuchsia",
+    "x86_64-pc-windows-msvc",
+    "x86_64-pc-solaris",
+    "x86_64-unknown-freebsd",
+    "x86_64-unknown-illumos",
+    "x86_64-unknown-linux-gnu",
+    "x86_64-unknown-linux-musl",
+    "x86_64-unknown-netbsd",
+    "x86_64-unknown-redox",
+    "armv7-linux-androideabi",
+    "i686-linux-android",
+]
+
+[package.metadata.playground]
+features = ["all"]
+
+[features]
+all = []
+
+[target."cfg(unix)".dependencies.libc]
+version = "0.2.149"
+
+[target."cfg(windows)".dependencies.windows-sys]
+version = "0.48"
+features = [
+    "Win32_Foundation",
+    "Win32_Networking_WinSock",
+    "Win32_System_IO",
+    "Win32_System_Threading",
+    "Win32_System_WindowsProgramming",
+]
diff --git a/crates/socket2/LICENSE b/crates/socket2/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/socket2/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/socket2/LICENSE-APACHE b/crates/socket2/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/crates/socket2/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/crates/socket2/LICENSE-MIT b/crates/socket2/LICENSE-MIT
new file mode 100644
index 0000000..39e0ed6
--- /dev/null
+++ b/crates/socket2/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 Alex Crichton
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/socket2/METADATA b/crates/socket2/METADATA
new file mode 100644
index 0000000..73ea070
--- /dev/null
+++ b/crates/socket2/METADATA
@@ -0,0 +1,21 @@
+# This project was upgraded manually.
+
+name: "socket2"
+description: "Utilities for handling networking sockets with a maximal amount of configuration possible intended."
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/socket2"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/socket2/socket2-0.5.5.crate"
+  }
+  version: "0.5.5"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2023
+    month: 10
+    day: 25
+  }
+}
diff --git a/crates/socket2/MODULE_LICENSE_APACHE2 b/crates/socket2/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/socket2/MODULE_LICENSE_APACHE2
diff --git a/crates/socket2/README.md b/crates/socket2/README.md
new file mode 100644
index 0000000..8bb0949
--- /dev/null
+++ b/crates/socket2/README.md
@@ -0,0 +1,85 @@
+# Socket2
+
+Socket2 is a crate that provides utilities for creating and using sockets.
+
+The goal of this crate is to create and use a socket using advanced
+configuration options (those that are not available in the types in the standard
+library) without using any unsafe code.
+
+This crate provides as direct as possible access to the system's functionality
+for sockets; this means little effort is spent on cross-platform utilities. It is
+up to the user to know how to use sockets when using this crate. *If you don't
+know how to create a socket using libc/system calls then this crate is not for
+you*. Most, if not all, functions directly relate to the equivalent system call
+with no error handling applied, so no handling errors such as `EINTR`. As a
+result using this crate can be a little wordy, but it should give you maximal
+flexibility over configuration of sockets.
+
+See the [API documentation] for more.
+
+[API documentation]: https://docs.rs/socket2
+
+# Branches
+
+Currently Socket2 supports two versions: v0.5 and v0.4. Version 0.5 is being
+developed in the master branch. Version 0.4 is developed in the
+[v0.4.x branch].
+
+[v0.4.x branch]: https://github.com/rust-lang/socket2/tree/v0.4.x
+
+# OS support
+
+Socket2 attempts to support the same OS/architectures as Rust does, see
+https://doc.rust-lang.org/nightly/rustc/platform-support.html. However, this is
+not always possible; below is the current list of supported OSs.
+
+*If your favorite OS is not on the list consider contributing it! See [issue
+#78].*
+
+[issue #78]: https://github.com/rust-lang/socket2/issues/78
+
+### Tier 1
+
+These OSs are tested with each commit in the CI and must always pass the tests.
+All functions/types/etc., excluding ones behind the `all` feature, must work on
+these OSs.
+
+* Linux
+* macOS
+* Windows
+
+### Tier 2
+
+These OSs are currently built in the CI, but not tested. Not all
+functions/types/etc. may work on these OSs, even ones **not** behind the `all`
+feature flag.
+
+* Android
+* FreeBSD
+* Fuchsia
+* iOS
+* illumos
+* NetBSD
+* Redox
+* Solaris
+
+# Minimum Supported Rust Version (MSRV)
+
+Socket2 uses 1.63.0 as MSRV.
+
+# License
+
+This project is licensed under either of
+
+ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+   https://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+   https://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in this project by you, as defined in the Apache-2.0 license,
+shall be dual licensed as above, without any additional terms or conditions.
diff --git a/crates/socket2/TEST_MAPPING b/crates/socket2/TEST_MAPPING
new file mode 100644
index 0000000..a32d61c
--- /dev/null
+++ b/crates/socket2/TEST_MAPPING
@@ -0,0 +1,29 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/async-stream"
+    },
+    {
+      "path": "external/rust/crates/futures-util"
+    },
+    {
+      "path": "external/rust/crates/tokio"
+    },
+    {
+      "path": "external/rust/crates/tokio-test"
+    },
+    {
+      "path": "external/uwb/src"
+    },
+    {
+      "path": "packages/modules/DnsResolver"
+    },
+    {
+      "path": "system/security/keystore2"
+    },
+    {
+      "path": "system/security/keystore2/legacykeystore"
+    }
+  ]
+}
diff --git a/crates/socket2/cargo_embargo.json b/crates/socket2/cargo_embargo.json
new file mode 100644
index 0000000..bc82b96
--- /dev/null
+++ b/crates/socket2/cargo_embargo.json
@@ -0,0 +1,7 @@
+{
+  "features": [
+    "all"
+  ],
+  "min_sdk_version": "29",
+  "run_cargo": false
+}
diff --git a/crates/socket2/src/lib.rs b/crates/socket2/src/lib.rs
new file mode 100644
index 0000000..127e070
--- /dev/null
+++ b/crates/socket2/src/lib.rs
@@ -0,0 +1,722 @@
+// Copyright 2015 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Utilities for creating and using sockets.
+//!
+//! The goal of this crate is to create and use a socket using advanced
+//! configuration options (those that are not available in the types in the
+//! standard library) without using any unsafe code.
+//!
+//! This crate provides as direct as possible access to the system's
+//! functionality for sockets; this means little effort is spent on providing
+//! cross-platform utilities. It is up to the user to know how to use sockets
+//! when using this crate. *If you don't know how to create a socket using
+//! libc/system calls then this crate is not for you*. Most, if not all,
+//! functions directly relate to the equivalent system call with no error
+//! handling applied, so no handling errors such as [`EINTR`]. As a result using
+//! this crate can be a little wordy, but it should give you maximal flexibility
+//! over configuration of sockets.
+//!
+//! [`EINTR`]: std::io::ErrorKind::Interrupted
+//!
+//! # Examples
+//!
+//! ```no_run
+//! # fn main() -> std::io::Result<()> {
+//! use std::net::{SocketAddr, TcpListener};
+//! use socket2::{Socket, Domain, Type};
+//!
+//! // Create a TCP listener bound to two addresses.
+//! let socket = Socket::new(Domain::IPV6, Type::STREAM, None)?;
+//!
+//! socket.set_only_v6(false)?;
+//! let address: SocketAddr = "[::1]:12345".parse().unwrap();
+//! socket.bind(&address.into())?;
+//! socket.listen(128)?;
+//!
+//! let listener: TcpListener = socket.into();
+//! // ...
+//! # drop(listener);
+//! # Ok(()) }
+//! ```
+//!
+//! ## Features
+//!
+//! This crate has a single feature `all`, which enables all functions even ones
+//! that are not available on all OSs.
+
+#![deny(missing_docs, missing_debug_implementations, rust_2018_idioms)]
+// Show required OS/features on docs.rs.
+#![cfg_attr(docsrs, feature(doc_cfg))]
+// Disallow warnings when running tests.
+#![cfg_attr(test, deny(warnings))]
+// Disallow warnings in examples.
+#![doc(test(attr(deny(warnings))))]
+
+use std::fmt;
+#[cfg(not(target_os = "redox"))]
+use std::io::IoSlice;
+#[cfg(not(target_os = "redox"))]
+use std::marker::PhantomData;
+#[cfg(not(target_os = "redox"))]
+use std::mem;
+use std::mem::MaybeUninit;
+use std::net::SocketAddr;
+use std::ops::{Deref, DerefMut};
+use std::time::Duration;
+
+/// Macro to implement `fmt::Debug` for a type, printing the constant names
+/// rather than a number.
+///
+/// Note this is used in the `sys` module and thus must be defined before
+/// defining the modules.
+macro_rules! impl_debug {
+    (
+        // Type name for which to implement `fmt::Debug`.
+        $type: path,
+        $(
+            $(#[$target: meta])*
+            // The flag(s) to check.
+            // Need to specific the libc crate because Windows doesn't use
+            // `libc` but `windows_sys`.
+            $libc: ident :: $flag: ident
+        ),+ $(,)*
+    ) => {
+        impl std::fmt::Debug for $type {
+            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                let string = match self.0 {
+                    $(
+                        $(#[$target])*
+                        $libc :: $flag => stringify!($flag),
+                    )+
+                    n => return write!(f, "{n}"),
+                };
+                f.write_str(string)
+            }
+        }
+    };
+}
+
+/// Macro to convert from one network type to another.
+macro_rules! from {
+    ($from: ty, $for: ty) => {
+        impl From<$from> for $for {
+            fn from(socket: $from) -> $for {
+                #[cfg(unix)]
+                unsafe {
+                    <$for>::from_raw_fd(socket.into_raw_fd())
+                }
+                #[cfg(windows)]
+                unsafe {
+                    <$for>::from_raw_socket(socket.into_raw_socket())
+                }
+            }
+        }
+    };
+}
+
+/// Link to online documentation for (almost) all supported OSs.
+#[rustfmt::skip]
+macro_rules! man_links {
+    // Links to all OSs.
+    ($syscall: tt ( $section: tt ) ) => {
+        concat!(
+            man_links!(__ intro),
+            man_links!(__ unix $syscall($section)),
+            man_links!(__ windows $syscall($section)),
+        )
+    };
+    // Links to Unix-like OSs.
+    (unix: $syscall: tt ( $section: tt ) ) => {
+        concat!(
+            man_links!(__ intro),
+            man_links!(__ unix $syscall($section)),
+        )
+    };
+    // Links to Windows only.
+    (windows: $syscall: tt ( $section: tt ) ) => {
+        concat!(
+            man_links!(__ intro),
+            man_links!(__ windows $syscall($section)),
+        )
+    };
+    // Internals.
+    (__ intro) => {
+        "\n\nAdditional documentation can be found in manual of the OS:\n\n"
+    };
+    // List for Unix-like OSs.
+    (__ unix $syscall: tt ( $section: tt ) ) => {
+        concat!(
+            " * DragonFly BSD: <https://man.dragonflybsd.org/?command=", stringify!($syscall), "&section=", stringify!($section), ">\n",
+            " * FreeBSD: <https://www.freebsd.org/cgi/man.cgi?query=", stringify!($syscall), "&sektion=", stringify!($section), ">\n",
+            " * Linux: <https://man7.org/linux/man-pages/man", stringify!($section), "/", stringify!($syscall), ".", stringify!($section), ".html>\n",
+            " * macOS: <https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/", stringify!($syscall), ".", stringify!($section), ".html> (archived, actually for iOS)\n",
+            " * NetBSD: <https://man.netbsd.org/", stringify!($syscall), ".", stringify!($section), ">\n",
+            " * OpenBSD: <https://man.openbsd.org/", stringify!($syscall), ".", stringify!($section), ">\n",
+            " * iOS: <https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/", stringify!($syscall), ".", stringify!($section), ".html> (archived)\n",
+            " * illumos: <https://illumos.org/man/3SOCKET/", stringify!($syscall), ">\n",
+        )
+    };
+    // List for Window (so just Windows).
+    (__ windows $syscall: tt ( $section: tt ) ) => {
+        concat!(
+            " * Windows: <https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-", stringify!($syscall), ">\n",
+        )
+    };
+}
+
+mod sockaddr;
+mod socket;
+mod sockref;
+
+#[cfg_attr(unix, path = "sys/unix.rs")]
+#[cfg_attr(windows, path = "sys/windows.rs")]
+mod sys;
+
+#[cfg(not(any(windows, unix)))]
+compile_error!("Socket2 doesn't support the compile target");
+
+use sys::c_int;
+
+pub use sockaddr::SockAddr;
+pub use socket::Socket;
+pub use sockref::SockRef;
+
+#[cfg(not(any(
+    target_os = "haiku",
+    target_os = "illumos",
+    target_os = "netbsd",
+    target_os = "redox",
+    target_os = "solaris",
+)))]
+pub use socket::InterfaceIndexOrAddress;
+
+/// Specification of the communication domain for a socket.
+///
+/// This is a newtype wrapper around an integer which provides a nicer API in
+/// addition to an injection point for documentation. Convenience constants such
+/// as [`Domain::IPV4`], [`Domain::IPV6`], etc, are provided to avoid reaching
+/// into libc for various constants.
+///
+/// This type is freely interconvertible with C's `int` type, however, if a raw
+/// value needs to be provided.
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub struct Domain(c_int);
+
+impl Domain {
+    /// Domain for IPv4 communication, corresponding to `AF_INET`.
+    pub const IPV4: Domain = Domain(sys::AF_INET);
+
+    /// Domain for IPv6 communication, corresponding to `AF_INET6`.
+    pub const IPV6: Domain = Domain(sys::AF_INET6);
+
+    /// Domain for Unix socket communication, corresponding to `AF_UNIX`.
+    pub const UNIX: Domain = Domain(sys::AF_UNIX);
+
+    /// Returns the correct domain for `address`.
+    pub const fn for_address(address: SocketAddr) -> Domain {
+        match address {
+            SocketAddr::V4(_) => Domain::IPV4,
+            SocketAddr::V6(_) => Domain::IPV6,
+        }
+    }
+}
+
+impl From<c_int> for Domain {
+    fn from(d: c_int) -> Domain {
+        Domain(d)
+    }
+}
+
+impl From<Domain> for c_int {
+    fn from(d: Domain) -> c_int {
+        d.0
+    }
+}
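
// Editor's sketch (not part of the vendored file): `for_address` picks the
// matching constant for a std address; the newtype also converts to and from
// the raw `AF_*` integer via the `From` impls above.
fn domain_for(addr: std::net::SocketAddr) -> Domain {
    // Returns Domain::IPV4 for V4 addresses and Domain::IPV6 for V6 addresses.
    Domain::for_address(addr)
}
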
+
+/// Specification of communication semantics on a socket.
+///
+/// This is a newtype wrapper around an integer which provides a nicer API in
+/// addition to an injection point for documentation. Convenience constants such
+/// as [`Type::STREAM`], [`Type::DGRAM`], etc, are provided to avoid reaching
+/// into libc for various constants.
+///
+/// This type is freely interconvertible with C's `int` type, however, if a raw
+/// value needs to be provided.
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub struct Type(c_int);
+
+impl Type {
+    /// Type corresponding to `SOCK_STREAM`.
+    ///
+    /// Used for protocols such as TCP.
+    pub const STREAM: Type = Type(sys::SOCK_STREAM);
+
+    /// Type corresponding to `SOCK_DGRAM`.
+    ///
+    /// Used for protocols such as UDP.
+    pub const DGRAM: Type = Type(sys::SOCK_DGRAM);
+
+    /// Type corresponding to `SOCK_DCCP`.
+    ///
+    /// Used for the DCCP protocol.
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub const DCCP: Type = Type(sys::SOCK_DCCP);
+
+    /// Type corresponding to `SOCK_SEQPACKET`.
+    #[cfg(all(feature = "all", not(target_os = "espidf")))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", not(target_os = "espidf")))))]
+    pub const SEQPACKET: Type = Type(sys::SOCK_SEQPACKET);
+
+    /// Type corresponding to `SOCK_RAW`.
+    #[cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf")))))
+    )]
+    pub const RAW: Type = Type(sys::SOCK_RAW);
+}
+
+impl From<c_int> for Type {
+    fn from(t: c_int) -> Type {
+        Type(t)
+    }
+}
+
+impl From<Type> for c_int {
+    fn from(t: Type) -> c_int {
+        t.0
+    }
+}
+
+/// Protocol specification used for creating sockets via `Socket::new`.
+///
+/// This is a newtype wrapper around an integer which provides a nicer API in
+/// addition to an injection point for documentation.
+///
+/// This type is freely interconvertible with C's `int` type, however, if a raw
+/// value needs to be provided.
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub struct Protocol(c_int);
+
+impl Protocol {
+    /// Protocol corresponding to `ICMPv4`.
+    pub const ICMPV4: Protocol = Protocol(sys::IPPROTO_ICMP);
+
+    /// Protocol corresponding to `ICMPv6`.
+    pub const ICMPV6: Protocol = Protocol(sys::IPPROTO_ICMPV6);
+
+    /// Protocol corresponding to `TCP`.
+    pub const TCP: Protocol = Protocol(sys::IPPROTO_TCP);
+
+    /// Protocol corresponding to `UDP`.
+    pub const UDP: Protocol = Protocol(sys::IPPROTO_UDP);
+
+    #[cfg(target_os = "linux")]
+    /// Protocol corresponding to `MPTCP`.
+    pub const MPTCP: Protocol = Protocol(sys::IPPROTO_MPTCP);
+
+    /// Protocol corresponding to `DCCP`.
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub const DCCP: Protocol = Protocol(sys::IPPROTO_DCCP);
+
+    /// Protocol corresponding to `SCTP`.
+    #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))]
+    pub const SCTP: Protocol = Protocol(sys::IPPROTO_SCTP);
+
+    /// Protocol corresponding to `UDPLITE`.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "linux",
+        )
+    ))]
+    pub const UDPLITE: Protocol = Protocol(sys::IPPROTO_UDPLITE);
+
+    /// Protocol corresponding to `DIVERT`.
+    #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "openbsd")))]
+    pub const DIVERT: Protocol = Protocol(sys::IPPROTO_DIVERT);
+}
+
+impl From<c_int> for Protocol {
+    fn from(p: c_int) -> Protocol {
+        Protocol(p)
+    }
+}
+
+impl From<Protocol> for c_int {
+    fn from(p: Protocol) -> c_int {
+        p.0
+    }
+}
+
+/// Flags for incoming messages.
+///
+/// Flags provide additional information about incoming messages.
+#[cfg(not(target_os = "redox"))]
+#[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub struct RecvFlags(c_int);
+
+#[cfg(not(target_os = "redox"))]
+impl RecvFlags {
+    /// Check if the message contains a truncated datagram.
+    ///
+    /// This flag is only used for datagram-based sockets,
+    /// not for stream sockets.
+    ///
+    /// On Unix this corresponds to the `MSG_TRUNC` flag.
+    /// On Windows this corresponds to the `WSAEMSGSIZE` error code.
+    #[cfg(not(target_os = "espidf"))]
+    pub const fn is_truncated(self) -> bool {
+        self.0 & sys::MSG_TRUNC != 0
+    }
+}
+
+/// A version of [`IoSliceMut`] that allows the buffer to be uninitialised.
+///
+/// [`IoSliceMut`]: std::io::IoSliceMut
+#[repr(transparent)]
+pub struct MaybeUninitSlice<'a>(sys::MaybeUninitSlice<'a>);
+
+impl<'a> fmt::Debug for MaybeUninitSlice<'a> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(self.0.as_slice(), fmt)
+    }
+}
+
+impl<'a> MaybeUninitSlice<'a> {
+    /// Creates a new `MaybeUninitSlice` wrapping a byte slice.
+    ///
+    /// # Panics
+    ///
+    /// Panics on Windows if the slice is larger than 4GB.
+    pub fn new(buf: &'a mut [MaybeUninit<u8>]) -> MaybeUninitSlice<'a> {
+        MaybeUninitSlice(sys::MaybeUninitSlice::new(buf))
+    }
+}
+
+impl<'a> Deref for MaybeUninitSlice<'a> {
+    type Target = [MaybeUninit<u8>];
+
+    fn deref(&self) -> &[MaybeUninit<u8>] {
+        self.0.as_slice()
+    }
+}
+
+impl<'a> DerefMut for MaybeUninitSlice<'a> {
+    fn deref_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+        self.0.as_mut_slice()
+    }
+}
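
// Editor's sketch (not part of the vendored file): wrapping an uninitialised
// buffer so it can be handed to the vectored receive calls that take
// `&mut [MaybeUninitSlice<'_>]`.
fn uninit_recv_buffer(storage: &mut [MaybeUninit<u8>]) -> MaybeUninitSlice<'_> {
    MaybeUninitSlice::new(storage)
}
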
+
+/// Configures a socket's TCP keepalive parameters.
+///
+/// See [`Socket::set_tcp_keepalive`].
+#[derive(Debug, Clone)]
+pub struct TcpKeepalive {
+    #[cfg_attr(any(target_os = "openbsd", target_os = "vita"), allow(dead_code))]
+    time: Option<Duration>,
+    #[cfg(not(any(
+        target_os = "openbsd",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "nto",
+        target_os = "espidf",
+        target_os = "vita",
+    )))]
+    interval: Option<Duration>,
+    #[cfg(not(any(
+        target_os = "openbsd",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "windows",
+        target_os = "nto",
+        target_os = "espidf",
+        target_os = "vita",
+    )))]
+    retries: Option<u32>,
+}
+
+impl TcpKeepalive {
+    /// Returns a new, empty set of TCP keepalive parameters.
+    pub const fn new() -> TcpKeepalive {
+        TcpKeepalive {
+            time: None,
+            #[cfg(not(any(
+                target_os = "openbsd",
+                target_os = "redox",
+                target_os = "solaris",
+                target_os = "nto",
+                target_os = "espidf",
+                target_os = "vita",
+            )))]
+            interval: None,
+            #[cfg(not(any(
+                target_os = "openbsd",
+                target_os = "redox",
+                target_os = "solaris",
+                target_os = "windows",
+                target_os = "nto",
+                target_os = "espidf",
+                target_os = "vita",
+            )))]
+            retries: None,
+        }
+    }
+
+    /// Set the amount of time after which TCP keepalive probes will be sent on
+    /// idle connections.
+    ///
+    /// This will set `TCP_KEEPALIVE` on macOS and iOS, and
+    /// `TCP_KEEPIDLE` on all other Unix operating systems, except
+    /// OpenBSD and Haiku which don't support any way to set this
+    /// option. On Windows, this sets the value of the `tcp_keepalive`
+    /// struct's `keepalivetime` field.
+    ///
+    /// Some platforms specify this value in seconds, so sub-second
+    /// specifications may be omitted.
+    pub const fn with_time(self, time: Duration) -> Self {
+        Self {
+            time: Some(time),
+            ..self
+        }
+    }
+
+    /// Set the value of the `TCP_KEEPINTVL` option. On Windows, this sets the
+    /// value of the `tcp_keepalive` struct's `keepaliveinterval` field.
+    ///
+    /// Sets the time interval between TCP keepalive probes.
+    ///
+    /// Some platforms specify this value in seconds, so sub-second
+    /// specifications may be omitted.
+    #[cfg(any(
+        target_os = "android",
+        target_os = "dragonfly",
+        target_os = "freebsd",
+        target_os = "fuchsia",
+        target_os = "illumos",
+        target_os = "ios",
+        target_os = "linux",
+        target_os = "macos",
+        target_os = "netbsd",
+        target_os = "tvos",
+        target_os = "watchos",
+        target_os = "windows",
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "illumos",
+            target_os = "ios",
+            target_os = "linux",
+            target_os = "macos",
+            target_os = "netbsd",
+            target_os = "tvos",
+            target_os = "watchos",
+            target_os = "windows",
+        )))
+    )]
+    pub const fn with_interval(self, interval: Duration) -> Self {
+        Self {
+            interval: Some(interval),
+            ..self
+        }
+    }
+
+    /// Set the value of the `TCP_KEEPCNT` option.
+    ///
+    /// Set the maximum number of TCP keepalive probes that will be sent before
+    /// dropping a connection, if TCP keepalive is enabled on this socket.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "illumos",
+            target_os = "ios",
+            target_os = "linux",
+            target_os = "macos",
+            target_os = "netbsd",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "android",
+                target_os = "dragonfly",
+                target_os = "freebsd",
+                target_os = "fuchsia",
+                target_os = "illumos",
+                target_os = "ios",
+                target_os = "linux",
+                target_os = "macos",
+                target_os = "netbsd",
+                target_os = "tvos",
+                target_os = "watchos",
+            )
+        )))
+    )]
+    pub const fn with_retries(self, retries: u32) -> Self {
+        Self {
+            retries: Some(retries),
+            ..self
+        }
+    }
+}
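
// Editor's sketch (not part of the vendored file): building keepalive
// parameters with the builder above. `Socket::set_tcp_keepalive` is assumed to
// accept them, as in the published socket2 API.
fn enable_keepalive(socket: &Socket) -> std::io::Result<()> {
    let params = TcpKeepalive::new().with_time(std::time::Duration::from_secs(60));
    socket.set_tcp_keepalive(&params)
}
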
+
+/// Configuration of a `sendmsg(2)` system call.
+///
+/// This wraps `msghdr` on Unix and `WSAMSG` on Windows. Also see [`MsgHdrMut`]
+/// for the variant used by `recvmsg(2)`.
+#[cfg(not(target_os = "redox"))]
+pub struct MsgHdr<'addr, 'bufs, 'control> {
+    inner: sys::msghdr,
+    #[allow(clippy::type_complexity)]
+    _lifetimes: PhantomData<(&'addr SockAddr, &'bufs IoSlice<'bufs>, &'control [u8])>,
+}
+
+#[cfg(not(target_os = "redox"))]
+impl<'addr, 'bufs, 'control> MsgHdr<'addr, 'bufs, 'control> {
+    /// Create a new `MsgHdr` with all empty/zero fields.
+    #[allow(clippy::new_without_default)]
+    pub fn new() -> MsgHdr<'addr, 'bufs, 'control> {
+        // SAFETY: all zero is valid for `msghdr` and `WSAMSG`.
+        MsgHdr {
+            inner: unsafe { mem::zeroed() },
+            _lifetimes: PhantomData,
+        }
+    }
+
+    /// Set the address (name) of the message.
+    ///
+    /// Corresponds to setting `msg_name` and `msg_namelen` on Unix and `name`
+    /// and `namelen` on Windows.
+    pub fn with_addr(mut self, addr: &'addr SockAddr) -> Self {
+        sys::set_msghdr_name(&mut self.inner, addr);
+        self
+    }
+
+    /// Set the buffer(s) of the message.
+    ///
+    /// Corresponds to setting `msg_iov` and `msg_iovlen` on Unix and `lpBuffers`
+    /// and `dwBufferCount` on Windows.
+    pub fn with_buffers(mut self, bufs: &'bufs [IoSlice<'_>]) -> Self {
+        let ptr = bufs.as_ptr() as *mut _;
+        sys::set_msghdr_iov(&mut self.inner, ptr, bufs.len());
+        self
+    }
+
+    /// Set the control buffer of the message.
+    ///
+    /// Corresponds to setting `msg_control` and `msg_controllen` on Unix and
+    /// `Control` on Windows.
+    pub fn with_control(mut self, buf: &'control [u8]) -> Self {
+        let ptr = buf.as_ptr() as *mut _;
+        sys::set_msghdr_control(&mut self.inner, ptr, buf.len());
+        self
+    }
+
+    /// Set the flags of the message.
+    ///
+    /// Corresponds to setting `msg_flags` on Unix and `dwFlags` on Windows.
+    pub fn with_flags(mut self, flags: sys::c_int) -> Self {
+        sys::set_msghdr_flags(&mut self.inner, flags);
+        self
+    }
+}
+
+#[cfg(not(target_os = "redox"))]
+impl<'name, 'bufs, 'control> fmt::Debug for MsgHdr<'name, 'bufs, 'control> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        "MsgHdr".fmt(fmt)
+    }
+}
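
// Editor's sketch (not part of the vendored file): assembling a `MsgHdr` for a
// vectored send. `Socket::sendmsg` is assumed to consume it, as in the
// published socket2 API; neither is available on Redox.
fn send_one(socket: &Socket, addr: &SockAddr, payload: &[u8]) -> std::io::Result<usize> {
    let bufs = [std::io::IoSlice::new(payload)];
    let msg = MsgHdr::new().with_addr(addr).with_buffers(&bufs);
    socket.sendmsg(&msg, 0)
}
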
+
+/// Configuration of a `recvmsg(2)` system call.
+///
+/// This wraps `msghdr` on Unix and `WSAMSG` on Windows. Also see [`MsgHdr`] for
+/// the variant used by `sendmsg(2)`.
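+///
+/// # Examples
+///
+/// A minimal sketch, assuming a Unix platform and a bound UDP socket with a
+/// datagram waiting to be read:
+///
+/// ```no_run
+/// # fn main() -> std::io::Result<()> {
+/// # #[cfg(unix)] {
+/// use std::mem::MaybeUninit;
+/// use std::net::SocketAddr;
+///
+/// use socket2::{Domain, MaybeUninitSlice, MsgHdrMut, Socket, Type};
+///
+/// let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
+/// socket.bind(&"127.0.0.1:0".parse::<SocketAddr>().unwrap().into())?;
+///
+/// // Receive into an uninitialised buffer; `recvmsg` initialises what it reads.
+/// let mut buf = [MaybeUninit::<u8>::uninit(); 1024];
+/// let mut bufs = [MaybeUninitSlice::new(&mut buf)];
+/// let mut msg = MsgHdrMut::new().with_buffers(&mut bufs);
+/// let received = socket.recvmsg(&mut msg, 0)?;
+/// # drop(received);
+/// # }
+/// # Ok(())
+/// # }
+/// ```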
+#[cfg(not(target_os = "redox"))]
+pub struct MsgHdrMut<'addr, 'bufs, 'control> {
+    inner: sys::msghdr,
+    #[allow(clippy::type_complexity)]
+    _lifetimes: PhantomData<(
+        &'addr mut SockAddr,
+        &'bufs mut MaybeUninitSlice<'bufs>,
+        &'control mut [u8],
+    )>,
+}
+
+#[cfg(not(target_os = "redox"))]
+impl<'addr, 'bufs, 'control> MsgHdrMut<'addr, 'bufs, 'control> {
+    /// Create a new `MsgHdrMut` with all empty/zero fields.
+    #[allow(clippy::new_without_default)]
+    pub fn new() -> MsgHdrMut<'addr, 'bufs, 'control> {
+        // SAFETY: all zero is valid for `msghdr` and `WSAMSG`.
+        MsgHdrMut {
+            inner: unsafe { mem::zeroed() },
+            _lifetimes: PhantomData,
+        }
+    }
+
+    /// Set the mutable address (name) of the message.
+    ///
+    /// Corresponds to setting `msg_name` and `msg_namelen` on Unix and `name`
+    /// and `namelen` on Windows.
+    #[allow(clippy::needless_pass_by_ref_mut)]
+    pub fn with_addr(mut self, addr: &'addr mut SockAddr) -> Self {
+        sys::set_msghdr_name(&mut self.inner, addr);
+        self
+    }
+
+    /// Set the mutable buffer(s) of the message.
+    ///
+    /// Corresponds to setting `msg_iov` and `msg_iovlen` on Unix and `lpBuffers`
+    /// and `dwBufferCount` on Windows.
+    pub fn with_buffers(mut self, bufs: &'bufs mut [MaybeUninitSlice<'_>]) -> Self {
+        sys::set_msghdr_iov(&mut self.inner, bufs.as_mut_ptr().cast(), bufs.len());
+        self
+    }
+
+    /// Set the mutable control buffer of the message.
+    ///
+    /// Corresponds to setting `msg_control` and `msg_controllen` on Unix and
+    /// `Control` on Windows.
+    pub fn with_control(mut self, buf: &'control mut [MaybeUninit<u8>]) -> Self {
+        sys::set_msghdr_control(&mut self.inner, buf.as_mut_ptr().cast(), buf.len());
+        self
+    }
+
+    /// Returns the flags of the message.
+    pub fn flags(&self) -> RecvFlags {
+        sys::msghdr_flags(&self.inner)
+    }
+}
+
+#[cfg(not(target_os = "redox"))]
+impl<'name, 'bufs, 'control> fmt::Debug for MsgHdrMut<'name, 'bufs, 'control> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        "MsgHdrMut".fmt(fmt)
+    }
+}
diff --git a/crates/socket2/src/sockaddr.rs b/crates/socket2/src/sockaddr.rs
new file mode 100644
index 0000000..6df22fd
--- /dev/null
+++ b/crates/socket2/src/sockaddr.rs
@@ -0,0 +1,572 @@
+use std::hash::Hash;
+use std::mem::{self, size_of, MaybeUninit};
+use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6};
+use std::path::Path;
+use std::{fmt, io, ptr};
+
+#[cfg(windows)]
+use windows_sys::Win32::Networking::WinSock::SOCKADDR_IN6_0;
+
+use crate::sys::{
+    c_int, sa_family_t, sockaddr, sockaddr_in, sockaddr_in6, sockaddr_storage, socklen_t, AF_INET,
+    AF_INET6, AF_UNIX,
+};
+use crate::Domain;
+
+/// The address of a socket.
+///
+/// `SockAddr`s may be constructed directly to and from the standard library
+/// [`SocketAddr`], [`SocketAddrV4`], and [`SocketAddrV6`] types.
+#[derive(Clone)]
+pub struct SockAddr {
+    storage: sockaddr_storage,
+    len: socklen_t,
+}
+
+#[allow(clippy::len_without_is_empty)]
+impl SockAddr {
+    /// Create a `SockAddr` from the underlying storage and its length.
+    ///
+    /// # Safety
+    ///
+    /// Caller must ensure that the address family and length match the type of
+    /// storage address. For example if `storage.ss_family` is set to `AF_INET`
+    /// the `storage` must be initialised as `sockaddr_in`, setting the content
+    /// and length appropriately.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # fn main() -> std::io::Result<()> {
+    /// # #[cfg(unix)] {
+    /// use std::io;
+    /// use std::mem;
+    /// use std::os::unix::io::AsRawFd;
+    ///
+    /// use socket2::{SockAddr, Socket, Domain, Type};
+    ///
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    ///
+    /// // Initialise a `SocketAddr` by calling `getsockname(2)`.
+    /// let mut addr_storage: libc::sockaddr_storage = unsafe { mem::zeroed() };
+    /// let mut len = mem::size_of_val(&addr_storage) as libc::socklen_t;
+    ///
+    /// // The `getsockname(2)` system call will initialise `storage` for
+    /// // us, setting `len` to the correct length.
+    /// let res = unsafe {
+    ///     libc::getsockname(
+    ///         socket.as_raw_fd(),
+    ///         (&mut addr_storage as *mut libc::sockaddr_storage).cast(),
+    ///         &mut len,
+    ///     )
+    /// };
+    /// if res == -1 {
+    ///     return Err(io::Error::last_os_error());
+    /// }
+    ///
+    /// let address = unsafe { SockAddr::new(addr_storage, len) };
+    /// # drop(address);
+    /// # }
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub const unsafe fn new(storage: sockaddr_storage, len: socklen_t) -> SockAddr {
+        SockAddr { storage, len }
+    }
+
+    /// Initialise a `SockAddr` by calling the function `init`.
+    ///
+    /// The type of the address storage and length passed to the function `init`
+    /// is OS/architecture specific.
+    ///
+    /// The address is zeroed before `init` is called and is thus valid to
+    /// dereference and read from. The length is initialised to the maximum length
+    /// of the storage.
+    ///
+    /// # Safety
+    ///
+    /// Caller must ensure that the address family and length match the type of
+    /// storage address. For example if `storage.ss_family` is set to `AF_INET`
+    /// the `storage` must be initialised as `sockaddr_in`, setting the content
+    /// and length appropriately.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # fn main() -> std::io::Result<()> {
+    /// # #[cfg(unix)] {
+    /// use std::io;
+    /// use std::os::unix::io::AsRawFd;
+    ///
+    /// use socket2::{SockAddr, Socket, Domain, Type};
+    ///
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    ///
+    /// // Initialise a `SocketAddr` by calling `getsockname(2)`.
+    /// let (_, address) = unsafe {
+    ///     SockAddr::try_init(|addr_storage, len| {
+    ///         // The `getsockname(2)` system call will initialise `storage` for
+    ///         // us, setting `len` to the correct length.
+    ///         if libc::getsockname(socket.as_raw_fd(), addr_storage.cast(), len) == -1 {
+    ///             Err(io::Error::last_os_error())
+    ///         } else {
+    ///             Ok(())
+    ///         }
+    ///     })
+    /// }?;
+    /// # drop(address);
+    /// # }
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub unsafe fn try_init<F, T>(init: F) -> io::Result<(T, SockAddr)>
+    where
+        F: FnOnce(*mut sockaddr_storage, *mut socklen_t) -> io::Result<T>,
+    {
+        const STORAGE_SIZE: socklen_t = size_of::<sockaddr_storage>() as socklen_t;
+        // NOTE: `SockAddr::unix` depends on the storage being zeroed before
+        // calling `init`.
+        // NOTE: calling `recvfrom` with an empty buffer also depends on the
+        // storage being zeroed before calling `init` as the OS might not
+        // initialise it.
+        let mut storage = MaybeUninit::<sockaddr_storage>::zeroed();
+        let mut len = STORAGE_SIZE;
+        init(storage.as_mut_ptr(), &mut len).map(|res| {
+            debug_assert!(len <= STORAGE_SIZE, "overflown address storage");
+            let addr = SockAddr {
+                // Safety: zeroed-out `sockaddr_storage` is valid, caller must
+                // ensure at least `len` bytes are valid.
+                storage: storage.assume_init(),
+                len,
+            };
+            (res, addr)
+        })
+    }
+
+    /// Constructs a `SockAddr` with the family `AF_UNIX` and the provided path.
+    ///
+    /// Returns an error if the path is longer than `SUN_LEN`.
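+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (the path is illustrative), assuming a Unix platform:
+    ///
+    /// ```
+    /// # fn main() -> std::io::Result<()> {
+    /// # #[cfg(unix)] {
+    /// use socket2::SockAddr;
+    ///
+    /// // Only builds the address; nothing is created on the filesystem.
+    /// let addr = SockAddr::unix("/tmp/socket2-example.sock")?;
+    /// assert!(addr.is_unix());
+    /// # }
+    /// # Ok(())
+    /// # }
+    /// ```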
+    pub fn unix<P>(path: P) -> io::Result<SockAddr>
+    where
+        P: AsRef<Path>,
+    {
+        crate::sys::unix_sockaddr(path.as_ref())
+    }
+
+    /// Set the length of the address.
+    ///
+    /// # Safety
+    ///
+    /// Caller must ensure that the first `length` bytes of the address are properly
+    /// initialised.
+    pub unsafe fn set_length(&mut self, length: socklen_t) {
+        self.len = length;
+    }
+
+    /// Returns this address's family.
+    pub const fn family(&self) -> sa_family_t {
+        self.storage.ss_family
+    }
+
+    /// Returns this address's `Domain`.
+    pub const fn domain(&self) -> Domain {
+        Domain(self.storage.ss_family as c_int)
+    }
+
+    /// Returns the size of this address in bytes.
+    pub const fn len(&self) -> socklen_t {
+        self.len
+    }
+
+    /// Returns a raw pointer to the address.
+    pub const fn as_ptr(&self) -> *const sockaddr {
+        ptr::addr_of!(self.storage).cast()
+    }
+
+    /// Returns the address as the storage.
+    pub const fn as_storage(self) -> sockaddr_storage {
+        self.storage
+    }
+
+    /// Returns true if this address is in the `AF_INET` (IPv4) family, false otherwise.
+    pub const fn is_ipv4(&self) -> bool {
+        self.storage.ss_family == AF_INET as sa_family_t
+    }
+
+    /// Returns true if this address is in the `AF_INET6` (IPv6) family, false
+    /// otherwise.
+    pub const fn is_ipv6(&self) -> bool {
+        self.storage.ss_family == AF_INET6 as sa_family_t
+    }
+
+    /// Returns true if this address is of a unix socket (for local interprocess communication),
+    /// i.e. it is from the `AF_UNIX` family, false otherwise.
+    pub fn is_unix(&self) -> bool {
+        self.storage.ss_family == AF_UNIX as sa_family_t
+    }
+
+    /// Returns this address as a `SocketAddr` if it is in the `AF_INET` (IPv4)
+    /// or `AF_INET6` (IPv6) family, otherwise returns `None`.
+    pub fn as_socket(&self) -> Option<SocketAddr> {
+        if self.storage.ss_family == AF_INET as sa_family_t {
+            // SAFETY: if the `ss_family` field is `AF_INET` then storage must
+            // be a `sockaddr_in`.
+            let addr = unsafe { &*(ptr::addr_of!(self.storage).cast::<sockaddr_in>()) };
+            let ip = crate::sys::from_in_addr(addr.sin_addr);
+            let port = u16::from_be(addr.sin_port);
+            Some(SocketAddr::V4(SocketAddrV4::new(ip, port)))
+        } else if self.storage.ss_family == AF_INET6 as sa_family_t {
+            // SAFETY: if the `ss_family` field is `AF_INET6` then storage must
+            // be a `sockaddr_in6`.
+            let addr = unsafe { &*(ptr::addr_of!(self.storage).cast::<sockaddr_in6>()) };
+            let ip = crate::sys::from_in6_addr(addr.sin6_addr);
+            let port = u16::from_be(addr.sin6_port);
+            Some(SocketAddr::V6(SocketAddrV6::new(
+                ip,
+                port,
+                addr.sin6_flowinfo,
+                #[cfg(unix)]
+                addr.sin6_scope_id,
+                #[cfg(windows)]
+                unsafe {
+                    addr.Anonymous.sin6_scope_id
+                },
+            )))
+        } else {
+            None
+        }
+    }
+
+    /// Returns this address as a [`SocketAddrV4`] if it is in the `AF_INET`
+    /// family.
+    pub fn as_socket_ipv4(&self) -> Option<SocketAddrV4> {
+        match self.as_socket() {
+            Some(SocketAddr::V4(addr)) => Some(addr),
+            _ => None,
+        }
+    }
+
+    /// Returns this address as a [`SocketAddrV6`] if it is in the `AF_INET6`
+    /// family.
+    pub fn as_socket_ipv6(&self) -> Option<SocketAddrV6> {
+        match self.as_socket() {
+            Some(SocketAddr::V6(addr)) => Some(addr),
+            _ => None,
+        }
+    }
+
+    /// Returns the initialised storage bytes.
+    fn as_bytes(&self) -> &[u8] {
+        // SAFETY: `self.storage` is a C struct which can always be treated as a
+        // slice of bytes. Furthermore we ensure we don't read any uninitialised
+        // bytes by using `self.len`.
+        unsafe { std::slice::from_raw_parts(self.as_ptr().cast(), self.len as usize) }
+    }
+}
+
+impl From<SocketAddr> for SockAddr {
+    fn from(addr: SocketAddr) -> SockAddr {
+        match addr {
+            SocketAddr::V4(addr) => addr.into(),
+            SocketAddr::V6(addr) => addr.into(),
+        }
+    }
+}
+
+impl From<SocketAddrV4> for SockAddr {
+    fn from(addr: SocketAddrV4) -> SockAddr {
+        // SAFETY: a `sockaddr_storage` of all zeros is valid.
+        let mut storage = unsafe { mem::zeroed::<sockaddr_storage>() };
+        let len = {
+            let storage = unsafe { &mut *ptr::addr_of_mut!(storage).cast::<sockaddr_in>() };
+            storage.sin_family = AF_INET as sa_family_t;
+            storage.sin_port = addr.port().to_be();
+            storage.sin_addr = crate::sys::to_in_addr(addr.ip());
+            storage.sin_zero = Default::default();
+            mem::size_of::<sockaddr_in>() as socklen_t
+        };
+        #[cfg(any(
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "haiku",
+            target_os = "hermit",
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "netbsd",
+            target_os = "nto",
+            target_os = "openbsd",
+            target_os = "tvos",
+            target_os = "vxworks",
+            target_os = "watchos",
+        ))]
+        {
+            storage.ss_len = len as u8;
+        }
+        SockAddr { storage, len }
+    }
+}
+
+impl From<SocketAddrV6> for SockAddr {
+    fn from(addr: SocketAddrV6) -> SockAddr {
+        // SAFETY: a `sockaddr_storage` of all zeros is valid.
+        let mut storage = unsafe { mem::zeroed::<sockaddr_storage>() };
+        let len = {
+            let storage = unsafe { &mut *ptr::addr_of_mut!(storage).cast::<sockaddr_in6>() };
+            storage.sin6_family = AF_INET6 as sa_family_t;
+            storage.sin6_port = addr.port().to_be();
+            storage.sin6_addr = crate::sys::to_in6_addr(addr.ip());
+            storage.sin6_flowinfo = addr.flowinfo();
+            #[cfg(unix)]
+            {
+                storage.sin6_scope_id = addr.scope_id();
+            }
+            #[cfg(windows)]
+            {
+                storage.Anonymous = SOCKADDR_IN6_0 {
+                    sin6_scope_id: addr.scope_id(),
+                };
+            }
+            mem::size_of::<sockaddr_in6>() as socklen_t
+        };
+        #[cfg(any(
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "haiku",
+            target_os = "hermit",
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "netbsd",
+            target_os = "nto",
+            target_os = "openbsd",
+            target_os = "tvos",
+            target_os = "vxworks",
+            target_os = "watchos",
+        ))]
+        {
+            storage.ss_len = len as u8;
+        }
+        SockAddr { storage, len }
+    }
+}
+
+impl fmt::Debug for SockAddr {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut f = fmt.debug_struct("SockAddr");
+        #[cfg(any(
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "haiku",
+            target_os = "hermit",
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "netbsd",
+            target_os = "nto",
+            target_os = "openbsd",
+            target_os = "tvos",
+            target_os = "vxworks",
+            target_os = "watchos",
+        ))]
+        f.field("ss_len", &self.storage.ss_len);
+        f.field("ss_family", &self.storage.ss_family)
+            .field("len", &self.len)
+            .finish()
+    }
+}
+
+impl PartialEq for SockAddr {
+    fn eq(&self, other: &Self) -> bool {
+        self.as_bytes() == other.as_bytes()
+    }
+}
+
+impl Eq for SockAddr {}
+
+impl Hash for SockAddr {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        self.as_bytes().hash(state);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn ipv4() {
+        use std::net::Ipv4Addr;
+        let std = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876);
+        let addr = SockAddr::from(std);
+        assert!(addr.is_ipv4());
+        assert!(!addr.is_ipv6());
+        assert!(!addr.is_unix());
+        assert_eq!(addr.family(), AF_INET as sa_family_t);
+        assert_eq!(addr.domain(), Domain::IPV4);
+        assert_eq!(addr.len(), size_of::<sockaddr_in>() as socklen_t);
+        assert_eq!(addr.as_socket(), Some(SocketAddr::V4(std)));
+        assert_eq!(addr.as_socket_ipv4(), Some(std));
+        assert!(addr.as_socket_ipv6().is_none());
+
+        let addr = SockAddr::from(SocketAddr::from(std));
+        assert_eq!(addr.family(), AF_INET as sa_family_t);
+        assert_eq!(addr.len(), size_of::<sockaddr_in>() as socklen_t);
+        assert_eq!(addr.as_socket(), Some(SocketAddr::V4(std)));
+        assert_eq!(addr.as_socket_ipv4(), Some(std));
+        assert!(addr.as_socket_ipv6().is_none());
+        #[cfg(unix)]
+        {
+            assert!(addr.as_pathname().is_none());
+            assert!(addr.as_abstract_namespace().is_none());
+        }
+    }
+
+    #[test]
+    fn ipv6() {
+        use std::net::Ipv6Addr;
+        let std = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12);
+        let addr = SockAddr::from(std);
+        assert!(addr.is_ipv6());
+        assert!(!addr.is_ipv4());
+        assert!(!addr.is_unix());
+        assert_eq!(addr.family(), AF_INET6 as sa_family_t);
+        assert_eq!(addr.domain(), Domain::IPV6);
+        assert_eq!(addr.len(), size_of::<sockaddr_in6>() as socklen_t);
+        assert_eq!(addr.as_socket(), Some(SocketAddr::V6(std)));
+        assert!(addr.as_socket_ipv4().is_none());
+        assert_eq!(addr.as_socket_ipv6(), Some(std));
+
+        let addr = SockAddr::from(SocketAddr::from(std));
+        assert_eq!(addr.family(), AF_INET6 as sa_family_t);
+        assert_eq!(addr.len(), size_of::<sockaddr_in6>() as socklen_t);
+        assert_eq!(addr.as_socket(), Some(SocketAddr::V6(std)));
+        assert!(addr.as_socket_ipv4().is_none());
+        assert_eq!(addr.as_socket_ipv6(), Some(std));
+        #[cfg(unix)]
+        {
+            assert!(addr.as_pathname().is_none());
+            assert!(addr.as_abstract_namespace().is_none());
+        }
+    }
+
+    #[test]
+    fn ipv4_eq() {
+        use std::net::Ipv4Addr;
+
+        let std1 = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876);
+        let std2 = SocketAddrV4::new(Ipv4Addr::new(5, 6, 7, 8), 8765);
+
+        test_eq(
+            SockAddr::from(std1),
+            SockAddr::from(std1),
+            SockAddr::from(std2),
+        );
+    }
+
+    #[test]
+    fn ipv4_hash() {
+        use std::net::Ipv4Addr;
+
+        let std1 = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876);
+        let std2 = SocketAddrV4::new(Ipv4Addr::new(5, 6, 7, 8), 8765);
+
+        test_hash(
+            SockAddr::from(std1),
+            SockAddr::from(std1),
+            SockAddr::from(std2),
+        );
+    }
+
+    #[test]
+    fn ipv6_eq() {
+        use std::net::Ipv6Addr;
+
+        let std1 = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12);
+        let std2 = SocketAddrV6::new(Ipv6Addr::new(3, 4, 5, 6, 7, 8, 9, 0), 7654, 13, 14);
+
+        test_eq(
+            SockAddr::from(std1),
+            SockAddr::from(std1),
+            SockAddr::from(std2),
+        );
+    }
+
+    #[test]
+    fn ipv6_hash() {
+        use std::net::Ipv6Addr;
+
+        let std1 = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12);
+        let std2 = SocketAddrV6::new(Ipv6Addr::new(3, 4, 5, 6, 7, 8, 9, 0), 7654, 13, 14);
+
+        test_hash(
+            SockAddr::from(std1),
+            SockAddr::from(std1),
+            SockAddr::from(std2),
+        );
+    }
+
+    #[test]
+    fn ipv4_ipv6_eq() {
+        use std::net::Ipv4Addr;
+        use std::net::Ipv6Addr;
+
+        let std1 = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876);
+        let std2 = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12);
+
+        test_eq(
+            SockAddr::from(std1),
+            SockAddr::from(std1),
+            SockAddr::from(std2),
+        );
+
+        test_eq(
+            SockAddr::from(std2),
+            SockAddr::from(std2),
+            SockAddr::from(std1),
+        );
+    }
+
+    #[test]
+    fn ipv4_ipv6_hash() {
+        use std::net::Ipv4Addr;
+        use std::net::Ipv6Addr;
+
+        let std1 = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876);
+        let std2 = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12);
+
+        test_hash(
+            SockAddr::from(std1),
+            SockAddr::from(std1),
+            SockAddr::from(std2),
+        );
+
+        test_hash(
+            SockAddr::from(std2),
+            SockAddr::from(std2),
+            SockAddr::from(std1),
+        );
+    }
+
+    #[allow(clippy::eq_op)] // allow a0 == a0 check
+    fn test_eq(a0: SockAddr, a1: SockAddr, b: SockAddr) {
+        assert!(a0 == a0);
+        assert!(a0 == a1);
+        assert!(a1 == a0);
+        assert!(a0 != b);
+        assert!(b != a0);
+    }
+
+    fn test_hash(a0: SockAddr, a1: SockAddr, b: SockAddr) {
+        assert!(calculate_hash(&a0) == calculate_hash(&a0));
+        assert!(calculate_hash(&a0) == calculate_hash(&a1));
+        // Technically unequal values can have the same hash; in this case `a0 != b` and their hashes also differ.
+        assert!(calculate_hash(&a0) != calculate_hash(&b));
+    }
+
+    fn calculate_hash(x: &SockAddr) -> u64 {
+        use std::collections::hash_map::DefaultHasher;
+        use std::hash::Hasher;
+
+        let mut hasher = DefaultHasher::new();
+        x.hash(&mut hasher);
+        hasher.finish()
+    }
+}
diff --git a/crates/socket2/src/socket.rs b/crates/socket2/src/socket.rs
new file mode 100644
index 0000000..efe2b0a
--- /dev/null
+++ b/crates/socket2/src/socket.rs
@@ -0,0 +1,2099 @@
+// Copyright 2015 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt;
+use std::io::{self, Read, Write};
+#[cfg(not(target_os = "redox"))]
+use std::io::{IoSlice, IoSliceMut};
+use std::mem::MaybeUninit;
+#[cfg(not(target_os = "nto"))]
+use std::net::Ipv6Addr;
+use std::net::{self, Ipv4Addr, Shutdown};
+#[cfg(unix)]
+use std::os::unix::io::{FromRawFd, IntoRawFd};
+#[cfg(windows)]
+use std::os::windows::io::{FromRawSocket, IntoRawSocket};
+use std::time::Duration;
+
+use crate::sys::{self, c_int, getsockopt, setsockopt, Bool};
+#[cfg(all(unix, not(target_os = "redox")))]
+use crate::MsgHdrMut;
+use crate::{Domain, Protocol, SockAddr, TcpKeepalive, Type};
+#[cfg(not(target_os = "redox"))]
+use crate::{MaybeUninitSlice, MsgHdr, RecvFlags};
+
+/// Owned wrapper around a system socket.
+///
+/// This type simply wraps an instance of a file descriptor (`c_int`) on Unix
+/// and an instance of `SOCKET` on Windows. This is the main type exported by
+/// this crate and is intended to mirror the raw semantics of sockets on
+/// platforms as closely as possible. Almost all methods correspond to
+/// precisely one libc or OS API call which is essentially just a "Rustic
+/// translation" of what's below.
+///
+/// ## Converting to and from other types
+///
+/// This type can be freely converted into the network primitives provided by
+/// the standard library, such as [`TcpStream`] or [`UdpSocket`], using the
+/// [`From`] trait, see the example below.
+///
+/// [`TcpStream`]: std::net::TcpStream
+/// [`UdpSocket`]: std::net::UdpSocket
+///
+/// # Notes
+///
+/// Some methods that set options on `Socket` require two system calls to set
+/// their options without overwriting previously set options. We do this by
+/// first getting the current settings, applying the desired changes, and then
+/// updating the settings. This means that the operation is **not** atomic. This
+/// can lead to a data race when two threads are changing options in parallel.
+///
+/// # Examples
+/// ```no_run
+/// # fn main() -> std::io::Result<()> {
+/// use std::net::{SocketAddr, TcpListener};
+/// use socket2::{Socket, Domain, Type};
+///
+/// // create a TCP listener
+/// let socket = Socket::new(Domain::IPV6, Type::STREAM, None)?;
+///
+/// let address: SocketAddr = "[::1]:12345".parse().unwrap();
+/// let address = address.into();
+/// socket.bind(&address)?;
+/// socket.listen(128)?;
+///
+/// let listener: TcpListener = socket.into();
+/// // ...
+/// # drop(listener);
+/// # Ok(()) }
+/// ```
+pub struct Socket {
+    inner: Inner,
+}
+
+/// Store a `TcpStream` internally to take advantage of its niche optimizations on Unix platforms.
+pub(crate) type Inner = std::net::TcpStream;
+
+impl Socket {
+    /// # Safety
+    ///
+    /// The caller must ensure `raw` is a valid file descriptor/socket. NOTE:
+    /// this should really be marked `unsafe`, but this being an internal
+    /// function, often passed as a mapping function, makes it very
+    /// inconvenient to mark it as `unsafe`.
+    pub(crate) fn from_raw(raw: sys::Socket) -> Socket {
+        Socket {
+            inner: unsafe {
+                // SAFETY: the caller must ensure that `raw` is a valid file
+                // descriptor, but when it isn't it could return I/O errors, or
+                // potentially close a fd it doesn't own. All of that isn't
+                // memory unsafe, so it's not desired but never memory unsafe or
+                // causes UB.
+                //
+                // However there is one exception. We use `TcpStream` to
+                // represent the `Socket` internally (see `Inner` type),
+                // `TcpStream` has a layout optimisation that doesn't allow for
+                // negative file descriptors (as those are always invalid).
+                // Violating this assumption (fd never negative) causes UB,
+                // something we don't want. So to check for that we have this
+                // `assert!`.
+                #[cfg(unix)]
+                assert!(raw >= 0, "tried to create a `Socket` with an invalid fd");
+                sys::socket_from_raw(raw)
+            },
+        }
+    }
+
+    pub(crate) fn as_raw(&self) -> sys::Socket {
+        sys::socket_as_raw(&self.inner)
+    }
+
+    pub(crate) fn into_raw(self) -> sys::Socket {
+        sys::socket_into_raw(self.inner)
+    }
+
+    /// Creates a new socket and sets common flags.
+    ///
+    /// This function corresponds to `socket(2)` on Unix and `WSASocketW` on
+    /// Windows.
+    ///
+    /// On Unix-like systems, the close-on-exec flag is set on the new socket.
+    /// Additionally, on Apple platforms `SO_NOSIGPIPE` is set. On Windows,
+    /// the socket is made non-inheritable.
+    ///
+    /// [`Socket::new_raw`] can be used if you don't want these flags to be set.
+    #[doc = man_links!(socket(2))]
+    pub fn new(domain: Domain, ty: Type, protocol: Option<Protocol>) -> io::Result<Socket> {
+        let ty = set_common_type(ty);
+        Socket::new_raw(domain, ty, protocol).and_then(set_common_flags)
+    }
+
+    /// Creates a new socket ready to be configured.
+    ///
+    /// This function corresponds to `socket(2)` on Unix and `WSASocketW` on
+    /// Windows and simply creates a new socket, no other configuration is done.
+    pub fn new_raw(domain: Domain, ty: Type, protocol: Option<Protocol>) -> io::Result<Socket> {
+        let protocol = protocol.map_or(0, |p| p.0);
+        sys::socket(domain.0, ty.0, protocol).map(Socket::from_raw)
+    }
+
+    /// Creates a pair of sockets which are connected to each other.
+    ///
+    /// This function corresponds to `socketpair(2)`.
+    ///
+    /// This function sets the same flags as is done for [`Socket::new`];
+    /// [`Socket::pair_raw`] can be used if you don't want to set those flags.
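+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming a Unix platform and the `all` feature:
+    ///
+    /// ```no_run
+    /// # fn main() -> std::io::Result<()> {
+    /// use std::mem::MaybeUninit;
+    ///
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// let (a, b) = Socket::pair(Domain::UNIX, Type::STREAM, None)?;
+    /// a.send(b"ping")?;
+    ///
+    /// // `recv` accepts an uninitialised buffer and returns the number of bytes read.
+    /// let mut buf = [MaybeUninit::<u8>::uninit(); 16];
+    /// let received = b.recv(&mut buf)?;
+    /// # drop(received);
+    /// # Ok(())
+    /// # }
+    /// ```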
+    #[doc = man_links!(unix: socketpair(2))]
+    #[cfg(all(feature = "all", unix))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))]
+    pub fn pair(
+        domain: Domain,
+        ty: Type,
+        protocol: Option<Protocol>,
+    ) -> io::Result<(Socket, Socket)> {
+        let ty = set_common_type(ty);
+        let (a, b) = Socket::pair_raw(domain, ty, protocol)?;
+        let a = set_common_flags(a)?;
+        let b = set_common_flags(b)?;
+        Ok((a, b))
+    }
+
+    /// Creates a pair of sockets which are connected to each other.
+    ///
+    /// This function corresponds to `socketpair(2)`.
+    #[cfg(all(feature = "all", unix))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))]
+    pub fn pair_raw(
+        domain: Domain,
+        ty: Type,
+        protocol: Option<Protocol>,
+    ) -> io::Result<(Socket, Socket)> {
+        let protocol = protocol.map_or(0, |p| p.0);
+        sys::socketpair(domain.0, ty.0, protocol)
+            .map(|[a, b]| (Socket::from_raw(a), Socket::from_raw(b)))
+    }
+
+    /// Binds this socket to the specified address.
+    ///
+    /// This function directly corresponds to the `bind(2)` function on Windows
+    /// and Unix.
+    #[doc = man_links!(bind(2))]
+    pub fn bind(&self, address: &SockAddr) -> io::Result<()> {
+        sys::bind(self.as_raw(), address)
+    }
+
+    /// Initiate a connection on this socket to the specified address.
+    ///
+    /// This function directly corresponds to the `connect(2)` function on
+    /// Windows and Unix.
+    ///
+    /// An error will be returned if `listen` or `connect` has already been
+    /// called on this builder.
+    #[doc = man_links!(connect(2))]
+    ///
+    /// # Notes
+    ///
+    /// When using a non-blocking connect (by setting the socket into
+    /// non-blocking mode before calling this function), socket options can't be
+    /// set *while connecting*. This will cause errors on Windows. Socket
+    /// options can be safely set before and after connecting the socket.
+    pub fn connect(&self, address: &SockAddr) -> io::Result<()> {
+        sys::connect(self.as_raw(), address)
+    }
+
+    /// Initiate a connection on this socket to the specified address, only
+    /// waiting for a certain period of time for the connection to be
+    /// established.
+    ///
+    /// Unlike many other methods on `Socket`, this does *not* correspond to a
+    /// single C function. It sets the socket to nonblocking mode, connects via
+    /// connect(2), and then waits for the connection to complete with poll(2)
+    /// on Unix and select on Windows. When the connection is complete, the
+    /// socket is set back to blocking mode. On Unix, this will loop over
+    /// `EINTR` errors.
+    ///
+    /// # Warnings
+    ///
+    /// The non-blocking state of the socket is overridden by this function -
+    /// it will be returned in blocking mode on success, and in an indeterminate
+    /// state on failure.
+    ///
+    /// If the connection request times out, it may still be processing in the
+    /// background - a second call to `connect` or `connect_timeout` may fail.
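+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (the address is illustrative), assuming a reachable
+    /// TCP server:
+    ///
+    /// ```no_run
+    /// # fn main() -> std::io::Result<()> {
+    /// use std::net::SocketAddr;
+    /// use std::time::Duration;
+    ///
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
+    /// // Give up if the connection is not established within five seconds.
+    /// socket.connect_timeout(&addr.into(), Duration::from_secs(5))?;
+    /// # Ok(())
+    /// # }
+    /// ```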
+    pub fn connect_timeout(&self, addr: &SockAddr, timeout: Duration) -> io::Result<()> {
+        self.set_nonblocking(true)?;
+        let res = self.connect(addr);
+        self.set_nonblocking(false)?;
+
+        match res {
+            Ok(()) => return Ok(()),
+            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}
+            #[cfg(unix)]
+            Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
+            Err(e) => return Err(e),
+        }
+
+        sys::poll_connect(self, timeout)
+    }
+
+    /// Mark a socket as ready to accept incoming connection requests using
+    /// [`Socket::accept()`].
+    ///
+    /// This function directly corresponds to the `listen(2)` function on
+    /// Windows and Unix.
+    ///
+    /// An error will be returned if `listen` or `connect` has already been
+    /// called on this builder.
+    #[doc = man_links!(listen(2))]
+    pub fn listen(&self, backlog: c_int) -> io::Result<()> {
+        sys::listen(self.as_raw(), backlog)
+    }
+
+    /// Accept a new incoming connection from this listener.
+    ///
+    /// This function uses `accept4(2)` on platforms that support it and
+    /// `accept(2)` on platforms that do not.
+    ///
+    /// This function sets the same flags as is done for [`Socket::new`];
+    /// [`Socket::accept_raw`] can be used if you don't want to set those flags.
+    #[doc = man_links!(accept(2))]
+    pub fn accept(&self) -> io::Result<(Socket, SockAddr)> {
+        // Use `accept4` on platforms that support it.
+        #[cfg(any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "illumos",
+            target_os = "linux",
+            target_os = "netbsd",
+            target_os = "openbsd",
+        ))]
+        return self._accept4(libc::SOCK_CLOEXEC);
+
+        // Fall back to `accept` on platforms that do not support `accept4`.
+        #[cfg(not(any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "illumos",
+            target_os = "linux",
+            target_os = "netbsd",
+            target_os = "openbsd",
+        )))]
+        {
+            let (socket, addr) = self.accept_raw()?;
+            let socket = set_common_flags(socket)?;
+            // `set_common_flags` does not disable inheritance on Windows because `Socket::new`
+            // unlike `accept` is able to create the socket with inheritance disabled.
+            #[cfg(windows)]
+            socket._set_no_inherit(true)?;
+            Ok((socket, addr))
+        }
+    }
+
+    /// Accept a new incoming connection from this listener.
+    ///
+    /// This function directly corresponds to the `accept(2)` function on
+    /// Windows and Unix.
+    pub fn accept_raw(&self) -> io::Result<(Socket, SockAddr)> {
+        sys::accept(self.as_raw()).map(|(inner, addr)| (Socket::from_raw(inner), addr))
+    }
+
+    /// Returns the socket address of the local half of this socket.
+    ///
+    /// This function directly corresponds to the `getsockname(2)` function on
+    /// Windows and Unix.
+    #[doc = man_links!(getsockname(2))]
+    ///
+    /// # Notes
+    ///
+    /// Depending on the OS this may return an error if the socket is not
+    /// [bound].
+    ///
+    /// [bound]: Socket::bind
+    pub fn local_addr(&self) -> io::Result<SockAddr> {
+        sys::getsockname(self.as_raw())
+    }
+
+    /// Returns the socket address of the remote peer of this socket.
+    ///
+    /// This function directly corresponds to the `getpeername(2)` function on
+    /// Windows and Unix.
+    #[doc = man_links!(getpeername(2))]
+    ///
+    /// # Notes
+    ///
+    /// This returns an error if the socket is not [`connect`ed].
+    ///
+    /// [`connect`ed]: Socket::connect
+    pub fn peer_addr(&self) -> io::Result<SockAddr> {
+        sys::getpeername(self.as_raw())
+    }
+
+    /// Returns the [`Type`] of this socket by checking the `SO_TYPE` option on
+    /// this socket.
+    pub fn r#type(&self) -> io::Result<Type> {
+        unsafe { getsockopt::<c_int>(self.as_raw(), sys::SOL_SOCKET, sys::SO_TYPE).map(Type) }
+    }
+
+    /// Creates a new independently owned handle to the underlying socket.
+    ///
+    /// # Notes
+    ///
+    /// On Unix this uses `F_DUPFD_CLOEXEC` and thus sets the `FD_CLOEXEC` on
+    /// the returned socket.
+    ///
+    /// On Windows this uses `WSA_FLAG_NO_HANDLE_INHERIT`, setting inheritance to
+    /// false.
+    ///
+    /// On Windows this function can **not** be used on a
+    /// QOS-enabled socket, see
+    /// <https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsaduplicatesocketw>.
+    pub fn try_clone(&self) -> io::Result<Socket> {
+        sys::try_clone(self.as_raw()).map(Socket::from_raw)
+    }
+
+    /// Returns true if this socket is set to nonblocking mode, false otherwise.
+    ///
+    /// # Notes
+    ///
+    /// On Unix this corresponds to calling `fcntl` returning the value of
+    /// `O_NONBLOCK`.
+    ///
+    /// On Windows it is not possible to retrieve the nonblocking mode status.
+    #[cfg(all(feature = "all", unix))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))]
+    pub fn nonblocking(&self) -> io::Result<bool> {
+        sys::nonblocking(self.as_raw())
+    }
+
+    /// Moves this socket into or out of nonblocking mode.
+    ///
+    /// # Notes
+    ///
+    /// On Unix this corresponds to calling `fcntl` (un)setting `O_NONBLOCK`.
+    ///
+    /// On Windows this corresponds to calling `ioctlsocket` (un)setting
+    /// `FIONBIO`.
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        sys::set_nonblocking(self.as_raw(), nonblocking)
+    }
+
+    /// Shuts down the read, write, or both halves of this connection.
+    ///
+    /// This function will cause all pending and future I/O on the specified
+    /// portions to return immediately with an appropriate value.
+    #[doc = man_links!(shutdown(2))]
+    pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+        sys::shutdown(self.as_raw(), how)
+    }
+
+    /// Receives data on the socket from the remote address to which it is
+    /// connected.
+    ///
+    /// The [`connect`] method will connect this socket to a remote address.
+    /// This method might fail if the socket is not connected.
+    #[doc = man_links!(recv(2))]
+    ///
+    /// [`connect`]: Socket::connect
+    ///
+    /// # Safety
+    ///
+    /// Normally casting a `&mut [u8]` to `&mut [MaybeUninit<u8>]` would be
+    /// unsound, as that allows us to write uninitialised bytes to the buffer.
+    /// However this implementation promises to not write uninitialised bytes to
+    /// the `buf`fer and passes it directly to `recv(2)` system call. This
+    /// promise ensures that this function can be called using a `buf`fer of
+    /// type `&mut [u8]`.
+    ///
+    /// Note that the [`io::Read::read`] implementation calls this function with
+    /// a `buf`fer of type `&mut [u8]`, allowing initialised buffers to be used
+    /// without using `unsafe`.
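+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (the address is illustrative), assuming a connected
+    /// peer that sends some data:
+    ///
+    /// ```no_run
+    /// # fn main() -> std::io::Result<()> {
+    /// use std::mem::MaybeUninit;
+    /// use std::net::SocketAddr;
+    ///
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// socket.connect(&"127.0.0.1:8080".parse::<SocketAddr>().unwrap().into())?;
+    ///
+    /// // The buffer may be uninitialised; `recv` never reads from it.
+    /// let mut buf = [MaybeUninit::<u8>::uninit(); 1024];
+    /// let n = socket.recv(&mut buf)?;
+    /// // SAFETY: `recv` initialised the first `n` bytes of `buf`.
+    /// let data = unsafe { std::slice::from_raw_parts(buf.as_ptr().cast::<u8>(), n) };
+    /// # drop(data);
+    /// # Ok(())
+    /// # }
+    /// ```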
+    pub fn recv(&self, buf: &mut [MaybeUninit<u8>]) -> io::Result<usize> {
+        self.recv_with_flags(buf, 0)
+    }
+
+    /// Receives out-of-band (OOB) data on the socket from the remote address to
+    /// which it is connected by setting the `MSG_OOB` flag for this call.
+    ///
+    /// For more information, see [`recv`], [`out_of_band_inline`].
+    ///
+    /// [`recv`]: Socket::recv
+    /// [`out_of_band_inline`]: Socket::out_of_band_inline
+    #[cfg_attr(target_os = "redox", allow(rustdoc::broken_intra_doc_links))]
+    pub fn recv_out_of_band(&self, buf: &mut [MaybeUninit<u8>]) -> io::Result<usize> {
+        self.recv_with_flags(buf, sys::MSG_OOB)
+    }
+
+    /// Identical to [`recv`] but allows for specification of arbitrary flags to
+    /// the underlying `recv` call.
+    ///
+    /// [`recv`]: Socket::recv
+    pub fn recv_with_flags(
+        &self,
+        buf: &mut [MaybeUninit<u8>],
+        flags: sys::c_int,
+    ) -> io::Result<usize> {
+        sys::recv(self.as_raw(), buf, flags)
+    }
+
+    /// Receives data on the socket from the remote address to which it is
+    /// connected. Unlike [`recv`] this allows passing multiple buffers.
+    ///
+    /// The [`connect`] method will connect this socket to a remote address.
+    /// This method might fail if the socket is not connected.
+    ///
+    /// In addition to the number of bytes read, this function returns the flags
+    /// for the received message. See [`RecvFlags`] for more information about
+    /// the returned flags.
+    #[doc = man_links!(recvmsg(2))]
+    ///
+    /// [`recv`]: Socket::recv
+    /// [`connect`]: Socket::connect
+    ///
+    /// # Safety
+    ///
+    /// Normally casting a `IoSliceMut` to `MaybeUninitSlice` would be unsound,
+    /// as that allows us to write uninitialised bytes to the buffer. However
+    /// this implementation promises to not write uninitialised bytes to the
+    /// `bufs` and passes it directly to `recvmsg(2)` system call. This promise
+    /// ensures that this function can be called using `bufs` of type `&mut
+    /// [IoSliceMut]`.
+    ///
+    /// Note that the [`io::Read::read_vectored`] implementation calls this
+    /// function with `buf`s of type `&mut [IoSliceMut]`, allowing initialised
+    /// buffers to be used without using `unsafe`.
+    #[cfg(not(target_os = "redox"))]
+    #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+    pub fn recv_vectored(
+        &self,
+        bufs: &mut [MaybeUninitSlice<'_>],
+    ) -> io::Result<(usize, RecvFlags)> {
+        self.recv_vectored_with_flags(bufs, 0)
+    }
+
+    /// Identical to [`recv_vectored`] but allows for specification of arbitrary
+    /// flags to the underlying `recvmsg`/`WSARecv` call.
+    ///
+    /// [`recv_vectored`]: Socket::recv_vectored
+    ///
+    /// # Safety
+    ///
+    /// `recv_vectored_with_flags` makes the same safety guarantees regarding `bufs`
+    /// as [`recv_vectored`].
+    ///
+    /// [`recv_vectored`]: Socket::recv_vectored
+    #[cfg(not(target_os = "redox"))]
+    #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+    pub fn recv_vectored_with_flags(
+        &self,
+        bufs: &mut [MaybeUninitSlice<'_>],
+        flags: c_int,
+    ) -> io::Result<(usize, RecvFlags)> {
+        sys::recv_vectored(self.as_raw(), bufs, flags)
+    }
+
+    /// Receives data on the socket from the remote address to which it is
+    /// connected, without removing that data from the queue. On success,
+    /// returns the number of bytes peeked.
+    ///
+    /// Successive calls return the same data. This is accomplished by passing
+    /// `MSG_PEEK` as a flag to the underlying `recv` system call.
+    ///
+    /// # Safety
+    ///
+    /// `peek` makes the same safety guarantees regarding the `buf`fer as
+    /// [`recv`].
+    ///
+    /// [`recv`]: Socket::recv
+    pub fn peek(&self, buf: &mut [MaybeUninit<u8>]) -> io::Result<usize> {
+        self.recv_with_flags(buf, sys::MSG_PEEK)
+    }
+
+    /// Receives data from the socket. On success, returns the number of bytes
+    /// read and the address from whence the data came.
+    #[doc = man_links!(recvfrom(2))]
+    ///
+    /// # Safety
+    ///
+    /// `recv_from` makes the same safety guarantees regarding the `buf`fer as
+    /// [`recv`].
+    ///
+    /// [`recv`]: Socket::recv
+    pub fn recv_from(&self, buf: &mut [MaybeUninit<u8>]) -> io::Result<(usize, SockAddr)> {
+        self.recv_from_with_flags(buf, 0)
+    }
+
+    /// Identical to [`recv_from`] but allows for specification of arbitrary
+    /// flags to the underlying `recvfrom` call.
+    ///
+    /// [`recv_from`]: Socket::recv_from
+    pub fn recv_from_with_flags(
+        &self,
+        buf: &mut [MaybeUninit<u8>],
+        flags: c_int,
+    ) -> io::Result<(usize, SockAddr)> {
+        sys::recv_from(self.as_raw(), buf, flags)
+    }
+
+    /// Receives data from the socket. Returns the amount of bytes read, the
+    /// [`RecvFlags`] and the remote address from which the data is coming. Unlike
+    /// [`recv_from`] this allows passing multiple buffers.
+    #[doc = man_links!(recvmsg(2))]
+    ///
+    /// [`recv_from`]: Socket::recv_from
+    ///
+    /// # Safety
+    ///
+    /// `recv_from_vectored` makes the same safety guarantees regarding `bufs`
+    /// as [`recv_vectored`].
+    ///
+    /// [`recv_vectored`]: Socket::recv_vectored
+    #[cfg(not(target_os = "redox"))]
+    #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+    pub fn recv_from_vectored(
+        &self,
+        bufs: &mut [MaybeUninitSlice<'_>],
+    ) -> io::Result<(usize, RecvFlags, SockAddr)> {
+        self.recv_from_vectored_with_flags(bufs, 0)
+    }
+
+    /// Identical to [`recv_from_vectored`] but allows for specification of
+    /// arbitrary flags to the underlying `recvmsg`/`WSARecvFrom` call.
+    ///
+    /// [`recv_from_vectored`]: Socket::recv_from_vectored
+    ///
+    /// # Safety
+    ///
+    /// `recv_from_vectored` makes the same safety guarantees regarding `bufs`
+    /// as [`recv_vectored`].
+    ///
+    /// [`recv_vectored`]: Socket::recv_vectored
+    #[cfg(not(target_os = "redox"))]
+    #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+    pub fn recv_from_vectored_with_flags(
+        &self,
+        bufs: &mut [MaybeUninitSlice<'_>],
+        flags: c_int,
+    ) -> io::Result<(usize, RecvFlags, SockAddr)> {
+        sys::recv_from_vectored(self.as_raw(), bufs, flags)
+    }
+
+    /// Receives data from the socket, without removing it from the queue.
+    ///
+    /// Successive calls return the same data. This is accomplished by passing
+    /// `MSG_PEEK` as a flag to the underlying `recvfrom` system call.
+    ///
+    /// On success, returns the number of bytes peeked and the address from
+    /// whence the data came.
+    ///
+    /// # Safety
+    ///
+    /// `peek_from` makes the same safety guarantees regarding the `buf`fer as
+    /// [`recv`].
+    ///
+    /// # Note: Datagram Sockets
+    /// For datagram sockets, the behavior of this method when `buf` is smaller than
+    /// the datagram at the head of the receive queue differs between Windows and
+    /// Unix-like platforms (Linux, macOS, BSDs, etc: colloquially termed "*nix").
+    ///
+    /// On *nix platforms, the datagram is truncated to the length of `buf`.
+    ///
+    /// On Windows, an error corresponding to `WSAEMSGSIZE` will be returned.
+    ///
+    /// For consistency between platforms, be sure to provide a sufficiently large buffer to avoid
+    /// truncation; the exact size required depends on the underlying protocol.
+    ///
+    /// If you just want to know the sender of the data, try [`peek_sender`].
+    ///
+    /// [`recv`]: Socket::recv
+    /// [`peek_sender`]: Socket::peek_sender
+    pub fn peek_from(&self, buf: &mut [MaybeUninit<u8>]) -> io::Result<(usize, SockAddr)> {
+        self.recv_from_with_flags(buf, sys::MSG_PEEK)
+    }
+
+    /// Retrieve the sender for the data at the head of the receive queue.
+    ///
+    /// This is equivalent to calling [`peek_from`] with a zero-sized buffer,
+    /// but suppresses the `WSAEMSGSIZE` error on Windows.
+    ///
+    /// [`peek_from`]: Socket::peek_from
+    pub fn peek_sender(&self) -> io::Result<SockAddr> {
+        sys::peek_sender(self.as_raw())
+    }
+
+    /// Receive a message from a socket using a message structure.
+    ///
+    /// This is not supported on Windows as calling `WSARecvMsg` (the `recvmsg`
+    /// equivalent) is not straightforward on Windows. See
+    /// <https://github.com/microsoft/Windows-classic-samples/blob/7cbd99ac1d2b4a0beffbaba29ea63d024ceff700/Samples/Win7Samples/netds/winsock/recvmsg/rmmc.cpp>
+    /// for an example (in C++).
+    #[doc = man_links!(recvmsg(2))]
+    #[cfg(all(unix, not(target_os = "redox")))]
+    #[cfg_attr(docsrs, doc(cfg(all(unix, not(target_os = "redox")))))]
+    pub fn recvmsg(&self, msg: &mut MsgHdrMut<'_, '_, '_>, flags: sys::c_int) -> io::Result<usize> {
+        sys::recvmsg(self.as_raw(), msg, flags)
+    }
+
+    /// Sends data on the socket to a connected peer.
+    ///
+    /// This is typically used on TCP sockets or datagram sockets which have
+    /// been connected.
+    ///
+    /// On success returns the number of bytes that were sent.
+    #[doc = man_links!(send(2))]
+    pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+        self.send_with_flags(buf, 0)
+    }
+
+    /// Identical to [`send`] but allows for specification of arbitrary flags to the underlying
+    /// `send` call.
+    ///
+    /// [`send`]: Socket::send
+    pub fn send_with_flags(&self, buf: &[u8], flags: c_int) -> io::Result<usize> {
+        sys::send(self.as_raw(), buf, flags)
+    }
+
+    /// Send data to the connected peer. Returns the amount of bytes written.
+    #[cfg(not(target_os = "redox"))]
+    #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+    pub fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+        self.send_vectored_with_flags(bufs, 0)
+    }
+
+    /// Identical to [`send_vectored`] but allows for specification of arbitrary
+    /// flags to the underlying `sendmsg`/`WSASend` call.
+    #[doc = man_links!(sendmsg(2))]
+    ///
+    /// [`send_vectored`]: Socket::send_vectored
+    #[cfg(not(target_os = "redox"))]
+    #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+    pub fn send_vectored_with_flags(
+        &self,
+        bufs: &[IoSlice<'_>],
+        flags: c_int,
+    ) -> io::Result<usize> {
+        sys::send_vectored(self.as_raw(), bufs, flags)
+    }
+
+    /// Sends out-of-band (OOB) data on the socket to the connected peer
+    /// by setting the `MSG_OOB` flag for this call.
+    ///
+    /// For more information, see [`send`], [`out_of_band_inline`].
+    ///
+    /// [`send`]: Socket::send
+    /// [`out_of_band_inline`]: Socket::out_of_band_inline
+    #[cfg_attr(target_os = "redox", allow(rustdoc::broken_intra_doc_links))]
+    pub fn send_out_of_band(&self, buf: &[u8]) -> io::Result<usize> {
+        self.send_with_flags(buf, sys::MSG_OOB)
+    }
+
+    /// Sends data on the socket to the given address. On success, returns the
+    /// number of bytes written.
+    ///
+    /// This is typically used on UDP or datagram-oriented sockets.
+    #[doc = man_links!(sendto(2))]
+    pub fn send_to(&self, buf: &[u8], addr: &SockAddr) -> io::Result<usize> {
+        self.send_to_with_flags(buf, addr, 0)
+    }
+
+    /// Identical to [`send_to`] but allows for specification of arbitrary flags
+    /// to the underlying `sendto` call.
+    ///
+    /// [`send_to`]: Socket::send_to
+    pub fn send_to_with_flags(
+        &self,
+        buf: &[u8],
+        addr: &SockAddr,
+        flags: c_int,
+    ) -> io::Result<usize> {
+        sys::send_to(self.as_raw(), buf, addr, flags)
+    }
+
+    /// Send data to a peer listening on `addr`. Returns the amount of bytes
+    /// written.
+    #[doc = man_links!(sendmsg(2))]
+    #[cfg(not(target_os = "redox"))]
+    #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+    pub fn send_to_vectored(&self, bufs: &[IoSlice<'_>], addr: &SockAddr) -> io::Result<usize> {
+        self.send_to_vectored_with_flags(bufs, addr, 0)
+    }
+
+    /// Identical to [`send_to_vectored`] but allows for specification of
+    /// arbitrary flags to the underlying `sendmsg`/`WSASendTo` call.
+    ///
+    /// [`send_to_vectored`]: Socket::send_to_vectored
+    #[cfg(not(target_os = "redox"))]
+    #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+    pub fn send_to_vectored_with_flags(
+        &self,
+        bufs: &[IoSlice<'_>],
+        addr: &SockAddr,
+        flags: c_int,
+    ) -> io::Result<usize> {
+        sys::send_to_vectored(self.as_raw(), bufs, addr, flags)
+    }
+
+    /// Send a message on a socket using a message structure.
+    #[doc = man_links!(sendmsg(2))]
+    #[cfg(not(target_os = "redox"))]
+    #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+    pub fn sendmsg(&self, msg: &MsgHdr<'_, '_, '_>, flags: sys::c_int) -> io::Result<usize> {
+        sys::sendmsg(self.as_raw(), msg, flags)
+    }
+}
+
+/// Set `SOCK_CLOEXEC` and `NO_HANDLE_INHERIT` on the `ty`pe on platforms that
+/// support it.
+#[inline(always)]
+const fn set_common_type(ty: Type) -> Type {
+    // On platforms that support it set `SOCK_CLOEXEC`.
+    #[cfg(any(
+        target_os = "android",
+        target_os = "dragonfly",
+        target_os = "freebsd",
+        target_os = "fuchsia",
+        target_os = "illumos",
+        target_os = "linux",
+        target_os = "netbsd",
+        target_os = "openbsd",
+    ))]
+    let ty = ty._cloexec();
+
+    // On windows set `NO_HANDLE_INHERIT`.
+    #[cfg(windows)]
+    let ty = ty._no_inherit();
+
+    ty
+}
+
+/// Set `FD_CLOEXEC` and `NOSIGPIPE` on the `socket` for platforms that need it.
+#[inline(always)]
+#[allow(clippy::unnecessary_wraps)]
+fn set_common_flags(socket: Socket) -> io::Result<Socket> {
+    // On platforms that don't have `SOCK_CLOEXEC` use `FD_CLOEXEC`.
+    #[cfg(all(
+        unix,
+        not(any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "illumos",
+            target_os = "linux",
+            target_os = "netbsd",
+            target_os = "openbsd",
+            target_os = "espidf",
+            target_os = "vita",
+        ))
+    ))]
+    socket._set_cloexec(true)?;
+
+    // On Apple platforms set `NOSIGPIPE`.
+    #[cfg(any(
+        target_os = "ios",
+        target_os = "macos",
+        target_os = "tvos",
+        target_os = "watchos",
+    ))]
+    socket._set_nosigpipe(true)?;
+
+    Ok(socket)
+}
+
+/// A local interface specified by its index or an address assigned to it.
+///
+/// `Index(0)` and `Address(Ipv4Addr::UNSPECIFIED)` are equivalent and indicate
+/// that an appropriate interface should be selected by the system.
+#[cfg(not(any(
+    target_os = "haiku",
+    target_os = "illumos",
+    target_os = "netbsd",
+    target_os = "redox",
+    target_os = "solaris",
+)))]
+#[derive(Debug)]
+pub enum InterfaceIndexOrAddress {
+    /// An interface index.
+    Index(u32),
+    /// An address assigned to an interface.
+    Address(Ipv4Addr),
+}
+
+/// Socket options get/set using `SOL_SOCKET`.
+///
+/// Additional documentation can be found in documentation of the OS.
+/// * Linux: <https://man7.org/linux/man-pages/man7/socket.7.html>
+/// * Windows: <https://docs.microsoft.com/en-us/windows/win32/winsock/sol-socket-socket-options>
+impl Socket {
+    /// Get the value of the `SO_BROADCAST` option for this socket.
+    ///
+    /// For more information about this option, see [`set_broadcast`].
+    ///
+    /// [`set_broadcast`]: Socket::set_broadcast
+    pub fn broadcast(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::SOL_SOCKET, sys::SO_BROADCAST)
+                .map(|broadcast| broadcast != 0)
+        }
+    }
+
+    /// Set the value of the `SO_BROADCAST` option for this socket.
+    ///
+    /// When enabled, this socket is allowed to send packets to a broadcast
+    /// address.
+    pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::SOL_SOCKET,
+                sys::SO_BROADCAST,
+                broadcast as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `SO_ERROR` option on this socket.
+    ///
+    /// This will retrieve the stored error in the underlying socket, clearing
+    /// the field in the process. This can be useful for checking errors between
+    /// calls.
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        match unsafe { getsockopt::<c_int>(self.as_raw(), sys::SOL_SOCKET, sys::SO_ERROR) } {
+            Ok(0) => Ok(None),
+            Ok(errno) => Ok(Some(io::Error::from_raw_os_error(errno))),
+            Err(err) => Err(err),
+        }
+    }
+
+    /// Get the value of the `SO_KEEPALIVE` option on this socket.
+    ///
+    /// For more information about this option, see [`set_keepalive`].
+    ///
+    /// [`set_keepalive`]: Socket::set_keepalive
+    pub fn keepalive(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<Bool>(self.as_raw(), sys::SOL_SOCKET, sys::SO_KEEPALIVE)
+                .map(|keepalive| keepalive != 0)
+        }
+    }
+
+    /// Set value for the `SO_KEEPALIVE` option on this socket.
+    ///
+    /// Enable sending of keep-alive messages on connection-oriented sockets.
+    pub fn set_keepalive(&self, keepalive: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::SOL_SOCKET,
+                sys::SO_KEEPALIVE,
+                keepalive as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `SO_LINGER` option on this socket.
+    ///
+    /// For more information about this option, see [`set_linger`].
+    ///
+    /// [`set_linger`]: Socket::set_linger
+    pub fn linger(&self) -> io::Result<Option<Duration>> {
+        unsafe {
+            getsockopt::<sys::linger>(self.as_raw(), sys::SOL_SOCKET, sys::SO_LINGER)
+                .map(from_linger)
+        }
+    }
+
+    /// Set value for the `SO_LINGER` option on this socket.
+    ///
+    /// If `linger` is not `None`, a close(2) or shutdown(2) will not return
+    /// until all queued messages for the socket have been successfully sent or
+    /// the linger timeout has been reached. Otherwise, the call returns
+    /// immediately and the closing is done in the background. When the socket
+    /// is closed as part of exit(2), it always lingers in the background.
+    ///
+    /// # Notes
+    ///
+    /// On most OSs the duration only has a precision of seconds and will be
+    /// silently truncated.
+    ///
+    /// On Apple platforms (e.g. macOS, iOS, etc) this uses `SO_LINGER_SEC`.
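+    ///
+    /// A minimal sketch (not part of the upstream docs):
+    ///
+    /// ```
+    /// use std::time::Duration;
+    ///
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// // Block `close`/`shutdown` for up to ten seconds while queued data drains.
+    /// socket.set_linger(Some(Duration::from_secs(10)))?;
+    /// assert_eq!(socket.linger()?, Some(Duration::from_secs(10)));
+    /// # Ok(()) }
+    /// ```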
+    pub fn set_linger(&self, linger: Option<Duration>) -> io::Result<()> {
+        let linger = into_linger(linger);
+        unsafe { setsockopt(self.as_raw(), sys::SOL_SOCKET, sys::SO_LINGER, linger) }
+    }
+
+    /// Get value for the `SO_OOBINLINE` option on this socket.
+    ///
+    /// For more information about this option, see [`set_out_of_band_inline`].
+    ///
+    /// [`set_out_of_band_inline`]: Socket::set_out_of_band_inline
+    #[cfg(not(target_os = "redox"))]
+    #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+    pub fn out_of_band_inline(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::SOL_SOCKET, sys::SO_OOBINLINE)
+                .map(|oob_inline| oob_inline != 0)
+        }
+    }
+
+    /// Set value for the `SO_OOBINLINE` option on this socket.
+    ///
+    /// If this option is enabled, out-of-band data is directly placed into the
+    /// receive data stream. Otherwise, out-of-band data is passed only when the
+    /// `MSG_OOB` flag is set during receiving. As per RFC6093, TCP sockets
+    /// using the Urgent mechanism are encouraged to set this flag.
+    #[cfg(not(target_os = "redox"))]
+    #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))]
+    pub fn set_out_of_band_inline(&self, oob_inline: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::SOL_SOCKET,
+                sys::SO_OOBINLINE,
+                oob_inline as c_int,
+            )
+        }
+    }
+
+    /// Get value for the `SO_RCVBUF` option on this socket.
+    ///
+    /// For more information about this option, see [`set_recv_buffer_size`].
+    ///
+    /// [`set_recv_buffer_size`]: Socket::set_recv_buffer_size
+    pub fn recv_buffer_size(&self) -> io::Result<usize> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::SOL_SOCKET, sys::SO_RCVBUF)
+                .map(|size| size as usize)
+        }
+    }
+
+    /// Set value for the `SO_RCVBUF` option on this socket.
+    ///
+    /// Changes the size of the operating system's receive buffer associated
+    /// with the socket.
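+    ///
+    /// A small sketch (not part of the upstream docs). The OS may round the
+    /// requested size (Linux, for example, doubles it), so the value read back
+    /// is not necessarily the value passed in:
+    ///
+    /// ```
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// socket.set_recv_buffer_size(64 * 1024)?;
+    /// // The effective size is whatever the OS actually allocated.
+    /// println!("SO_RCVBUF is now {} bytes", socket.recv_buffer_size()?);
+    /// # Ok(()) }
+    /// ```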
+    pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::SOL_SOCKET,
+                sys::SO_RCVBUF,
+                size as c_int,
+            )
+        }
+    }
+
+    /// Get value for the `SO_RCVTIMEO` option on this socket.
+    ///
+    /// If the returned timeout is `None`, then `read` and `recv` calls will
+    /// block indefinitely.
+    pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+        sys::timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_RCVTIMEO)
+    }
+
+    /// Set value for the `SO_RCVTIMEO` option on this socket.
+    ///
+    /// If `timeout` is `None`, then `read` and `recv` calls will block
+    /// indefinitely.
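+    ///
+    /// A minimal sketch (not part of the upstream docs):
+    ///
+    /// ```
+    /// use std::time::Duration;
+    ///
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// // Make blocking `read`/`recv` calls give up after roughly one second.
+    /// socket.set_read_timeout(Some(Duration::from_secs(1)))?;
+    /// assert!(socket.read_timeout()?.is_some());
+    /// # Ok(()) }
+    /// ```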
+    pub fn set_read_timeout(&self, duration: Option<Duration>) -> io::Result<()> {
+        sys::set_timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_RCVTIMEO, duration)
+    }
+
+    /// Get the value of the `SO_REUSEADDR` option on this socket.
+    ///
+    /// For more information about this option, see [`set_reuse_address`].
+    ///
+    /// [`set_reuse_address`]: Socket::set_reuse_address
+    pub fn reuse_address(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::SOL_SOCKET, sys::SO_REUSEADDR)
+                .map(|reuse| reuse != 0)
+        }
+    }
+
+    /// Set value for the `SO_REUSEADDR` option on this socket.
+    ///
+    /// This indicates that further calls to `bind` may allow reuse of local
+    /// addresses. For IPv4 sockets this means that a socket may bind even when
+    /// there's a socket already listening on this port.
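+    ///
+    /// A minimal sketch (not part of the upstream docs):
+    ///
+    /// ```
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// // Opt in to address reuse before binding the socket.
+    /// socket.set_reuse_address(true)?;
+    /// assert!(socket.reuse_address()?);
+    /// # Ok(()) }
+    /// ```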
+    pub fn set_reuse_address(&self, reuse: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::SOL_SOCKET,
+                sys::SO_REUSEADDR,
+                reuse as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `SO_SNDBUF` option on this socket.
+    ///
+    /// For more information about this option, see [`set_send_buffer_size`].
+    ///
+    /// [`set_send_buffer_size`]: Socket::set_send_buffer_size
+    pub fn send_buffer_size(&self) -> io::Result<usize> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::SOL_SOCKET, sys::SO_SNDBUF)
+                .map(|size| size as usize)
+        }
+    }
+
+    /// Set value for the `SO_SNDBUF` option on this socket.
+    ///
+    /// Changes the size of the operating system's send buffer associated with
+    /// the socket.
+    pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::SOL_SOCKET,
+                sys::SO_SNDBUF,
+                size as c_int,
+            )
+        }
+    }
+
+    /// Get value for the `SO_SNDTIMEO` option on this socket.
+    ///
+    /// If the returned timeout is `None`, then `write` and `send` calls will
+    /// block indefinitely.
+    pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+        sys::timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_SNDTIMEO)
+    }
+
+    /// Set value for the `SO_SNDTIMEO` option on this socket.
+    ///
+    /// If `timeout` is `None`, then `write` and `send` calls will block
+    /// indefinitely.
+    pub fn set_write_timeout(&self, duration: Option<Duration>) -> io::Result<()> {
+        sys::set_timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_SNDTIMEO, duration)
+    }
+}
+
+const fn from_linger(linger: sys::linger) -> Option<Duration> {
+    if linger.l_onoff == 0 {
+        None
+    } else {
+        Some(Duration::from_secs(linger.l_linger as u64))
+    }
+}
+
+const fn into_linger(duration: Option<Duration>) -> sys::linger {
+    match duration {
+        Some(duration) => sys::linger {
+            l_onoff: 1,
+            l_linger: duration.as_secs() as _,
+        },
+        None => sys::linger {
+            l_onoff: 0,
+            l_linger: 0,
+        },
+    }
+}
+
+/// Socket options for IPv4 sockets, get/set using `IPPROTO_IP`.
+///
+/// Additional documentation can be found in documentation of the OS.
+/// * Linux: <https://man7.org/linux/man-pages/man7/ip.7.html>
+/// * Windows: <https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options>
+impl Socket {
+    /// Get the value of the `IP_HDRINCL` option on this socket.
+    ///
+    /// For more information about this option, see [`set_header_included`].
+    ///
+    /// [`set_header_included`]: Socket::set_header_included
+    #[cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf")))))
+    )]
+    pub fn header_included(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IP, sys::IP_HDRINCL)
+                .map(|included| included != 0)
+        }
+    }
+
+    /// Set the value of the `IP_HDRINCL` option on this socket.
+    ///
+    /// If enabled, the user supplies an IP header in front of the user data.
+    /// Valid only for [`SOCK_RAW`] sockets; see [raw(7)] for more information.
+    /// When this flag is enabled, the values set by `IP_OPTIONS`, [`IP_TTL`],
+    /// and [`IP_TOS`] are ignored.
+    ///
+    /// [`SOCK_RAW`]: Type::RAW
+    /// [raw(7)]: https://man7.org/linux/man-pages/man7/raw.7.html
+    /// [`IP_TTL`]: Socket::set_ttl
+    /// [`IP_TOS`]: Socket::set_tos
+    #[cfg_attr(
+        any(target_os = "fuchsia", target_os = "illumos", target_os = "solaris"),
+        allow(rustdoc::broken_intra_doc_links)
+    )]
+    #[cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf")))))
+    )]
+    pub fn set_header_included(&self, included: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IP,
+                sys::IP_HDRINCL,
+                included as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `IP_TRANSPARENT` option on this socket.
+    ///
+    /// For more information about this option, see [`set_ip_transparent`].
+    ///
+    /// [`set_ip_transparent`]: Socket::set_ip_transparent
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn ip_transparent(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IP, libc::IP_TRANSPARENT)
+                .map(|transparent| transparent != 0)
+        }
+    }
+
+    /// Set the value of the `IP_TRANSPARENT` option on this socket.
+    ///
+    /// Setting this boolean option enables transparent proxying
+    /// on this socket.  This socket option allows the calling
+    /// application to bind to a nonlocal IP address and operate
+    /// both as a client and a server with the foreign address as
+    /// the local endpoint.  NOTE: this requires that routing be
+    /// set up in a way that packets going to the foreign address
+    /// are routed through the TProxy box (i.e., the system
+    /// hosting the application that employs the IP_TRANSPARENT
+    /// socket option).  Enabling this socket option requires
+    /// superuser privileges (the `CAP_NET_ADMIN` capability).
+    ///
+    /// TProxy redirection with the iptables TPROXY target also
+    /// requires that this option be set on the redirected socket.
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn set_ip_transparent(&self, transparent: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IP,
+                libc::IP_TRANSPARENT,
+                transparent as c_int,
+            )
+        }
+    }
+
+    /// Join a multicast group using `IP_ADD_MEMBERSHIP` option on this socket.
+    ///
+    /// This function specifies a new multicast group for this socket to join.
+    /// The address must be a valid multicast address, and `interface` is the
+    /// address of the local interface with which the system should join the
+    /// multicast group. If it's [`Ipv4Addr::UNSPECIFIED`] (`INADDR_ANY`) then
+    /// an appropriate interface is chosen by the system.
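+    ///
+    /// A sketch of joining a group on the system-chosen interface (not part of
+    /// the upstream docs; marked `no_run` because joining can fail without a
+    /// suitable network setup, and it assumes a UDP socket via `Type::DGRAM`):
+    ///
+    /// ```no_run
+    /// use std::net::Ipv4Addr;
+    ///
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
+    /// let group = Ipv4Addr::new(224, 0, 0, 251);
+    /// // `UNSPECIFIED` lets the system pick the local interface to join on.
+    /// socket.join_multicast_v4(&group, &Ipv4Addr::UNSPECIFIED)?;
+    /// # Ok(()) }
+    /// ```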
+    pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+        let mreq = sys::IpMreq {
+            imr_multiaddr: sys::to_in_addr(multiaddr),
+            imr_interface: sys::to_in_addr(interface),
+        };
+        unsafe { setsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_ADD_MEMBERSHIP, mreq) }
+    }
+
+    /// Leave a multicast group using `IP_DROP_MEMBERSHIP` option on this socket.
+    ///
+    /// For more information about this option, see [`join_multicast_v4`].
+    ///
+    /// [`join_multicast_v4`]: Socket::join_multicast_v4
+    pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+        let mreq = sys::IpMreq {
+            imr_multiaddr: sys::to_in_addr(multiaddr),
+            imr_interface: sys::to_in_addr(interface),
+        };
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IP,
+                sys::IP_DROP_MEMBERSHIP,
+                mreq,
+            )
+        }
+    }
+
+    /// Join a multicast group using `IP_ADD_MEMBERSHIP` option on this socket.
+    ///
+    /// This function specifies a new multicast group for this socket to join.
+    /// The address must be a valid multicast address, and `interface` specifies
+    /// the local interface with which the system should join the multicast
+    /// group. See [`InterfaceIndexOrAddress`].
+    #[cfg(not(any(
+        target_os = "aix",
+        target_os = "haiku",
+        target_os = "illumos",
+        target_os = "netbsd",
+        target_os = "openbsd",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "nto",
+        target_os = "espidf",
+        target_os = "vita",
+    )))]
+    pub fn join_multicast_v4_n(
+        &self,
+        multiaddr: &Ipv4Addr,
+        interface: &InterfaceIndexOrAddress,
+    ) -> io::Result<()> {
+        let mreqn = sys::to_mreqn(multiaddr, interface);
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IP,
+                sys::IP_ADD_MEMBERSHIP,
+                mreqn,
+            )
+        }
+    }
+
+    /// Leave a multicast group using `IP_DROP_MEMBERSHIP` option on this socket.
+    ///
+    /// For more information about this option, see [`join_multicast_v4_n`].
+    ///
+    /// [`join_multicast_v4_n`]: Socket::join_multicast_v4_n
+    #[cfg(not(any(
+        target_os = "aix",
+        target_os = "haiku",
+        target_os = "illumos",
+        target_os = "netbsd",
+        target_os = "openbsd",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "nto",
+        target_os = "espidf",
+        target_os = "vita",
+    )))]
+    pub fn leave_multicast_v4_n(
+        &self,
+        multiaddr: &Ipv4Addr,
+        interface: &InterfaceIndexOrAddress,
+    ) -> io::Result<()> {
+        let mreqn = sys::to_mreqn(multiaddr, interface);
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IP,
+                sys::IP_DROP_MEMBERSHIP,
+                mreqn,
+            )
+        }
+    }
+
+    /// Join a multicast SSM channel using `IP_ADD_SOURCE_MEMBERSHIP` option on this socket.
+    ///
+    /// This function specifies a new multicast channel for this socket to join.
+    /// The group must be a valid SSM group address, the source must be the address of the sender
+    /// and `interface` is the address of the local interface with which the system should join the
+    /// multicast group. If it's [`Ipv4Addr::UNSPECIFIED`] (`INADDR_ANY`) then
+    /// an appropriate interface is chosen by the system.
+    #[cfg(not(any(
+        target_os = "dragonfly",
+        target_os = "haiku",
+        target_os = "netbsd",
+        target_os = "openbsd",
+        target_os = "redox",
+        target_os = "fuchsia",
+        target_os = "nto",
+        target_os = "espidf",
+        target_os = "vita",
+    )))]
+    pub fn join_ssm_v4(
+        &self,
+        source: &Ipv4Addr,
+        group: &Ipv4Addr,
+        interface: &Ipv4Addr,
+    ) -> io::Result<()> {
+        let mreqs = sys::IpMreqSource {
+            imr_multiaddr: sys::to_in_addr(group),
+            imr_interface: sys::to_in_addr(interface),
+            imr_sourceaddr: sys::to_in_addr(source),
+        };
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IP,
+                sys::IP_ADD_SOURCE_MEMBERSHIP,
+                mreqs,
+            )
+        }
+    }
+
+    /// Leave a multicast group using `IP_DROP_SOURCE_MEMBERSHIP` option on this socket.
+    ///
+    /// For more information about this option, see [`join_ssm_v4`].
+    ///
+    /// [`join_ssm_v4`]: Socket::join_ssm_v4
+    #[cfg(not(any(
+        target_os = "dragonfly",
+        target_os = "haiku",
+        target_os = "netbsd",
+        target_os = "openbsd",
+        target_os = "redox",
+        target_os = "fuchsia",
+        target_os = "nto",
+        target_os = "espidf",
+        target_os = "vita",
+    )))]
+    pub fn leave_ssm_v4(
+        &self,
+        source: &Ipv4Addr,
+        group: &Ipv4Addr,
+        interface: &Ipv4Addr,
+    ) -> io::Result<()> {
+        let mreqs = sys::IpMreqSource {
+            imr_multiaddr: sys::to_in_addr(group),
+            imr_interface: sys::to_in_addr(interface),
+            imr_sourceaddr: sys::to_in_addr(source),
+        };
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IP,
+                sys::IP_DROP_SOURCE_MEMBERSHIP,
+                mreqs,
+            )
+        }
+    }
+
+    /// Get the value of the `IP_MULTICAST_IF` option for this socket.
+    ///
+    /// For more information about this option, see [`set_multicast_if_v4`].
+    ///
+    /// [`set_multicast_if_v4`]: Socket::set_multicast_if_v4
+    pub fn multicast_if_v4(&self) -> io::Result<Ipv4Addr> {
+        unsafe {
+            getsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_MULTICAST_IF).map(sys::from_in_addr)
+        }
+    }
+
+    /// Set the value of the `IP_MULTICAST_IF` option for this socket.
+    ///
+    /// Specifies the interface to use for routing multicast packets.
+    pub fn set_multicast_if_v4(&self, interface: &Ipv4Addr) -> io::Result<()> {
+        let interface = sys::to_in_addr(interface);
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IP,
+                sys::IP_MULTICAST_IF,
+                interface,
+            )
+        }
+    }
+
+    /// Get the value of the `IP_MULTICAST_LOOP` option for this socket.
+    ///
+    /// For more information about this option, see [`set_multicast_loop_v4`].
+    ///
+    /// [`set_multicast_loop_v4`]: Socket::set_multicast_loop_v4
+    pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IP, sys::IP_MULTICAST_LOOP)
+                .map(|loop_v4| loop_v4 != 0)
+        }
+    }
+
+    /// Set the value of the `IP_MULTICAST_LOOP` option for this socket.
+    ///
+    /// If enabled, multicast packets will be looped back to the local socket.
+    /// Note that this may not have any effect on IPv6 sockets.
+    pub fn set_multicast_loop_v4(&self, loop_v4: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IP,
+                sys::IP_MULTICAST_LOOP,
+                loop_v4 as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `IP_MULTICAST_TTL` option for this socket.
+    ///
+    /// For more information about this option, see [`set_multicast_ttl_v4`].
+    ///
+    /// [`set_multicast_ttl_v4`]: Socket::set_multicast_ttl_v4
+    pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IP, sys::IP_MULTICAST_TTL)
+                .map(|ttl| ttl as u32)
+        }
+    }
+
+    /// Set the value of the `IP_MULTICAST_TTL` option for this socket.
+    ///
+    /// Indicates the time-to-live value of outgoing multicast packets for
+    /// this socket. The default value is 1, which means that multicast packets
+    /// don't leave the local network unless explicitly requested.
+    ///
+    /// Note that this may not have any effect on IPv6 sockets.
+    pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IP,
+                sys::IP_MULTICAST_TTL,
+                ttl as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `IP_TTL` option for this socket.
+    ///
+    /// For more information about this option, see [`set_ttl`].
+    ///
+    /// [`set_ttl`]: Socket::set_ttl
+    pub fn ttl(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IP, sys::IP_TTL).map(|ttl| ttl as u32)
+        }
+    }
+
+    /// Set the value of the `IP_TTL` option for this socket.
+    ///
+    /// This value sets the time-to-live field that is used in every packet sent
+    /// from this socket.
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        unsafe { setsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_TTL, ttl as c_int) }
+    }
+
+    /// Set the value of the `IP_TOS` option for this socket.
+    ///
+    /// This value sets the type-of-service field that is used in every packet
+    /// sent from this socket.
+    ///
+    /// NOTE: <https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options>
+    /// documents that not all versions of Windows support `IP_TOS`.
+    #[cfg(not(any(
+        target_os = "fuchsia",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "illumos",
+    )))]
+    pub fn set_tos(&self, tos: u32) -> io::Result<()> {
+        unsafe { setsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_TOS, tos as c_int) }
+    }
+
+    /// Get the value of the `IP_TOS` option for this socket.
+    ///
+    /// For more information about this option, see [`set_tos`].
+    ///
+    /// NOTE: <https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options>
+    /// documents that not all versions of Windows support `IP_TOS`.
+    ///
+    /// [`set_tos`]: Socket::set_tos
+    #[cfg(not(any(
+        target_os = "fuchsia",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "illumos",
+    )))]
+    pub fn tos(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IP, sys::IP_TOS).map(|tos| tos as u32)
+        }
+    }
+
+    /// Set the value of the `IP_RECVTOS` option for this socket.
+    ///
+    /// If enabled, the `IP_TOS` ancillary message is passed with
+    /// incoming packets. It contains a byte which specifies the
+    /// Type of Service/Precedence field of the packet header.
+    #[cfg(not(any(
+        target_os = "aix",
+        target_os = "dragonfly",
+        target_os = "fuchsia",
+        target_os = "illumos",
+        target_os = "netbsd",
+        target_os = "openbsd",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "haiku",
+        target_os = "nto",
+        target_os = "espidf",
+        target_os = "vita",
+    )))]
+    pub fn set_recv_tos(&self, recv_tos: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IP,
+                sys::IP_RECVTOS,
+                recv_tos as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `IP_RECVTOS` option for this socket.
+    ///
+    /// For more information about this option, see [`set_recv_tos`].
+    ///
+    /// [`set_recv_tos`]: Socket::set_recv_tos
+    #[cfg(not(any(
+        target_os = "aix",
+        target_os = "dragonfly",
+        target_os = "fuchsia",
+        target_os = "illumos",
+        target_os = "netbsd",
+        target_os = "openbsd",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "haiku",
+        target_os = "nto",
+        target_os = "espidf",
+        target_os = "vita",
+    )))]
+    pub fn recv_tos(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IP, sys::IP_RECVTOS)
+                .map(|recv_tos| recv_tos > 0)
+        }
+    }
+}
+
+/// Socket options for IPv6 sockets, get/set using `IPPROTO_IPV6`.
+///
+/// Additional documentation can be found in documentation of the OS.
+/// * Linux: <https://man7.org/linux/man-pages/man7/ipv6.7.html>
+/// * Windows: <https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ipv6-socket-options>
+impl Socket {
+    /// Join a multicast group using `IPV6_ADD_MEMBERSHIP` option on this socket.
+    ///
+    /// Some OSs use `IPV6_JOIN_GROUP` for this option.
+    ///
+    /// This function specifies a new multicast group for this socket to join.
+    /// The address must be a valid multicast address, and `interface` is the
+    /// index of the interface to join/leave (or 0 to indicate any interface).
+    #[cfg(not(target_os = "nto"))]
+    pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+        let mreq = sys::Ipv6Mreq {
+            ipv6mr_multiaddr: sys::to_in6_addr(multiaddr),
+            // NOTE: some OSs use `c_int`, others use `c_uint`.
+            ipv6mr_interface: interface as _,
+        };
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IPV6,
+                sys::IPV6_ADD_MEMBERSHIP,
+                mreq,
+            )
+        }
+    }
+
+    /// Leave a multicast group using `IPV6_DROP_MEMBERSHIP` option on this socket.
+    ///
+    /// Some OSs use `IPV6_LEAVE_GROUP` for this option.
+    ///
+    /// For more information about this option, see [`join_multicast_v6`].
+    ///
+    /// [`join_multicast_v6`]: Socket::join_multicast_v6
+    #[cfg(not(target_os = "nto"))]
+    pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+        let mreq = sys::Ipv6Mreq {
+            ipv6mr_multiaddr: sys::to_in6_addr(multiaddr),
+            // NOTE: some OSs use `c_int`, others use `c_uint`.
+            ipv6mr_interface: interface as _,
+        };
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IPV6,
+                sys::IPV6_DROP_MEMBERSHIP,
+                mreq,
+            )
+        }
+    }
+
+    /// Get the value of the `IPV6_MULTICAST_HOPS` option for this socket.
+    ///
+    /// For more information about this option, see [`set_multicast_hops_v6`].
+    ///
+    /// [`set_multicast_hops_v6`]: Socket::set_multicast_hops_v6
+    pub fn multicast_hops_v6(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_MULTICAST_HOPS)
+                .map(|hops| hops as u32)
+        }
+    }
+
+    /// Set the value of the `IPV6_MULTICAST_HOPS` option for this socket.
+    ///
+    /// Indicates the number of "routers" (hops) that multicast packets sent
+    /// from this socket may transit. The default value is 1, which means that
+    /// multicast packets don't leave the local network unless explicitly
+    /// requested.
+    pub fn set_multicast_hops_v6(&self, hops: u32) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IPV6,
+                sys::IPV6_MULTICAST_HOPS,
+                hops as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `IPV6_MULTICAST_IF` option for this socket.
+    ///
+    /// For more information about this option, see [`set_multicast_if_v6`].
+    ///
+    /// [`set_multicast_if_v6`]: Socket::set_multicast_if_v6
+    pub fn multicast_if_v6(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_MULTICAST_IF)
+                .map(|interface| interface as u32)
+        }
+    }
+
+    /// Set the value of the `IPV6_MULTICAST_IF` option for this socket.
+    ///
+    /// Specifies the interface to use for routing multicast packets. Unlike
+    /// IPv4, this is generally required in IPv6 contexts where network routing
+    /// prefixes may overlap.
+    pub fn set_multicast_if_v6(&self, interface: u32) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IPV6,
+                sys::IPV6_MULTICAST_IF,
+                interface as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+    ///
+    /// For more information about this option, see [`set_multicast_loop_v6`].
+    ///
+    /// [`set_multicast_loop_v6`]: Socket::set_multicast_loop_v6
+    pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_MULTICAST_LOOP)
+                .map(|loop_v6| loop_v6 != 0)
+        }
+    }
+
+    /// Set the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+    ///
+    /// Controls whether this socket sees the multicast packets it sends itself.
+    /// Note that this may not have any effect on IPv4 sockets.
+    pub fn set_multicast_loop_v6(&self, loop_v6: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IPV6,
+                sys::IPV6_MULTICAST_LOOP,
+                loop_v6 as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `IPV6_UNICAST_HOPS` option for this socket.
+    ///
+    /// Specifies the hop limit for IPv6 unicast packets.
+    pub fn unicast_hops_v6(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_UNICAST_HOPS)
+                .map(|hops| hops as u32)
+        }
+    }
+
+    /// Set the value for the `IPV6_UNICAST_HOPS` option on this socket.
+    ///
+    /// Specifies the hop limit for IPv6 unicast packets.
+    pub fn set_unicast_hops_v6(&self, hops: u32) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IPV6,
+                sys::IPV6_UNICAST_HOPS,
+                hops as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `IPV6_V6ONLY` option for this socket.
+    ///
+    /// For more information about this option, see [`set_only_v6`].
+    ///
+    /// [`set_only_v6`]: Socket::set_only_v6
+    pub fn only_v6(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_V6ONLY)
+                .map(|only_v6| only_v6 != 0)
+        }
+    }
+
+    /// Set the value for the `IPV6_V6ONLY` option on this socket.
+    ///
+    /// If this is set to `true` then the socket is restricted to sending and
+    /// receiving IPv6 packets only. In this case, an IPv4 application and an
+    /// IPv6 application can each bind to the same port at the same time.
+    ///
+    /// If this is set to `false` then the socket can be used to send and
+    /// receive packets from an IPv4-mapped IPv6 address.
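+    ///
+    /// A minimal sketch (not part of the upstream docs; marked `no_run` and
+    /// assuming the host can create an `AF_INET6` socket):
+    ///
+    /// ```no_run
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV6, Type::STREAM, None)?;
+    /// // Accept IPv4-mapped addresses as well as native IPv6 ones.
+    /// socket.set_only_v6(false)?;
+    /// assert!(!socket.only_v6()?);
+    /// # Ok(()) }
+    /// ```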
+    pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IPV6,
+                sys::IPV6_V6ONLY,
+                only_v6 as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `IPV6_RECVTCLASS` option for this socket.
+    ///
+    /// For more information about this option, see [`set_recv_tclass_v6`].
+    ///
+    /// [`set_recv_tclass_v6`]: Socket::set_recv_tclass_v6
+    #[cfg(not(any(
+        target_os = "dragonfly",
+        target_os = "fuchsia",
+        target_os = "illumos",
+        target_os = "netbsd",
+        target_os = "openbsd",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "haiku",
+        target_os = "espidf",
+        target_os = "vita",
+    )))]
+    pub fn recv_tclass_v6(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_RECVTCLASS)
+                .map(|recv_tclass| recv_tclass > 0)
+        }
+    }
+
+    /// Set the value of the `IPV6_RECVTCLASS` option for this socket.
+    ///
+    /// If enabled, the `IPV6_TCLASS` ancillary message is passed with incoming
+    /// packets. It contains a byte which specifies the traffic class field of
+    /// the packet header.
+    #[cfg(not(any(
+        target_os = "dragonfly",
+        target_os = "fuchsia",
+        target_os = "illumos",
+        target_os = "netbsd",
+        target_os = "openbsd",
+        target_os = "redox",
+        target_os = "solaris",
+        target_os = "haiku",
+        target_os = "espidf",
+        target_os = "vita",
+    )))]
+    pub fn set_recv_tclass_v6(&self, recv_tclass: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_IPV6,
+                sys::IPV6_RECVTCLASS,
+                recv_tclass as c_int,
+            )
+        }
+    }
+}
+
+/// Socket options for TCP sockets, get/set using `IPPROTO_TCP`.
+///
+/// Additional documentation can be found in documentation of the OS.
+/// * Linux: <https://man7.org/linux/man-pages/man7/tcp.7.html>
+/// * Windows: <https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-tcp-socket-options>
+impl Socket {
+    /// Get the value of the `TCP_KEEPIDLE` option on this socket.
+    ///
+    /// This returns the value of `TCP_KEEPALIVE` on macOS and iOS and `TCP_KEEPIDLE` on all other
+    /// supported Unix operating systems.
+    #[cfg(all(
+        feature = "all",
+        not(any(
+            windows,
+            target_os = "haiku",
+            target_os = "openbsd",
+            target_os = "vita"
+        ))
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            not(any(
+                windows,
+                target_os = "haiku",
+                target_os = "openbsd",
+                target_os = "vita"
+            ))
+        )))
+    )]
+    pub fn keepalive_time(&self) -> io::Result<Duration> {
+        sys::keepalive_time(self.as_raw())
+    }
+
+    /// Get the value of the `TCP_KEEPINTVL` option on this socket.
+    ///
+    /// For more information about this option, see [`set_tcp_keepalive`].
+    ///
+    /// [`set_tcp_keepalive`]: Socket::set_tcp_keepalive
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "illumos",
+            target_os = "ios",
+            target_os = "linux",
+            target_os = "macos",
+            target_os = "netbsd",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "android",
+                target_os = "dragonfly",
+                target_os = "freebsd",
+                target_os = "fuchsia",
+                target_os = "illumos",
+                target_os = "ios",
+                target_os = "linux",
+                target_os = "macos",
+                target_os = "netbsd",
+                target_os = "tvos",
+                target_os = "watchos",
+            )
+        )))
+    )]
+    pub fn keepalive_interval(&self) -> io::Result<Duration> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_TCP, sys::TCP_KEEPINTVL)
+                .map(|secs| Duration::from_secs(secs as u64))
+        }
+    }
+
+    /// Get the value of the `TCP_KEEPCNT` option on this socket.
+    ///
+    /// For more information about this option, see [`set_tcp_keepalive`].
+    ///
+    /// [`set_tcp_keepalive`]: Socket::set_tcp_keepalive
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "illumos",
+            target_os = "ios",
+            target_os = "linux",
+            target_os = "macos",
+            target_os = "netbsd",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "android",
+                target_os = "dragonfly",
+                target_os = "freebsd",
+                target_os = "fuchsia",
+                target_os = "illumos",
+                target_os = "ios",
+                target_os = "linux",
+                target_os = "macos",
+                target_os = "netbsd",
+                target_os = "tvos",
+                target_os = "watchos",
+            )
+        )))
+    )]
+    pub fn keepalive_retries(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), sys::IPPROTO_TCP, sys::TCP_KEEPCNT)
+                .map(|retries| retries as u32)
+        }
+    }
+
+    /// Set parameters configuring TCP keepalive probes for this socket.
+    ///
+    /// The supported parameters depend on the operating system, and are
+    /// configured using the [`TcpKeepalive`] struct. At a minimum, all systems
+    /// support configuring the [keepalive time]: the time after which the OS
+    /// will start sending keepalive messages on an idle connection.
+    ///
+    /// [keepalive time]: TcpKeepalive::with_time
+    ///
+    /// # Notes
+    ///
+    /// * This will enable `SO_KEEPALIVE` on this socket, if it is not already
+    ///   enabled.
+    /// * On some platforms, such as Windows, any keepalive parameters *not*
+    ///   configured by the `TcpKeepalive` struct passed to this function may be
+    ///   overwritten with their default values. Therefore, this function should
+    ///   either only be called once per socket, or the same parameters should
+    ///   be passed every time it is called.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::time::Duration;
+    ///
+    /// use socket2::{Socket, TcpKeepalive, Domain, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// let keepalive = TcpKeepalive::new()
+    ///     .with_time(Duration::from_secs(4));
+    ///     // Depending on the target operating system, we may also be able to
+    ///     // configure the keepalive probe interval and/or the number of
+    ///     // retries here as well.
+    ///
+    /// socket.set_tcp_keepalive(&keepalive)?;
+    /// # Ok(()) }
+    /// ```
+    ///
+    pub fn set_tcp_keepalive(&self, params: &TcpKeepalive) -> io::Result<()> {
+        self.set_keepalive(true)?;
+        sys::set_tcp_keepalive(self.as_raw(), params)
+    }
+
+    /// Get the value of the `TCP_NODELAY` option on this socket.
+    ///
+    /// For more information about this option, see [`set_nodelay`].
+    ///
+    /// [`set_nodelay`]: Socket::set_nodelay
+    pub fn nodelay(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<Bool>(self.as_raw(), sys::IPPROTO_TCP, sys::TCP_NODELAY)
+                .map(|nodelay| nodelay != 0)
+        }
+    }
+
+    /// Set the value of the `TCP_NODELAY` option on this socket.
+    ///
+    /// If set, this option disables the Nagle algorithm. This means that
+    /// segments are always sent as soon as possible, even if there is only a
+    /// small amount of data. When not set, data is buffered until there is a
+    /// sufficient amount to send out, thereby avoiding the frequent sending of
+    /// small packets.
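+    ///
+    /// A minimal sketch (not part of the upstream docs):
+    ///
+    /// ```
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// // Disable Nagle's algorithm so small writes are sent immediately.
+    /// socket.set_nodelay(true)?;
+    /// assert!(socket.nodelay()?);
+    /// # Ok(()) }
+    /// ```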
+    pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                sys::IPPROTO_TCP,
+                sys::TCP_NODELAY,
+                nodelay as c_int,
+            )
+        }
+    }
+}
+
+impl Read for Socket {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        // Safety: the `recv` implementation promises not to write uninitialised
+        // bytes to the `buf`fer, so this casting is safe.
+        let buf = unsafe { &mut *(buf as *mut [u8] as *mut [MaybeUninit<u8>]) };
+        self.recv(buf)
+    }
+
+    #[cfg(not(target_os = "redox"))]
+    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+        // Safety: both `IoSliceMut` and `MaybeUninitSlice` promise to have the
+        // same layout, that of `iovec`/`WSABUF`. Furthermore `recv_vectored`
+        // promises not to write uninitialised bytes to the `bufs` and passes them
+        // directly to the `recvmsg` system call, so this is safe.
+        let bufs = unsafe { &mut *(bufs as *mut [IoSliceMut<'_>] as *mut [MaybeUninitSlice<'_>]) };
+        self.recv_vectored(bufs).map(|(n, _)| n)
+    }
+}
+
+impl<'a> Read for &'a Socket {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        // Safety: see other `Read::read` impl.
+        let buf = unsafe { &mut *(buf as *mut [u8] as *mut [MaybeUninit<u8>]) };
+        self.recv(buf)
+    }
+
+    #[cfg(not(target_os = "redox"))]
+    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+        // Safety: see other `Read::read` impl.
+        let bufs = unsafe { &mut *(bufs as *mut [IoSliceMut<'_>] as *mut [MaybeUninitSlice<'_>]) };
+        self.recv_vectored(bufs).map(|(n, _)| n)
+    }
+}
+
+impl Write for Socket {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.send(buf)
+    }
+
+    #[cfg(not(target_os = "redox"))]
+    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+        self.send_vectored(bufs)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+}
+
+impl<'a> Write for &'a Socket {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.send(buf)
+    }
+
+    #[cfg(not(target_os = "redox"))]
+    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+        self.send_vectored(bufs)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+}
+
+impl fmt::Debug for Socket {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Socket")
+            .field("raw", &self.as_raw())
+            .field("local_addr", &self.local_addr().ok())
+            .field("peer_addr", &self.peer_addr().ok())
+            .finish()
+    }
+}
+
+from!(net::TcpStream, Socket);
+from!(net::TcpListener, Socket);
+from!(net::UdpSocket, Socket);
+from!(Socket, net::TcpStream);
+from!(Socket, net::TcpListener);
+from!(Socket, net::UdpSocket);
diff --git a/crates/socket2/src/sockref.rs b/crates/socket2/src/sockref.rs
new file mode 100644
index 0000000..d23b7c0
--- /dev/null
+++ b/crates/socket2/src/sockref.rs
@@ -0,0 +1,123 @@
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem::ManuallyDrop;
+use std::ops::Deref;
+#[cfg(unix)]
+use std::os::unix::io::{AsFd, AsRawFd, FromRawFd};
+#[cfg(windows)]
+use std::os::windows::io::{AsRawSocket, AsSocket, FromRawSocket};
+
+use crate::Socket;
+
+/// A reference to a [`Socket`] that can be used to configure socket types other
+/// than the `Socket` type itself.
+///
+/// This allows for example a [`TcpStream`], found in the standard library, to
+/// be configured using all the additional methods found in the [`Socket`] API.
+///
+/// `SockRef` can be created from any socket type that implements [`AsFd`]
+/// (Unix) or [`AsSocket`] (Windows) using the [`From`] implementation.
+///
+/// [`TcpStream`]: std::net::TcpStream
+// Don't use intra-doc links because they won't build on every platform.
+/// [`AsFd`]: https://doc.rust-lang.org/stable/std/os/unix/io/trait.AsFd.html
+/// [`AsSocket`]: https://doc.rust-lang.org/stable/std/os/windows/io/trait.AsSocket.html
+///
+/// # Examples
+///
+/// Below is an example of converting a [`TcpStream`] into a [`SockRef`].
+///
+/// ```
+/// use std::net::{TcpStream, SocketAddr};
+///
+/// use socket2::SockRef;
+///
+/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
+/// // Create `TcpStream` from the standard library.
+/// let address: SocketAddr = "127.0.0.1:1234".parse()?;
+/// # let b1 = std::sync::Arc::new(std::sync::Barrier::new(2));
+/// # let b2 = b1.clone();
+/// # let handle = std::thread::spawn(move || {
+/// #    let listener = std::net::TcpListener::bind(address).unwrap();
+/// #    b2.wait();
+/// #    let (stream, _) = listener.accept().unwrap();
+/// #    std::thread::sleep(std::time::Duration::from_millis(10));
+/// #    drop(stream);
+/// # });
+/// # b1.wait();
+/// let stream = TcpStream::connect(address)?;
+///
+/// // Create a `SockRef`erence to the stream.
+/// let socket_ref = SockRef::from(&stream);
+/// // Use `Socket::set_nodelay` on the stream.
+/// socket_ref.set_nodelay(true)?;
+/// drop(socket_ref);
+///
+/// assert_eq!(stream.nodelay()?, true);
+/// # handle.join().unwrap();
+/// # Ok(())
+/// # }
+/// ```
+pub struct SockRef<'s> {
+    /// Because this is a reference we don't own the `Socket`, however `Socket`
+    /// closes itself when dropped, so we use `ManuallyDrop` to prevent it from
+    /// closing itself.
+    socket: ManuallyDrop<Socket>,
+    /// Because we don't own the socket we need to ensure the socket remains
+    /// open while we have a "reference" to it, the lifetime `'s` ensures this.
+    _lifetime: PhantomData<&'s Socket>,
+}
+
+impl<'s> Deref for SockRef<'s> {
+    type Target = Socket;
+
+    fn deref(&self) -> &Self::Target {
+        &self.socket
+    }
+}
+
+/// On Windows, a corresponding `From<&impl AsSocket>` implementation exists.
+#[cfg(unix)]
+#[cfg_attr(docsrs, doc(cfg(unix)))]
+impl<'s, S> From<&'s S> for SockRef<'s>
+where
+    S: AsFd,
+{
+    /// The caller must ensure `S` is actually a socket.
+    fn from(socket: &'s S) -> Self {
+        let fd = socket.as_fd().as_raw_fd();
+        assert!(fd >= 0);
+        SockRef {
+            socket: ManuallyDrop::new(unsafe { Socket::from_raw_fd(fd) }),
+            _lifetime: PhantomData,
+        }
+    }
+}
+
+/// On Unix, a corresponding `From<&impl AsFd>` implementation exists.
+#[cfg(windows)]
+#[cfg_attr(docsrs, doc(cfg(windows)))]
+impl<'s, S> From<&'s S> for SockRef<'s>
+where
+    S: AsSocket,
+{
+    /// See the `From<&impl AsFd>` implementation.
+    fn from(socket: &'s S) -> Self {
+        let socket = socket.as_socket().as_raw_socket();
+        assert!(socket != windows_sys::Win32::Networking::WinSock::INVALID_SOCKET as _);
+        SockRef {
+            socket: ManuallyDrop::new(unsafe { Socket::from_raw_socket(socket) }),
+            _lifetime: PhantomData,
+        }
+    }
+}
+
+impl fmt::Debug for SockRef<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("SockRef")
+            .field("raw", &self.socket.as_raw())
+            .field("local_addr", &self.socket.local_addr().ok())
+            .field("peer_addr", &self.socket.peer_addr().ok())
+            .finish()
+    }
+}
diff --git a/crates/socket2/src/sys/unix.rs b/crates/socket2/src/sys/unix.rs
new file mode 100644
index 0000000..c562600
--- /dev/null
+++ b/crates/socket2/src/sys/unix.rs
@@ -0,0 +1,3116 @@
+// Copyright 2015 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cmp::min;
+use std::ffi::OsStr;
+#[cfg(not(target_os = "redox"))]
+use std::io::IoSlice;
+use std::marker::PhantomData;
+use std::mem::{self, size_of, MaybeUninit};
+use std::net::Shutdown;
+use std::net::{Ipv4Addr, Ipv6Addr};
+#[cfg(all(
+    feature = "all",
+    any(
+        target_os = "ios",
+        target_os = "macos",
+        target_os = "tvos",
+        target_os = "watchos",
+    )
+))]
+use std::num::NonZeroU32;
+#[cfg(all(
+    feature = "all",
+    any(
+        target_os = "aix",
+        target_os = "android",
+        target_os = "freebsd",
+        target_os = "ios",
+        target_os = "linux",
+        target_os = "macos",
+        target_os = "tvos",
+        target_os = "watchos",
+    )
+))]
+use std::num::NonZeroUsize;
+use std::os::unix::ffi::OsStrExt;
+#[cfg(all(
+    feature = "all",
+    any(
+        target_os = "aix",
+        target_os = "android",
+        target_os = "freebsd",
+        target_os = "ios",
+        target_os = "linux",
+        target_os = "macos",
+        target_os = "tvos",
+        target_os = "watchos",
+    )
+))]
+use std::os::unix::io::RawFd;
+use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd};
+#[cfg(feature = "all")]
+use std::os::unix::net::{UnixDatagram, UnixListener, UnixStream};
+use std::path::Path;
+use std::ptr;
+use std::time::{Duration, Instant};
+use std::{io, slice};
+
+#[cfg(not(any(
+    target_os = "ios",
+    target_os = "macos",
+    target_os = "tvos",
+    target_os = "watchos",
+)))]
+use libc::ssize_t;
+use libc::{in6_addr, in_addr};
+
+use crate::{Domain, Protocol, SockAddr, TcpKeepalive, Type};
+#[cfg(not(target_os = "redox"))]
+use crate::{MsgHdr, MsgHdrMut, RecvFlags};
+
+pub(crate) use libc::c_int;
+
+// Used in `Domain`.
+pub(crate) use libc::{AF_INET, AF_INET6, AF_UNIX};
+// Used in `Type`.
+#[cfg(all(feature = "all", target_os = "linux"))]
+pub(crate) use libc::SOCK_DCCP;
+#[cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))]
+pub(crate) use libc::SOCK_RAW;
+#[cfg(all(feature = "all", not(target_os = "espidf")))]
+pub(crate) use libc::SOCK_SEQPACKET;
+pub(crate) use libc::{SOCK_DGRAM, SOCK_STREAM};
+// Used in `Protocol`.
+#[cfg(all(feature = "all", target_os = "linux"))]
+pub(crate) use libc::IPPROTO_DCCP;
+#[cfg(target_os = "linux")]
+pub(crate) use libc::IPPROTO_MPTCP;
+#[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))]
+pub(crate) use libc::IPPROTO_SCTP;
+#[cfg(all(
+    feature = "all",
+    any(
+        target_os = "android",
+        target_os = "freebsd",
+        target_os = "fuchsia",
+        target_os = "linux",
+    )
+))]
+pub(crate) use libc::IPPROTO_UDPLITE;
+pub(crate) use libc::{IPPROTO_ICMP, IPPROTO_ICMPV6, IPPROTO_TCP, IPPROTO_UDP};
+// Used in `SockAddr`.
+#[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "openbsd")))]
+pub(crate) use libc::IPPROTO_DIVERT;
+pub(crate) use libc::{
+    sa_family_t, sockaddr, sockaddr_in, sockaddr_in6, sockaddr_storage, socklen_t,
+};
+// Used in `RecvFlags`.
+#[cfg(not(any(target_os = "redox", target_os = "espidf")))]
+pub(crate) use libc::MSG_TRUNC;
+#[cfg(not(target_os = "redox"))]
+pub(crate) use libc::SO_OOBINLINE;
+// Used in `Socket`.
+#[cfg(not(target_os = "nto"))]
+pub(crate) use libc::ipv6_mreq as Ipv6Mreq;
+#[cfg(not(any(
+    target_os = "dragonfly",
+    target_os = "fuchsia",
+    target_os = "illumos",
+    target_os = "netbsd",
+    target_os = "openbsd",
+    target_os = "redox",
+    target_os = "solaris",
+    target_os = "haiku",
+    target_os = "espidf",
+    target_os = "vita",
+)))]
+pub(crate) use libc::IPV6_RECVTCLASS;
+#[cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))]
+pub(crate) use libc::IP_HDRINCL;
+#[cfg(not(any(
+    target_os = "aix",
+    target_os = "dragonfly",
+    target_os = "fuchsia",
+    target_os = "illumos",
+    target_os = "netbsd",
+    target_os = "openbsd",
+    target_os = "redox",
+    target_os = "solaris",
+    target_os = "haiku",
+    target_os = "nto",
+    target_os = "espidf",
+    target_os = "vita",
+)))]
+pub(crate) use libc::IP_RECVTOS;
+#[cfg(not(any(
+    target_os = "fuchsia",
+    target_os = "redox",
+    target_os = "solaris",
+    target_os = "illumos",
+)))]
+pub(crate) use libc::IP_TOS;
+#[cfg(not(any(
+    target_os = "ios",
+    target_os = "macos",
+    target_os = "tvos",
+    target_os = "watchos",
+)))]
+pub(crate) use libc::SO_LINGER;
+#[cfg(any(
+    target_os = "ios",
+    target_os = "macos",
+    target_os = "tvos",
+    target_os = "watchos",
+))]
+pub(crate) use libc::SO_LINGER_SEC as SO_LINGER;
+pub(crate) use libc::{
+    ip_mreq as IpMreq, linger, IPPROTO_IP, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, IPV6_MULTICAST_IF,
+    IPV6_MULTICAST_LOOP, IPV6_UNICAST_HOPS, IPV6_V6ONLY, IP_ADD_MEMBERSHIP, IP_DROP_MEMBERSHIP,
+    IP_MULTICAST_IF, IP_MULTICAST_LOOP, IP_MULTICAST_TTL, IP_TTL, MSG_OOB, MSG_PEEK, SOL_SOCKET,
+    SO_BROADCAST, SO_ERROR, SO_KEEPALIVE, SO_RCVBUF, SO_RCVTIMEO, SO_REUSEADDR, SO_SNDBUF,
+    SO_SNDTIMEO, SO_TYPE, TCP_NODELAY,
+};
+#[cfg(not(any(
+    target_os = "dragonfly",
+    target_os = "haiku",
+    target_os = "netbsd",
+    target_os = "openbsd",
+    target_os = "redox",
+    target_os = "fuchsia",
+    target_os = "nto",
+    target_os = "espidf",
+    target_os = "vita",
+)))]
+pub(crate) use libc::{
+    ip_mreq_source as IpMreqSource, IP_ADD_SOURCE_MEMBERSHIP, IP_DROP_SOURCE_MEMBERSHIP,
+};
+#[cfg(not(any(
+    target_os = "dragonfly",
+    target_os = "freebsd",
+    target_os = "haiku",
+    target_os = "illumos",
+    target_os = "ios",
+    target_os = "macos",
+    target_os = "netbsd",
+    target_os = "nto",
+    target_os = "openbsd",
+    target_os = "solaris",
+    target_os = "tvos",
+    target_os = "watchos",
+)))]
+pub(crate) use libc::{IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP};
+#[cfg(any(
+    target_os = "dragonfly",
+    target_os = "freebsd",
+    target_os = "haiku",
+    target_os = "illumos",
+    target_os = "ios",
+    target_os = "macos",
+    target_os = "netbsd",
+    target_os = "openbsd",
+    target_os = "solaris",
+    target_os = "tvos",
+    target_os = "watchos",
+))]
+pub(crate) use libc::{
+    IPV6_JOIN_GROUP as IPV6_ADD_MEMBERSHIP, IPV6_LEAVE_GROUP as IPV6_DROP_MEMBERSHIP,
+};
+#[cfg(all(
+    feature = "all",
+    any(
+        target_os = "android",
+        target_os = "dragonfly",
+        target_os = "freebsd",
+        target_os = "fuchsia",
+        target_os = "illumos",
+        target_os = "ios",
+        target_os = "linux",
+        target_os = "macos",
+        target_os = "netbsd",
+        target_os = "tvos",
+        target_os = "watchos",
+    )
+))]
+pub(crate) use libc::{TCP_KEEPCNT, TCP_KEEPINTVL};
+
+// See this type in the Windows file.
+pub(crate) type Bool = c_int;
+
+#[cfg(any(
+    target_os = "ios",
+    target_os = "macos",
+    target_os = "nto",
+    target_os = "tvos",
+    target_os = "watchos",
+))]
+use libc::TCP_KEEPALIVE as KEEPALIVE_TIME;
+#[cfg(not(any(
+    target_os = "haiku",
+    target_os = "ios",
+    target_os = "macos",
+    target_os = "nto",
+    target_os = "openbsd",
+    target_os = "tvos",
+    target_os = "watchos",
+    target_os = "vita",
+)))]
+use libc::TCP_KEEPIDLE as KEEPALIVE_TIME;
+
+/// Helper macro to execute a system call that returns an `io::Result`.
+macro_rules! syscall {
+    ($fn: ident ( $($arg: expr),* $(,)* ) ) => {{
+        #[allow(unused_unsafe)]
+        let res = unsafe { libc::$fn($($arg, )*) };
+        if res == -1 {
+            Err(std::io::Error::last_os_error())
+        } else {
+            Ok(res)
+        }
+    }};
+}
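+
+// Illustrative use only: `syscall!(listen(fd, 128))` expands to the raw
+// `libc::listen(fd, 128)` call and maps a `-1` return value to
+// `Err(io::Error::last_os_error())`, otherwise yielding `Ok(res)`.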
+
+/// Maximum size of a buffer passed to system call like `recv` and `send`.
+#[cfg(not(any(
+    target_os = "ios",
+    target_os = "macos",
+    target_os = "tvos",
+    target_os = "watchos",
+)))]
+const MAX_BUF_LEN: usize = ssize_t::MAX as usize;
+
+// The maximum read limit on most posix-like systems is `SSIZE_MAX`, with the
+// man page quoting that if the count of bytes to read is greater than
+// `SSIZE_MAX` the result is "unspecified".
+//
+// On macOS, however, apparently the 64-bit libc is either buggy or
+// intentionally showing odd behavior by rejecting any read with a size larger
+// than or equal to INT_MAX. To handle both of these the read size is capped on
+// both platforms.
+#[cfg(any(
+    target_os = "ios",
+    target_os = "macos",
+    target_os = "tvos",
+    target_os = "watchos",
+))]
+const MAX_BUF_LEN: usize = c_int::MAX as usize - 1;
+
+// TCP_CA_NAME_MAX isn't defined in user space include files (not in libc).
+#[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))]
+const TCP_CA_NAME_MAX: usize = 16;
+
+#[cfg(any(
+    all(
+        target_os = "linux",
+        any(
+            target_env = "gnu",
+            all(target_env = "uclibc", target_pointer_width = "64")
+        )
+    ),
+    target_os = "android",
+))]
+type IovLen = usize;
+
+#[cfg(any(
+    all(
+        target_os = "linux",
+        any(
+            target_env = "musl",
+            all(target_env = "uclibc", target_pointer_width = "32")
+        )
+    ),
+    target_os = "aix",
+    target_os = "dragonfly",
+    target_os = "freebsd",
+    target_os = "fuchsia",
+    target_os = "haiku",
+    target_os = "illumos",
+    target_os = "ios",
+    target_os = "macos",
+    target_os = "netbsd",
+    target_os = "nto",
+    target_os = "openbsd",
+    target_os = "solaris",
+    target_os = "tvos",
+    target_os = "watchos",
+    target_os = "espidf",
+    target_os = "vita",
+))]
+type IovLen = c_int;
+
+/// Unix only API.
+impl Domain {
+    /// Domain for low-level packet interface, corresponding to `AF_PACKET`.
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub const PACKET: Domain = Domain(libc::AF_PACKET);
+
+    /// Domain for low-level VSOCK interface, corresponding to `AF_VSOCK`.
+    #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux"))))
+    )]
+    pub const VSOCK: Domain = Domain(libc::AF_VSOCK);
+}
+
+impl_debug!(
+    Domain,
+    libc::AF_INET,
+    libc::AF_INET6,
+    libc::AF_UNIX,
+    #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))
+    )]
+    libc::AF_PACKET,
+    #[cfg(any(target_os = "android", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(any(target_os = "android", target_os = "linux"))))]
+    libc::AF_VSOCK,
+    libc::AF_UNSPEC, // = 0.
+);
+
+/// Unix only API.
+impl Type {
+    /// Set `SOCK_NONBLOCK` on the `Type`.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "illumos",
+            target_os = "linux",
+            target_os = "netbsd",
+            target_os = "openbsd"
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "android",
+                target_os = "dragonfly",
+                target_os = "freebsd",
+                target_os = "fuchsia",
+                target_os = "illumos",
+                target_os = "linux",
+                target_os = "netbsd",
+                target_os = "openbsd"
+            )
+        )))
+    )]
+    pub const fn nonblocking(self) -> Type {
+        Type(self.0 | libc::SOCK_NONBLOCK)
+    }
+
+    /// Set `SOCK_CLOEXEC` on the `Type`.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "illumos",
+            target_os = "linux",
+            target_os = "netbsd",
+            target_os = "openbsd",
+            target_os = "redox",
+            target_os = "solaris",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "android",
+                target_os = "dragonfly",
+                target_os = "freebsd",
+                target_os = "fuchsia",
+                target_os = "illumos",
+                target_os = "linux",
+                target_os = "netbsd",
+                target_os = "openbsd",
+                target_os = "redox",
+                target_os = "solaris",
+            )
+        )))
+    )]
+    pub const fn cloexec(self) -> Type {
+        self._cloexec()
+    }
+
+    #[cfg(any(
+        target_os = "android",
+        target_os = "dragonfly",
+        target_os = "freebsd",
+        target_os = "fuchsia",
+        target_os = "illumos",
+        target_os = "linux",
+        target_os = "netbsd",
+        target_os = "openbsd",
+        target_os = "redox",
+        target_os = "solaris",
+    ))]
+    pub(crate) const fn _cloexec(self) -> Type {
+        Type(self.0 | libc::SOCK_CLOEXEC)
+    }
+}
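+// A minimal sketch of how the flag helpers above compose (illustrative only,
+// assuming the `all` feature and a platform with `SOCK_NONBLOCK`/`SOCK_CLOEXEC`;
+// uses the public `socket2::{Domain, Socket, Type}` API):
+//
+//     // The flags are OR-ed into the type value passed to `socket(2)`.
+//     let ty = socket2::Type::STREAM.nonblocking().cloexec();
+//     let socket = socket2::Socket::new(socket2::Domain::IPV4, ty, None)?;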
+
+impl_debug!(
+    Type,
+    libc::SOCK_STREAM,
+    libc::SOCK_DGRAM,
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    libc::SOCK_DCCP,
+    #[cfg(not(any(target_os = "redox", target_os = "espidf")))]
+    libc::SOCK_RAW,
+    #[cfg(not(any(target_os = "redox", target_os = "haiku", target_os = "espidf")))]
+    libc::SOCK_RDM,
+    #[cfg(not(target_os = "espidf"))]
+    libc::SOCK_SEQPACKET,
+    /* TODO: add these optional bit OR-ed flags:
+    #[cfg(any(
+        target_os = "android",
+        target_os = "dragonfly",
+        target_os = "freebsd",
+        target_os = "fuchsia",
+        target_os = "linux",
+        target_os = "netbsd",
+        target_os = "openbsd"
+    ))]
+    libc::SOCK_NONBLOCK,
+    #[cfg(any(
+        target_os = "android",
+        target_os = "dragonfly",
+        target_os = "freebsd",
+        target_os = "fuchsia",
+        target_os = "linux",
+        target_os = "netbsd",
+        target_os = "openbsd"
+    ))]
+    libc::SOCK_CLOEXEC,
+    */
+);
+
+impl_debug!(
+    Protocol,
+    libc::IPPROTO_ICMP,
+    libc::IPPROTO_ICMPV6,
+    libc::IPPROTO_TCP,
+    libc::IPPROTO_UDP,
+    #[cfg(target_os = "linux")]
+    libc::IPPROTO_MPTCP,
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    libc::IPPROTO_DCCP,
+    #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))]
+    libc::IPPROTO_SCTP,
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "linux",
+        )
+    ))]
+    libc::IPPROTO_UDPLITE,
+    #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "openbsd")))]
+    libc::IPPROTO_DIVERT,
+);
+
+/// Unix-only API.
+#[cfg(not(target_os = "redox"))]
+impl RecvFlags {
+    /// Check if the message terminates a record.
+    ///
+    /// Not all socket types support the notion of records. For socket types
+    /// that do support it (such as [`SEQPACKET`]), a record is terminated by
+    /// sending a message with the end-of-record flag set.
+    ///
+    /// On Unix this corresponds to the `MSG_EOR` flag.
+    ///
+    /// [`SEQPACKET`]: Type::SEQPACKET
+    #[cfg(not(target_os = "espidf"))]
+    pub const fn is_end_of_record(self) -> bool {
+        self.0 & libc::MSG_EOR != 0
+    }
+
+    /// Check if the message contains out-of-band data.
+    ///
+    /// This is useful for protocols where you receive out-of-band data
+    /// mixed in with the normal data stream.
+    ///
+    /// On Unix this corresponds to the `MSG_OOB` flag.
+    pub const fn is_out_of_band(self) -> bool {
+        self.0 & libc::MSG_OOB != 0
+    }
+}
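+// Rough sketch of where `RecvFlags` surfaces in the public API (illustrative
+// only): vectored receives return the flags alongside the byte count, e.g.
+//
+//     let mut buf = [std::mem::MaybeUninit::<u8>::uninit(); 1024];
+//     let mut bufs = [socket2::MaybeUninitSlice::new(&mut buf)];
+//     let (n, flags) = socket.recv_vectored(&mut bufs)?;
+//     if flags.is_end_of_record() { /* sender set MSG_EOR */ }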
+
+#[cfg(not(target_os = "redox"))]
+impl std::fmt::Debug for RecvFlags {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let mut s = f.debug_struct("RecvFlags");
+        #[cfg(not(target_os = "espidf"))]
+        s.field("is_end_of_record", &self.is_end_of_record());
+        s.field("is_out_of_band", &self.is_out_of_band());
+        #[cfg(not(target_os = "espidf"))]
+        s.field("is_truncated", &self.is_truncated());
+        s.finish()
+    }
+}
+
+#[repr(transparent)]
+pub struct MaybeUninitSlice<'a> {
+    vec: libc::iovec,
+    _lifetime: PhantomData<&'a mut [MaybeUninit<u8>]>,
+}
+
+unsafe impl<'a> Send for MaybeUninitSlice<'a> {}
+
+unsafe impl<'a> Sync for MaybeUninitSlice<'a> {}
+
+impl<'a> MaybeUninitSlice<'a> {
+    pub(crate) fn new(buf: &'a mut [MaybeUninit<u8>]) -> MaybeUninitSlice<'a> {
+        MaybeUninitSlice {
+            vec: libc::iovec {
+                iov_base: buf.as_mut_ptr().cast(),
+                iov_len: buf.len(),
+            },
+            _lifetime: PhantomData,
+        }
+    }
+
+    pub(crate) fn as_slice(&self) -> &[MaybeUninit<u8>] {
+        unsafe { slice::from_raw_parts(self.vec.iov_base.cast(), self.vec.iov_len) }
+    }
+
+    pub(crate) fn as_mut_slice(&mut self) -> &mut [MaybeUninit<u8>] {
+        unsafe { slice::from_raw_parts_mut(self.vec.iov_base.cast(), self.vec.iov_len) }
+    }
+}
+
+/// Returns the offset of the `sun_path` member of the passed unix socket address.
+pub(crate) fn offset_of_path(storage: &libc::sockaddr_un) -> usize {
+    let base = storage as *const _ as usize;
+    let path = ptr::addr_of!(storage.sun_path) as usize;
+    path - base
+}
+
+#[allow(unsafe_op_in_unsafe_fn)]
+pub(crate) fn unix_sockaddr(path: &Path) -> io::Result<SockAddr> {
+    // SAFETY: a `sockaddr_storage` of all zeros is valid.
+    let mut storage = unsafe { mem::zeroed::<sockaddr_storage>() };
+    let len = {
+        let storage = unsafe { &mut *ptr::addr_of_mut!(storage).cast::<libc::sockaddr_un>() };
+
+        let bytes = path.as_os_str().as_bytes();
+        let too_long = match bytes.first() {
+            None => false,
+            // Linux abstract namespaces aren't null-terminated.
+            Some(&0) => bytes.len() > storage.sun_path.len(),
+            Some(_) => bytes.len() >= storage.sun_path.len(),
+        };
+        if too_long {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "path must be shorter than SUN_LEN",
+            ));
+        }
+
+        storage.sun_family = libc::AF_UNIX as sa_family_t;
+        // SAFETY: `bytes` and `addr.sun_path` are not overlapping and
+        // both point to valid memory.
+        // `storage` was initialized to zero above, so the path is
+        // already NULL terminated.
+        unsafe {
+            ptr::copy_nonoverlapping(
+                bytes.as_ptr(),
+                storage.sun_path.as_mut_ptr().cast(),
+                bytes.len(),
+            );
+        }
+
+        let sun_path_offset = offset_of_path(storage);
+        sun_path_offset
+            + bytes.len()
+            + match bytes.first() {
+                Some(&0) | None => 0,
+                Some(_) => 1,
+            }
+    };
+    Ok(unsafe { SockAddr::new(storage, len as socklen_t) })
+}
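+// Worked example for the length computation above (illustrative only; offsets
+// assume Linux, where `sun_path` starts at byte offset 2 of `sockaddr_un`):
+//
+//     path "/tmp/sock"  -> 9 path bytes, pathname address:
+//                          len = 2 + 9 + 1 (trailing NUL) = 12
+//     path "\0abstract" -> 9 path bytes, abstract address (leading NUL):
+//                          len = 2 + 9 + 0                = 11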
+
+// Used in `MsgHdr`.
+#[cfg(not(target_os = "redox"))]
+pub(crate) use libc::msghdr;
+
+#[cfg(not(target_os = "redox"))]
+pub(crate) fn set_msghdr_name(msg: &mut msghdr, name: &SockAddr) {
+    msg.msg_name = name.as_ptr() as *mut _;
+    msg.msg_namelen = name.len();
+}
+
+#[cfg(not(target_os = "redox"))]
+#[allow(clippy::unnecessary_cast)] // IovLen type can be `usize`.
+pub(crate) fn set_msghdr_iov(msg: &mut msghdr, ptr: *mut libc::iovec, len: usize) {
+    msg.msg_iov = ptr;
+    msg.msg_iovlen = min(len, IovLen::MAX as usize) as IovLen;
+}
+
+#[cfg(not(target_os = "redox"))]
+pub(crate) fn set_msghdr_control(msg: &mut msghdr, ptr: *mut libc::c_void, len: usize) {
+    msg.msg_control = ptr;
+    msg.msg_controllen = len as _;
+}
+
+#[cfg(not(target_os = "redox"))]
+pub(crate) fn set_msghdr_flags(msg: &mut msghdr, flags: libc::c_int) {
+    msg.msg_flags = flags;
+}
+
+#[cfg(not(target_os = "redox"))]
+pub(crate) fn msghdr_flags(msg: &msghdr) -> RecvFlags {
+    RecvFlags(msg.msg_flags)
+}
+
+/// Unix only API.
+impl SockAddr {
+    /// Constructs a `SockAddr` with the family `AF_VSOCK` and the provided CID/port.
+    ///
+    /// # Errors
+    ///
+    /// This function can never fail. In a future version of this library it will be made
+    /// infallible.
+    #[allow(unsafe_op_in_unsafe_fn)]
+    #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux"))))
+    )]
+    pub fn vsock(cid: u32, port: u32) -> SockAddr {
+        // SAFETY: a `sockaddr_storage` of all zeros is valid.
+        let mut storage = unsafe { mem::zeroed::<sockaddr_storage>() };
+        {
+            let storage: &mut libc::sockaddr_vm =
+                unsafe { &mut *((&mut storage as *mut sockaddr_storage).cast()) };
+            storage.svm_family = libc::AF_VSOCK as sa_family_t;
+            storage.svm_cid = cid;
+            storage.svm_port = port;
+        }
+        unsafe { SockAddr::new(storage, mem::size_of::<libc::sockaddr_vm>() as socklen_t) }
+    }
+
+    /// Returns this address's VSOCK CID/port if it is in the `AF_VSOCK` family,
+    /// otherwise returns `None`.
+    #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux"))))
+    )]
+    pub fn as_vsock_address(&self) -> Option<(u32, u32)> {
+        if self.family() == libc::AF_VSOCK as sa_family_t {
+            // Safety: if the ss_family field is AF_VSOCK then storage must be a sockaddr_vm.
+            let addr = unsafe { &*(self.as_ptr() as *const libc::sockaddr_vm) };
+            Some((addr.svm_cid, addr.svm_port))
+        } else {
+            None
+        }
+    }
+
+    /// Returns true if this address is an unnamed address from the `AF_UNIX` family (for local
+    /// interprocess communication), false otherwise.
+    pub fn is_unnamed(&self) -> bool {
+        self.as_sockaddr_un()
+            .map(|storage| {
+                self.len() == offset_of_path(storage) as _
+                    // On some non-linux platforms a zeroed path is returned for unnamed.
+                    // Abstract addresses only exist on Linux.
+                    // NOTE: although Fuchsia does define `AF_UNIX` it's not actually implemented.
+                    // See https://github.com/rust-lang/socket2/pull/403#discussion_r1123557978
+                    || (cfg!(not(any(target_os = "linux", target_os = "android")))
+                    && storage.sun_path[0] == 0)
+            })
+            .unwrap_or_default()
+    }
+
+    /// Returns the underlying `sockaddr_un` object if this address is from the `AF_UNIX` family,
+    /// otherwise returns `None`.
+    pub(crate) fn as_sockaddr_un(&self) -> Option<&libc::sockaddr_un> {
+        self.is_unix().then(|| {
+            // SAFETY: if unix socket, i.e. the `ss_family` field is `AF_UNIX` then storage must be
+            // a `sockaddr_un`.
+            unsafe { &*self.as_ptr().cast::<libc::sockaddr_un>() }
+        })
+    }
+
+    /// Get the length of the path bytes of the address, not including the terminating or initial
+    /// (for abstract names) null byte.
+    ///
+    /// Should not be called on unnamed addresses.
+    fn path_len(&self, storage: &libc::sockaddr_un) -> usize {
+        debug_assert!(!self.is_unnamed());
+        self.len() as usize - offset_of_path(storage) - 1
+    }
+
+    /// Get a u8 slice for the bytes of the pathname or abstract name.
+    ///
+    /// Should not be called on unnamed addresses.
+    fn path_bytes(&self, storage: &libc::sockaddr_un, abstract_name: bool) -> &[u8] {
+        debug_assert!(!self.is_unnamed());
+        // SAFETY: the pointed objects of type `i8` have the same memory layout as `u8`. The path is
+        // the last field in the storage and so its length is equal to
+        //          TOTAL_LENGTH - OFFSET_OF_PATH -1
+        // Where the 1 is either a terminating null if we have a pathname address, or the initial
+        // null byte, if it's an abstract name address. In the latter case, the path bytes start
+        // after the initial null byte, hence the `offset`.
+        // There is no safe way to convert a `&[i8]` to `&[u8]`
+        unsafe {
+            slice::from_raw_parts(
+                (storage.sun_path.as_ptr() as *const u8).offset(abstract_name as isize),
+                self.path_len(storage),
+            )
+        }
+    }
+
+    /// Returns this address as Unix `SocketAddr` if it is an `AF_UNIX` pathname
+    /// address, otherwise returns `None`.
+    pub fn as_unix(&self) -> Option<std::os::unix::net::SocketAddr> {
+        let path = self.as_pathname()?;
+        // SAFETY: we can represent this as a valid pathname, then so can the
+        // standard library.
+        Some(std::os::unix::net::SocketAddr::from_pathname(path).unwrap())
+    }
+
+    /// Returns this address as a `Path` reference if it is an `AF_UNIX`
+    /// pathname address, otherwise returns `None`.
+    pub fn as_pathname(&self) -> Option<&Path> {
+        self.as_sockaddr_un().and_then(|storage| {
+            (self.len() > offset_of_path(storage) as _ && storage.sun_path[0] != 0).then(|| {
+                let path_slice = self.path_bytes(storage, false);
+                Path::new::<OsStr>(OsStrExt::from_bytes(path_slice))
+            })
+        })
+    }
+
+    /// Returns this address as a slice of bytes representing an abstract address if it is an
+    /// `AF_UNIX` abstract address, otherwise returns `None`.
+    ///
+    /// Abstract addresses are a Linux extension, so this method returns `None` on all non-Linux
+    /// platforms.
+    pub fn as_abstract_namespace(&self) -> Option<&[u8]> {
+        // NOTE: although Fuchsia does define `AF_UNIX` it's not actually implemented.
+        // See https://github.com/rust-lang/socket2/pull/403#discussion_r1123557978
+        #[cfg(any(target_os = "linux", target_os = "android"))]
+        {
+            self.as_sockaddr_un().and_then(|storage| {
+                (self.len() > offset_of_path(storage) as _ && storage.sun_path[0] == 0)
+                    .then(|| self.path_bytes(storage, true))
+            })
+        }
+        #[cfg(not(any(target_os = "linux", target_os = "android")))]
+        None
+    }
+}
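+// Rough round-trip sketch for the address helpers above (illustrative only;
+// `SockAddr::unix` and `SockAddr::vsock` are the public constructors assumed
+// here, and VSOCK requires the `all` feature on Linux/Android):
+//
+//     let addr = socket2::SockAddr::unix("/tmp/sock")?;
+//     assert_eq!(addr.as_pathname(), Some(std::path::Path::new("/tmp/sock")));
+//     assert!(!addr.is_unnamed());
+//
+//     let vsock = socket2::SockAddr::vsock(3, 1234);
+//     assert_eq!(vsock.as_vsock_address(), Some((3, 1234)));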
+
+pub(crate) type Socket = c_int;
+
+pub(crate) unsafe fn socket_from_raw(socket: Socket) -> crate::socket::Inner {
+    crate::socket::Inner::from_raw_fd(socket)
+}
+
+pub(crate) fn socket_as_raw(socket: &crate::socket::Inner) -> Socket {
+    socket.as_raw_fd()
+}
+
+pub(crate) fn socket_into_raw(socket: crate::socket::Inner) -> Socket {
+    socket.into_raw_fd()
+}
+
+pub(crate) fn socket(family: c_int, ty: c_int, protocol: c_int) -> io::Result<Socket> {
+    syscall!(socket(family, ty, protocol))
+}
+
+#[cfg(all(feature = "all", unix))]
+#[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))]
+pub(crate) fn socketpair(family: c_int, ty: c_int, protocol: c_int) -> io::Result<[Socket; 2]> {
+    let mut fds = [0, 0];
+    syscall!(socketpair(family, ty, protocol, fds.as_mut_ptr())).map(|_| fds)
+}
+
+pub(crate) fn bind(fd: Socket, addr: &SockAddr) -> io::Result<()> {
+    syscall!(bind(fd, addr.as_ptr(), addr.len() as _)).map(|_| ())
+}
+
+pub(crate) fn connect(fd: Socket, addr: &SockAddr) -> io::Result<()> {
+    syscall!(connect(fd, addr.as_ptr(), addr.len())).map(|_| ())
+}
+
+pub(crate) fn poll_connect(socket: &crate::Socket, timeout: Duration) -> io::Result<()> {
+    let start = Instant::now();
+
+    let mut pollfd = libc::pollfd {
+        fd: socket.as_raw(),
+        events: libc::POLLIN | libc::POLLOUT,
+        revents: 0,
+    };
+
+    loop {
+        let elapsed = start.elapsed();
+        if elapsed >= timeout {
+            return Err(io::ErrorKind::TimedOut.into());
+        }
+
+        let timeout = (timeout - elapsed).as_millis();
+        let timeout = timeout.clamp(1, c_int::MAX as u128) as c_int;
+
+        match syscall!(poll(&mut pollfd, 1, timeout)) {
+            Ok(0) => return Err(io::ErrorKind::TimedOut.into()),
+            Ok(_) => {
+                // Error or hang up indicates an error (or failure to connect).
+                if (pollfd.revents & libc::POLLHUP) != 0 || (pollfd.revents & libc::POLLERR) != 0 {
+                    match socket.take_error() {
+                        Ok(Some(err)) | Err(err) => return Err(err),
+                        Ok(None) => {
+                            return Err(io::Error::new(
+                                io::ErrorKind::Other,
+                                "no error set after POLLHUP",
+                            ))
+                        }
+                    }
+                }
+                return Ok(());
+            }
+            // Got interrupted, try again.
+            Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue,
+            Err(err) => return Err(err),
+        }
+    }
+}
+
+pub(crate) fn listen(fd: Socket, backlog: c_int) -> io::Result<()> {
+    syscall!(listen(fd, backlog)).map(|_| ())
+}
+
+pub(crate) fn accept(fd: Socket) -> io::Result<(Socket, SockAddr)> {
+    // Safety: `accept` initialises the `SockAddr` for us.
+    unsafe { SockAddr::try_init(|storage, len| syscall!(accept(fd, storage.cast(), len))) }
+}
+
+pub(crate) fn getsockname(fd: Socket) -> io::Result<SockAddr> {
+    // Safety: `getsockname` initialises the `SockAddr` for us.
+    unsafe { SockAddr::try_init(|storage, len| syscall!(getsockname(fd, storage.cast(), len))) }
+        .map(|(_, addr)| addr)
+}
+
+pub(crate) fn getpeername(fd: Socket) -> io::Result<SockAddr> {
+    // Safety: `getpeername` initialises the `SockAddr` for us.
+    unsafe { SockAddr::try_init(|storage, len| syscall!(getpeername(fd, storage.cast(), len))) }
+        .map(|(_, addr)| addr)
+}
+
+pub(crate) fn try_clone(fd: Socket) -> io::Result<Socket> {
+    syscall!(fcntl(fd, libc::F_DUPFD_CLOEXEC, 0))
+}
+
+#[cfg(all(feature = "all", unix, not(target_os = "vita")))]
+pub(crate) fn nonblocking(fd: Socket) -> io::Result<bool> {
+    let file_status_flags = fcntl_get(fd, libc::F_GETFL)?;
+    Ok((file_status_flags & libc::O_NONBLOCK) != 0)
+}
+
+#[cfg(all(feature = "all", target_os = "vita"))]
+pub(crate) fn nonblocking(fd: Socket) -> io::Result<bool> {
+    unsafe {
+        getsockopt::<Bool>(fd, libc::SOL_SOCKET, libc::SO_NONBLOCK).map(|non_block| non_block != 0)
+    }
+}
+
+#[cfg(not(target_os = "vita"))]
+pub(crate) fn set_nonblocking(fd: Socket, nonblocking: bool) -> io::Result<()> {
+    if nonblocking {
+        fcntl_add(fd, libc::F_GETFL, libc::F_SETFL, libc::O_NONBLOCK)
+    } else {
+        fcntl_remove(fd, libc::F_GETFL, libc::F_SETFL, libc::O_NONBLOCK)
+    }
+}
+
+#[cfg(target_os = "vita")]
+pub(crate) fn set_nonblocking(fd: Socket, nonblocking: bool) -> io::Result<()> {
+    unsafe {
+        setsockopt(
+            fd,
+            libc::SOL_SOCKET,
+            libc::SO_NONBLOCK,
+            nonblocking as libc::c_int,
+        )
+    }
+}
+
+pub(crate) fn shutdown(fd: Socket, how: Shutdown) -> io::Result<()> {
+    let how = match how {
+        Shutdown::Write => libc::SHUT_WR,
+        Shutdown::Read => libc::SHUT_RD,
+        Shutdown::Both => libc::SHUT_RDWR,
+    };
+    syscall!(shutdown(fd, how)).map(|_| ())
+}
+
+pub(crate) fn recv(fd: Socket, buf: &mut [MaybeUninit<u8>], flags: c_int) -> io::Result<usize> {
+    syscall!(recv(
+        fd,
+        buf.as_mut_ptr().cast(),
+        min(buf.len(), MAX_BUF_LEN),
+        flags,
+    ))
+    .map(|n| n as usize)
+}
+
+pub(crate) fn recv_from(
+    fd: Socket,
+    buf: &mut [MaybeUninit<u8>],
+    flags: c_int,
+) -> io::Result<(usize, SockAddr)> {
+    // Safety: `recvfrom` initialises the `SockAddr` for us.
+    unsafe {
+        SockAddr::try_init(|addr, addrlen| {
+            syscall!(recvfrom(
+                fd,
+                buf.as_mut_ptr().cast(),
+                min(buf.len(), MAX_BUF_LEN),
+                flags,
+                addr.cast(),
+                addrlen
+            ))
+            .map(|n| n as usize)
+        })
+    }
+}
+
+pub(crate) fn peek_sender(fd: Socket) -> io::Result<SockAddr> {
+    // Unix-like platforms simply truncate the returned data, so this implementation is trivial.
+    // Windows, however, reports a `WSAEMSGSIZE` error that has to be suppressed, so it needs a
+    // different approach there.
+    // NOTE: macOS does not populate `sockaddr` if you pass a zero-sized buffer.
+    let (_, sender) = recv_from(fd, &mut [MaybeUninit::uninit(); 8], MSG_PEEK)?;
+    Ok(sender)
+}
+
+#[cfg(not(target_os = "redox"))]
+pub(crate) fn recv_vectored(
+    fd: Socket,
+    bufs: &mut [crate::MaybeUninitSlice<'_>],
+    flags: c_int,
+) -> io::Result<(usize, RecvFlags)> {
+    let mut msg = MsgHdrMut::new().with_buffers(bufs);
+    let n = recvmsg(fd, &mut msg, flags)?;
+    Ok((n, msg.flags()))
+}
+
+#[cfg(not(target_os = "redox"))]
+pub(crate) fn recv_from_vectored(
+    fd: Socket,
+    bufs: &mut [crate::MaybeUninitSlice<'_>],
+    flags: c_int,
+) -> io::Result<(usize, RecvFlags, SockAddr)> {
+    let mut msg = MsgHdrMut::new().with_buffers(bufs);
+    // SAFETY: `recvmsg` initialises the address storage and we set the length
+    // manually.
+    let (n, addr) = unsafe {
+        SockAddr::try_init(|storage, len| {
+            msg.inner.msg_name = storage.cast();
+            msg.inner.msg_namelen = *len;
+            let n = recvmsg(fd, &mut msg, flags)?;
+            // Set the correct address length.
+            *len = msg.inner.msg_namelen;
+            Ok(n)
+        })?
+    };
+    Ok((n, msg.flags(), addr))
+}
+
+#[cfg(not(target_os = "redox"))]
+pub(crate) fn recvmsg(
+    fd: Socket,
+    msg: &mut MsgHdrMut<'_, '_, '_>,
+    flags: c_int,
+) -> io::Result<usize> {
+    syscall!(recvmsg(fd, &mut msg.inner, flags)).map(|n| n as usize)
+}
+
+pub(crate) fn send(fd: Socket, buf: &[u8], flags: c_int) -> io::Result<usize> {
+    syscall!(send(
+        fd,
+        buf.as_ptr().cast(),
+        min(buf.len(), MAX_BUF_LEN),
+        flags,
+    ))
+    .map(|n| n as usize)
+}
+
+#[cfg(not(target_os = "redox"))]
+pub(crate) fn send_vectored(fd: Socket, bufs: &[IoSlice<'_>], flags: c_int) -> io::Result<usize> {
+    let msg = MsgHdr::new().with_buffers(bufs);
+    sendmsg(fd, &msg, flags)
+}
+
+pub(crate) fn send_to(fd: Socket, buf: &[u8], addr: &SockAddr, flags: c_int) -> io::Result<usize> {
+    syscall!(sendto(
+        fd,
+        buf.as_ptr().cast(),
+        min(buf.len(), MAX_BUF_LEN),
+        flags,
+        addr.as_ptr(),
+        addr.len(),
+    ))
+    .map(|n| n as usize)
+}
+
+#[cfg(not(target_os = "redox"))]
+pub(crate) fn send_to_vectored(
+    fd: Socket,
+    bufs: &[IoSlice<'_>],
+    addr: &SockAddr,
+    flags: c_int,
+) -> io::Result<usize> {
+    let msg = MsgHdr::new().with_addr(addr).with_buffers(bufs);
+    sendmsg(fd, &msg, flags)
+}
+
+#[cfg(not(target_os = "redox"))]
+pub(crate) fn sendmsg(fd: Socket, msg: &MsgHdr<'_, '_, '_>, flags: c_int) -> io::Result<usize> {
+    syscall!(sendmsg(fd, &msg.inner, flags)).map(|n| n as usize)
+}
+
+/// Wrapper around `getsockopt` to deal with platform specific timeouts.
+pub(crate) fn timeout_opt(fd: Socket, opt: c_int, val: c_int) -> io::Result<Option<Duration>> {
+    unsafe { getsockopt(fd, opt, val).map(from_timeval) }
+}
+
+const fn from_timeval(duration: libc::timeval) -> Option<Duration> {
+    if duration.tv_sec == 0 && duration.tv_usec == 0 {
+        None
+    } else {
+        let sec = duration.tv_sec as u64;
+        let nsec = (duration.tv_usec as u32) * 1000;
+        Some(Duration::new(sec, nsec))
+    }
+}
+
+/// Wrapper around `setsockopt` to deal with platform specific timeouts.
+pub(crate) fn set_timeout_opt(
+    fd: Socket,
+    opt: c_int,
+    val: c_int,
+    duration: Option<Duration>,
+) -> io::Result<()> {
+    let duration = into_timeval(duration);
+    unsafe { setsockopt(fd, opt, val, duration) }
+}
+
+fn into_timeval(duration: Option<Duration>) -> libc::timeval {
+    match duration {
+        // https://github.com/rust-lang/libc/issues/1848
+        #[cfg_attr(target_env = "musl", allow(deprecated))]
+        Some(duration) => libc::timeval {
+            tv_sec: min(duration.as_secs(), libc::time_t::MAX as u64) as libc::time_t,
+            tv_usec: duration.subsec_micros() as libc::suseconds_t,
+        },
+        None => libc::timeval {
+            tv_sec: 0,
+            tv_usec: 0,
+        },
+    }
+}
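+// Worked example of the timeout conversions above (illustrative only): a
+// `Duration` of 2.5s becomes `timeval { tv_sec: 2, tv_usec: 500_000 }`, while
+// `None` becomes the all-zero `timeval`, which `from_timeval` maps back to
+// `None` (a zero timeout means "no timeout").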
+
+#[cfg(all(
+    feature = "all",
+    not(any(target_os = "haiku", target_os = "openbsd", target_os = "vita"))
+))]
+#[cfg_attr(
+    docsrs,
+    doc(cfg(all(
+        feature = "all",
+        not(any(target_os = "haiku", target_os = "openbsd", target_os = "vita"))
+    )))
+)]
+pub(crate) fn keepalive_time(fd: Socket) -> io::Result<Duration> {
+    unsafe {
+        getsockopt::<c_int>(fd, IPPROTO_TCP, KEEPALIVE_TIME)
+            .map(|secs| Duration::from_secs(secs as u64))
+    }
+}
+
+#[allow(unused_variables)]
+pub(crate) fn set_tcp_keepalive(fd: Socket, keepalive: &TcpKeepalive) -> io::Result<()> {
+    #[cfg(not(any(
+        target_os = "haiku",
+        target_os = "openbsd",
+        target_os = "nto",
+        target_os = "vita"
+    )))]
+    if let Some(time) = keepalive.time {
+        let secs = into_secs(time);
+        unsafe { setsockopt(fd, libc::IPPROTO_TCP, KEEPALIVE_TIME, secs)? }
+    }
+
+    #[cfg(any(
+        target_os = "aix",
+        target_os = "android",
+        target_os = "dragonfly",
+        target_os = "freebsd",
+        target_os = "fuchsia",
+        target_os = "illumos",
+        target_os = "ios",
+        target_os = "linux",
+        target_os = "macos",
+        target_os = "netbsd",
+        target_os = "tvos",
+        target_os = "watchos",
+    ))]
+    {
+        if let Some(interval) = keepalive.interval {
+            let secs = into_secs(interval);
+            unsafe { setsockopt(fd, libc::IPPROTO_TCP, libc::TCP_KEEPINTVL, secs)? }
+        }
+
+        if let Some(retries) = keepalive.retries {
+            unsafe { setsockopt(fd, libc::IPPROTO_TCP, libc::TCP_KEEPCNT, retries as c_int)? }
+        }
+    }
+
+    #[cfg(target_os = "nto")]
+    if let Some(time) = keepalive.time {
+        let secs = into_timeval(Some(time));
+        unsafe { setsockopt(fd, libc::IPPROTO_TCP, KEEPALIVE_TIME, secs)? }
+    }
+
+    Ok(())
+}
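+// A rough sketch of the public API that drives the helper above (illustrative
+// only; `with_interval`/`with_retries` are gated on the platforms listed in
+// the cfg above):
+//
+//     let keepalive = socket2::TcpKeepalive::new()
+//         .with_time(std::time::Duration::from_secs(60))
+//         .with_interval(std::time::Duration::from_secs(10))
+//         .with_retries(4);
+//     socket.set_tcp_keepalive(&keepalive)?;
+//
+// which translates into the `TCP_KEEPIDLE`/`TCP_KEEPALIVE`, `TCP_KEEPINTVL`
+// and `TCP_KEEPCNT` options set here.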
+
+#[cfg(not(any(
+    target_os = "haiku",
+    target_os = "openbsd",
+    target_os = "nto",
+    target_os = "vita"
+)))]
+fn into_secs(duration: Duration) -> c_int {
+    min(duration.as_secs(), c_int::MAX as u64) as c_int
+}
+
+/// Get the flags using `cmd`.
+#[cfg(not(target_os = "vita"))]
+fn fcntl_get(fd: Socket, cmd: c_int) -> io::Result<c_int> {
+    syscall!(fcntl(fd, cmd))
+}
+
+/// Add `flag` to the current set of flags returned by `get_cmd`.
+#[cfg(not(target_os = "vita"))]
+fn fcntl_add(fd: Socket, get_cmd: c_int, set_cmd: c_int, flag: c_int) -> io::Result<()> {
+    let previous = fcntl_get(fd, get_cmd)?;
+    let new = previous | flag;
+    if new != previous {
+        syscall!(fcntl(fd, set_cmd, new)).map(|_| ())
+    } else {
+        // Flag was already set.
+        Ok(())
+    }
+}
+
+/// Remove `flag` from the current set of flags returned by `get_cmd`.
+#[cfg(not(target_os = "vita"))]
+fn fcntl_remove(fd: Socket, get_cmd: c_int, set_cmd: c_int, flag: c_int) -> io::Result<()> {
+    let previous = fcntl_get(fd, get_cmd)?;
+    let new = previous & !flag;
+    if new != previous {
+        syscall!(fcntl(fd, set_cmd, new)).map(|_| ())
+    } else {
+        // Flag wasn't set.
+        Ok(())
+    }
+}
+
+/// Caller must ensure `T` is the correct type for `opt` and `val`.
+pub(crate) unsafe fn getsockopt<T>(fd: Socket, opt: c_int, val: c_int) -> io::Result<T> {
+    let mut payload: MaybeUninit<T> = MaybeUninit::uninit();
+    let mut len = size_of::<T>() as libc::socklen_t;
+    syscall!(getsockopt(
+        fd,
+        opt,
+        val,
+        payload.as_mut_ptr().cast(),
+        &mut len,
+    ))
+    .map(|_| {
+        debug_assert_eq!(len as usize, size_of::<T>());
+        // Safety: `getsockopt` initialised `payload` for us.
+        payload.assume_init()
+    })
+}
+
+/// Caller must ensure `T` is the correct type for `opt` and `val`.
+pub(crate) unsafe fn setsockopt<T>(
+    fd: Socket,
+    opt: c_int,
+    val: c_int,
+    payload: T,
+) -> io::Result<()> {
+    let payload = ptr::addr_of!(payload).cast();
+    syscall!(setsockopt(
+        fd,
+        opt,
+        val,
+        payload,
+        mem::size_of::<T>() as libc::socklen_t,
+    ))
+    .map(|_| ())
+}
+
+pub(crate) const fn to_in_addr(addr: &Ipv4Addr) -> in_addr {
+    // `s_addr` is stored as BE on all machines, and the array is in BE order.
+    // So the native endian conversion method is used so that it's never
+    // swapped.
+    in_addr {
+        s_addr: u32::from_ne_bytes(addr.octets()),
+    }
+}
+
+pub(crate) fn from_in_addr(in_addr: in_addr) -> Ipv4Addr {
+    Ipv4Addr::from(in_addr.s_addr.to_ne_bytes())
+}
+
+pub(crate) const fn to_in6_addr(addr: &Ipv6Addr) -> in6_addr {
+    in6_addr {
+        s6_addr: addr.octets(),
+    }
+}
+
+pub(crate) fn from_in6_addr(addr: in6_addr) -> Ipv6Addr {
+    Ipv6Addr::from(addr.s6_addr)
+}
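+// Worked example for the conversions above (illustrative only): for
+// `Ipv4Addr::new(192, 0, 2, 1)`, `octets()` is `[192, 0, 2, 1]` and
+// `u32::from_ne_bytes` keeps that byte order in memory, so `s_addr` already
+// holds the address in network byte order without an explicit `htonl`.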
+
+#[cfg(not(any(
+    target_os = "aix",
+    target_os = "haiku",
+    target_os = "illumos",
+    target_os = "netbsd",
+    target_os = "openbsd",
+    target_os = "redox",
+    target_os = "solaris",
+    target_os = "nto",
+    target_os = "espidf",
+    target_os = "vita",
+)))]
+pub(crate) const fn to_mreqn(
+    multiaddr: &Ipv4Addr,
+    interface: &crate::socket::InterfaceIndexOrAddress,
+) -> libc::ip_mreqn {
+    match interface {
+        crate::socket::InterfaceIndexOrAddress::Index(interface) => libc::ip_mreqn {
+            imr_multiaddr: to_in_addr(multiaddr),
+            imr_address: to_in_addr(&Ipv4Addr::UNSPECIFIED),
+            imr_ifindex: *interface as _,
+        },
+        crate::socket::InterfaceIndexOrAddress::Address(interface) => libc::ip_mreqn {
+            imr_multiaddr: to_in_addr(multiaddr),
+            imr_address: to_in_addr(interface),
+            imr_ifindex: 0,
+        },
+    }
+}
+
+/// Unix only API.
+impl crate::Socket {
+    /// Accept a new incoming connection from this listener.
+    ///
+    /// This function directly corresponds to the `accept4(2)` function.
+    ///
+    /// This function will block the calling thread until a new connection is
+    /// established. When established, the corresponding `Socket` and the remote
+    /// peer's address will be returned.
+    #[doc = man_links!(unix: accept4(2))]
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "illumos",
+            target_os = "linux",
+            target_os = "netbsd",
+            target_os = "openbsd",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "android",
+                target_os = "dragonfly",
+                target_os = "freebsd",
+                target_os = "fuchsia",
+                target_os = "illumos",
+                target_os = "linux",
+                target_os = "netbsd",
+                target_os = "openbsd",
+            )
+        )))
+    )]
+    pub fn accept4(&self, flags: c_int) -> io::Result<(crate::Socket, SockAddr)> {
+        self._accept4(flags)
+    }
+
+    #[cfg(any(
+        target_os = "android",
+        target_os = "dragonfly",
+        target_os = "freebsd",
+        target_os = "fuchsia",
+        target_os = "illumos",
+        target_os = "linux",
+        target_os = "netbsd",
+        target_os = "openbsd",
+    ))]
+    pub(crate) fn _accept4(&self, flags: c_int) -> io::Result<(crate::Socket, SockAddr)> {
+        // Safety: `accept4` initialises the `SockAddr` for us.
+        unsafe {
+            SockAddr::try_init(|storage, len| {
+                syscall!(accept4(self.as_raw(), storage.cast(), len, flags))
+                    .map(crate::Socket::from_raw)
+            })
+        }
+    }
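+    // Rough usage sketch for `accept4` above (illustrative only; requires the
+    // `all` feature and one of the platforms in the cfg list):
+    //
+    //     let (conn, peer) = listener.accept4(libc::SOCK_CLOEXEC | libc::SOCK_NONBLOCK)?;
+    //
+    // which avoids the separate `fcntl` round-trips that `set_cloexec` and
+    // `set_nonblocking` would otherwise need.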
+
+    /// Sets `CLOEXEC` on the socket.
+    ///
+    /// # Notes
+    ///
+    /// On supported platforms you can use [`Type::cloexec`].
+    #[cfg_attr(
+        any(
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "tvos",
+            target_os = "watchos"
+        ),
+        allow(rustdoc::broken_intra_doc_links)
+    )]
+    #[cfg(all(feature = "all", not(target_os = "vita")))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))]
+    pub fn set_cloexec(&self, close_on_exec: bool) -> io::Result<()> {
+        self._set_cloexec(close_on_exec)
+    }
+
+    #[cfg(not(target_os = "vita"))]
+    pub(crate) fn _set_cloexec(&self, close_on_exec: bool) -> io::Result<()> {
+        if close_on_exec {
+            fcntl_add(
+                self.as_raw(),
+                libc::F_GETFD,
+                libc::F_SETFD,
+                libc::FD_CLOEXEC,
+            )
+        } else {
+            fcntl_remove(
+                self.as_raw(),
+                libc::F_GETFD,
+                libc::F_SETFD,
+                libc::FD_CLOEXEC,
+            )
+        }
+    }
+
+    /// Sets `SO_NOSIGPIPE` on the socket.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "ios",
+                target_os = "macos",
+                target_os = "tvos",
+                target_os = "watchos",
+            )
+        )))
+    )]
+    pub fn set_nosigpipe(&self, nosigpipe: bool) -> io::Result<()> {
+        self._set_nosigpipe(nosigpipe)
+    }
+
+    #[cfg(any(
+        target_os = "ios",
+        target_os = "macos",
+        target_os = "tvos",
+        target_os = "watchos",
+    ))]
+    pub(crate) fn _set_nosigpipe(&self, nosigpipe: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_SOCKET,
+                libc::SO_NOSIGPIPE,
+                nosigpipe as c_int,
+            )
+        }
+    }
+
+    /// Gets the value of the `TCP_MAXSEG` option on this socket.
+    ///
+    /// For more information about this option, see [`set_mss`].
+    ///
+    /// [`set_mss`]: crate::Socket::set_mss
+    #[cfg(all(feature = "all", not(target_os = "redox")))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix, not(target_os = "redox")))))]
+    pub fn mss(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_MAXSEG)
+                .map(|mss| mss as u32)
+        }
+    }
+
+    /// Sets the value of the `TCP_MAXSEG` option on this socket.
+    ///
+    /// The `TCP_MAXSEG` option denotes the TCP Maximum Segment Size and is only
+    /// available on TCP sockets.
+    #[cfg(all(feature = "all", not(target_os = "redox")))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix, not(target_os = "redox")))))]
+    pub fn set_mss(&self, mss: u32) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::IPPROTO_TCP,
+                libc::TCP_MAXSEG,
+                mss as c_int,
+            )
+        }
+    }
+
+    /// Returns `true` if `listen(2)` was called on this socket by checking the
+    /// `SO_ACCEPTCONN` option on this socket.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "aix",
+            target_os = "android",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "linux",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "aix",
+                target_os = "android",
+                target_os = "freebsd",
+                target_os = "fuchsia",
+                target_os = "linux",
+            )
+        )))
+    )]
+    pub fn is_listener(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), libc::SOL_SOCKET, libc::SO_ACCEPTCONN)
+                .map(|v| v != 0)
+        }
+    }
+
+    /// Returns the [`Domain`] of this socket by checking the `SO_DOMAIN` option
+    /// on this socket.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            // TODO: add FreeBSD.
+            // target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "linux",
+        )
+    ))]
+    #[cfg_attr(docsrs, doc(cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            // TODO: add FreeBSD.
+            // target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "linux",
+        )
+    ))))]
+    pub fn domain(&self) -> io::Result<Domain> {
+        unsafe { getsockopt::<c_int>(self.as_raw(), libc::SOL_SOCKET, libc::SO_DOMAIN).map(Domain) }
+    }
+
+    /// Returns the [`Protocol`] of this socket by checking the `SO_PROTOCOL`
+    /// option on this socket.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "linux",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "android",
+                target_os = "freebsd",
+                target_os = "fuchsia",
+                target_os = "linux",
+            )
+        )))
+    )]
+    pub fn protocol(&self) -> io::Result<Option<Protocol>> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), libc::SOL_SOCKET, libc::SO_PROTOCOL).map(|v| match v
+            {
+                0 => None,
+                p => Some(Protocol(p)),
+            })
+        }
+    }
+
+    /// Gets the value for the `SO_MARK` option on this socket.
+    ///
+    /// This value gets the socket mark field for each packet sent through
+    /// this socket.
+    ///
+    /// On Linux this function requires the `CAP_NET_ADMIN` capability.
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn mark(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), libc::SOL_SOCKET, libc::SO_MARK)
+                .map(|mark| mark as u32)
+        }
+    }
+
+    /// Sets the value for the `SO_MARK` option on this socket.
+    ///
+    /// This value sets the socket mark field for each packet sent through
+    /// this socket. Changing the mark can be used for mark-based routing
+    /// without netfilter or for packet filtering.
+    ///
+    /// On Linux this function requires the `CAP_NET_ADMIN` capability.
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn set_mark(&self, mark: u32) -> io::Result<()> {
+        unsafe {
+            setsockopt::<c_int>(
+                self.as_raw(),
+                libc::SOL_SOCKET,
+                libc::SO_MARK,
+                mark as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `TCP_CORK` option on this socket.
+    ///
+    /// For more information about this option, see [`set_cork`].
+    ///
+    /// [`set_cork`]: crate::Socket::set_cork
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn cork(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<Bool>(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_CORK)
+                .map(|cork| cork != 0)
+        }
+    }
+
+    /// Set the value of the `TCP_CORK` option on this socket.
+    ///
+    /// If set, don't send out partial frames. All queued partial frames are
+    /// sent when the option is cleared again. There is a 200 millisecond ceiling on
+    /// the time for which output is corked by `TCP_CORK`. If this ceiling is reached,
+    /// then queued data is automatically transmitted.
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn set_cork(&self, cork: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::IPPROTO_TCP,
+                libc::TCP_CORK,
+                cork as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `TCP_QUICKACK` option on this socket.
+    ///
+    /// For more information about this option, see [`set_quickack`].
+    ///
+    /// [`set_quickack`]: crate::Socket::set_quickack
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn quickack(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<Bool>(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_QUICKACK)
+                .map(|quickack| quickack != 0)
+        }
+    }
+
+    /// Set the value of the `TCP_QUICKACK` option on this socket.
+    ///
+    /// If set, ACKs are sent immediately, rather than delayed in accordance with normal
+    /// TCP operation. This flag is not permanent; it only enables a switch to or from quickack mode.
+    /// Subsequent operation of the TCP protocol will once again enter or leave quickack mode depending
+    /// on internal protocol processing and factors such as delayed ACK timeouts and data transfer.
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn set_quickack(&self, quickack: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::IPPROTO_TCP,
+                libc::TCP_QUICKACK,
+                quickack as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `TCP_THIN_LINEAR_TIMEOUTS` option on this socket.
+    ///
+    /// For more information about this option, see [`set_thin_linear_timeouts`].
+    ///
+    /// [`set_thin_linear_timeouts`]: crate::Socket::set_thin_linear_timeouts
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn thin_linear_timeouts(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<Bool>(
+                self.as_raw(),
+                libc::IPPROTO_TCP,
+                libc::TCP_THIN_LINEAR_TIMEOUTS,
+            )
+            .map(|timeouts| timeouts != 0)
+        }
+    }
+
+    /// Set the value of the `TCP_THIN_LINEAR_TIMEOUTS` option on this socket.
+    ///
+    /// If set, the kernel will dynamically detect a thin-stream connection if there are fewer than four packets in flight.
+    /// With fewer than four packets in flight, normal TCP fast retransmission is not effective.
+    /// The kernel then modifies the retransmission behaviour to avoid the very high latencies that thin streams suffer because of exponential backoff.
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn set_thin_linear_timeouts(&self, timeouts: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::IPPROTO_TCP,
+                libc::TCP_THIN_LINEAR_TIMEOUTS,
+                timeouts as c_int,
+            )
+        }
+    }
+
+    /// Gets the value for the `SO_BINDTODEVICE` option on this socket.
+    ///
+    /// This value is the interface name of the device the socket is bound to.
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn device(&self) -> io::Result<Option<Vec<u8>>> {
+        // TODO: replace with `MaybeUninit::uninit_array` once stable.
+        let mut buf: [MaybeUninit<u8>; libc::IFNAMSIZ] =
+            unsafe { MaybeUninit::uninit().assume_init() };
+        let mut len = buf.len() as libc::socklen_t;
+        syscall!(getsockopt(
+            self.as_raw(),
+            libc::SOL_SOCKET,
+            libc::SO_BINDTODEVICE,
+            buf.as_mut_ptr().cast(),
+            &mut len,
+        ))?;
+        if len == 0 {
+            Ok(None)
+        } else {
+            let buf = &buf[..len as usize - 1];
+            // TODO: use `MaybeUninit::slice_assume_init_ref` once stable.
+            Ok(Some(unsafe { &*(buf as *const [_] as *const [u8]) }.into()))
+        }
+    }
+
+    /// Sets the value for the `SO_BINDTODEVICE` option on this socket.
+    ///
+    /// If a socket is bound to an interface, only packets received from that
+    /// particular interface are processed by the socket. Note that this only
+    /// works for some socket types, particularly `AF_INET` sockets.
+    ///
+    /// If `interface` is `None` or an empty slice, the binding is removed.
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn bind_device(&self, interface: Option<&[u8]>) -> io::Result<()> {
+        let (value, len) = if let Some(interface) = interface {
+            (interface.as_ptr(), interface.len())
+        } else {
+            (ptr::null(), 0)
+        };
+        syscall!(setsockopt(
+            self.as_raw(),
+            libc::SOL_SOCKET,
+            libc::SO_BINDTODEVICE,
+            value.cast(),
+            len as libc::socklen_t,
+        ))
+        .map(|_| ())
+    }
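+    // Rough usage sketch for `SO_BINDTODEVICE` above (illustrative only;
+    // "eth0" is a placeholder interface name):
+    //
+    //     socket.bind_device(Some(b"eth0"))?;          // restrict to eth0
+    //     assert_eq!(socket.device()?.as_deref(), Some(&b"eth0"[..]));
+    //     socket.bind_device(None)?;                   // remove the binding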
+
+    /// Sets the value for the `SO_SETFIB` option on this socket.
+    ///
+    /// Binds the socket to the specified forwarding table (VRF) on FreeBSD.
+    #[cfg(all(feature = "all", target_os = "freebsd"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "freebsd"))))]
+    pub fn set_fib(&self, fib: u32) -> io::Result<()> {
+        syscall!(setsockopt(
+            self.as_raw(),
+            libc::SOL_SOCKET,
+            libc::SO_SETFIB,
+            (&fib as *const u32).cast(),
+            mem::size_of::<u32>() as libc::socklen_t,
+        ))
+        .map(|_| ())
+    }
+
+    /// This method is deprecated; use [`crate::Socket::bind_device_by_index_v4`] instead.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "ios",
+                target_os = "macos",
+                target_os = "tvos",
+                target_os = "watchos",
+            )
+        )))
+    )]
+    #[deprecated = "Use `Socket::bind_device_by_index_v4` instead"]
+    pub fn bind_device_by_index(&self, interface: Option<NonZeroU32>) -> io::Result<()> {
+        self.bind_device_by_index_v4(interface)
+    }
+
+    /// Sets the value for `IP_BOUND_IF` option on this socket.
+    ///
+    /// If a socket is bound to an interface, only packets received from that
+    /// particular interface are processed by the socket.
+    ///
+    /// If `interface` is `None`, the binding is removed. If the `interface`
+    /// index is not valid, an error is returned.
+    ///
+    /// One can use [`libc::if_nametoindex`] to convert an interface alias to an
+    /// index.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "ios",
+                target_os = "macos",
+                target_os = "tvos",
+                target_os = "watchos",
+            )
+        )))
+    )]
+    pub fn bind_device_by_index_v4(&self, interface: Option<NonZeroU32>) -> io::Result<()> {
+        let index = interface.map_or(0, NonZeroU32::get);
+        unsafe { setsockopt(self.as_raw(), IPPROTO_IP, libc::IP_BOUND_IF, index) }
+    }
+
+    /// Sets the value for `IPV6_BOUND_IF` option on this socket.
+    ///
+    /// If a socket is bound to an interface, only packets received from that
+    /// particular interface are processed by the socket.
+    ///
+    /// If `interface` is `None`, the binding is removed. If the `interface`
+    /// index is not valid, an error is returned.
+    ///
+    /// One can use [`libc::if_nametoindex`] to convert an interface alias to an
+    /// index.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "ios",
+                target_os = "macos",
+                target_os = "tvos",
+                target_os = "watchos",
+            )
+        )))
+    )]
+    pub fn bind_device_by_index_v6(&self, interface: Option<NonZeroU32>) -> io::Result<()> {
+        let index = interface.map_or(0, NonZeroU32::get);
+        unsafe { setsockopt(self.as_raw(), IPPROTO_IPV6, libc::IPV6_BOUND_IF, index) }
+    }
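+    // Rough usage sketch for the `*_BOUND_IF` helpers above (illustrative
+    // only; "en0" is a placeholder interface name and the index lookup is the
+    // usual `if_nametoindex(3)` call):
+    //
+    //     let index = unsafe { libc::if_nametoindex(b"en0\0".as_ptr().cast()) };
+    //     socket.bind_device_by_index_v4(std::num::NonZeroU32::new(index))?;
+    //     assert_eq!(socket.device_index_v4()?, std::num::NonZeroU32::new(index));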
+
+    /// Gets the value for `IP_BOUND_IF` option on this socket, i.e. the index
+    /// for the interface to which the socket is bound.
+    ///
+    /// Returns `None` if the socket is not bound to any interface, otherwise
+    /// returns an interface index.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "ios",
+                target_os = "macos",
+                target_os = "tvos",
+                target_os = "watchos",
+            )
+        )))
+    )]
+    pub fn device_index_v4(&self) -> io::Result<Option<NonZeroU32>> {
+        let index =
+            unsafe { getsockopt::<libc::c_uint>(self.as_raw(), IPPROTO_IP, libc::IP_BOUND_IF)? };
+        Ok(NonZeroU32::new(index))
+    }
+
+    /// This method is deprecated; use [`crate::Socket::device_index_v4`] instead.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "ios",
+                target_os = "macos",
+                target_os = "tvos",
+                target_os = "watchos",
+            )
+        )))
+    )]
+    #[deprecated = "Use `Socket::device_index_v4` instead"]
+    pub fn device_index(&self) -> io::Result<Option<NonZeroU32>> {
+        self.device_index_v4()
+    }
+
+    /// Gets the value for `IPV6_BOUND_IF` option on this socket, i.e. the index
+    /// for the interface to which the socket is bound.
+    ///
+    /// Returns `None` if the socket is not bound to any interface, otherwise
+    /// returns an interface index.
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "ios",
+                target_os = "macos",
+                target_os = "tvos",
+                target_os = "watchos",
+            )
+        )))
+    )]
+    pub fn device_index_v6(&self) -> io::Result<Option<NonZeroU32>> {
+        let index = unsafe {
+            getsockopt::<libc::c_uint>(self.as_raw(), IPPROTO_IPV6, libc::IPV6_BOUND_IF)?
+        };
+        Ok(NonZeroU32::new(index))
+    }
+
+    /// Get the value of the `SO_INCOMING_CPU` option on this socket.
+    ///
+    /// For more information about this option, see [`set_cpu_affinity`].
+    ///
+    /// [`set_cpu_affinity`]: crate::Socket::set_cpu_affinity
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn cpu_affinity(&self) -> io::Result<usize> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), libc::SOL_SOCKET, libc::SO_INCOMING_CPU)
+                .map(|cpu| cpu as usize)
+        }
+    }
+
+    /// Set value for the `SO_INCOMING_CPU` option on this socket.
+    ///
+    /// Sets the CPU affinity of the socket.
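+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch that asks the kernel to prefer CPU 0 for this
+    /// socket's processing (the CPU number is arbitrary):
+    ///
+    /// ```no_run
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// socket.set_cpu_affinity(0)?;
+    /// assert_eq!(socket.cpu_affinity()?, 0);
+    /// # Ok(())
+    /// # }
+    /// ```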
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn set_cpu_affinity(&self, cpu: usize) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_SOCKET,
+                libc::SO_INCOMING_CPU,
+                cpu as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `SO_REUSEPORT` option on this socket.
+    ///
+    /// For more information about this option, see [`set_reuse_port`].
+    ///
+    /// [`set_reuse_port`]: crate::Socket::set_reuse_port
+    #[cfg(all(
+        feature = "all",
+        not(any(target_os = "solaris", target_os = "illumos"))
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            unix,
+            not(any(target_os = "solaris", target_os = "illumos"))
+        )))
+    )]
+    pub fn reuse_port(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), libc::SOL_SOCKET, libc::SO_REUSEPORT)
+                .map(|reuse| reuse != 0)
+        }
+    }
+
+    /// Set value for the `SO_REUSEPORT` option on this socket.
+    ///
+    /// This indicates that further calls to `bind` may allow reuse of local
+    /// addresses. For IPv4 sockets this means that a socket may bind even when
+    /// there's a socket already listening on this port.
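+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of two UDP sockets sharing one port (the address is
+    /// a placeholder):
+    ///
+    /// ```no_run
+    /// use socket2::{Domain, SockAddr, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let addr: SockAddr = "127.0.0.1:9000".parse::<std::net::SocketAddr>().unwrap().into();
+    /// let first = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
+    /// first.set_reuse_port(true)?;
+    /// first.bind(&addr)?;
+    /// let second = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
+    /// second.set_reuse_port(true)?;
+    /// second.bind(&addr)?;
+    /// # Ok(())
+    /// # }
+    /// ```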
+    #[cfg(all(
+        feature = "all",
+        not(any(target_os = "solaris", target_os = "illumos"))
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            unix,
+            not(any(target_os = "solaris", target_os = "illumos"))
+        )))
+    )]
+    pub fn set_reuse_port(&self, reuse: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_SOCKET,
+                libc::SO_REUSEPORT,
+                reuse as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `SO_REUSEPORT_LB` option on this socket.
+    ///
+    /// For more information about this option, see [`set_reuse_port_lb`].
+    ///
+    /// [`set_reuse_port_lb`]: crate::Socket::set_reuse_port_lb
+    #[cfg(all(feature = "all", target_os = "freebsd"))]
+    pub fn reuse_port_lb(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), libc::SOL_SOCKET, libc::SO_REUSEPORT_LB)
+                .map(|reuse| reuse != 0)
+        }
+    }
+
+    /// Set value for the `SO_REUSEPORT_LB` option on this socket.
+    ///
+    /// This allows multiple programs or threads to bind to the same port and
+    /// incoming connections will be load balanced using a hash function.
+    #[cfg(all(feature = "all", target_os = "freebsd"))]
+    pub fn set_reuse_port_lb(&self, reuse: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_SOCKET,
+                libc::SO_REUSEPORT_LB,
+                reuse as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `IP_FREEBIND` option on this socket.
+    ///
+    /// For more information about this option, see [`set_freebind`].
+    ///
+    /// [`set_freebind`]: crate::Socket::set_freebind
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn freebind(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), libc::SOL_IP, libc::IP_FREEBIND)
+                .map(|freebind| freebind != 0)
+        }
+    }
+
+    /// Set value for the `IP_FREEBIND` option on this socket.
+    ///
+    /// If enabled, this boolean option allows binding to an IP address that is
+    /// nonlocal or does not (yet) exist.  This permits listening on a socket,
+    /// without requiring the underlying network interface or the specified
+    /// dynamic IP address to be up at the time that the application is trying
+    /// to bind to it.
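+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch that binds to an address which may not be configured
+    /// on any interface yet (`192.0.2.10` is a documentation address used as
+    /// a placeholder):
+    ///
+    /// ```no_run
+    /// use socket2::{Domain, SockAddr, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// socket.set_freebind(true)?;
+    /// let addr: SockAddr = "192.0.2.10:8080".parse::<std::net::SocketAddr>().unwrap().into();
+    /// socket.bind(&addr)?;
+    /// # Ok(())
+    /// # }
+    /// ```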
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn set_freebind(&self, freebind: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_IP,
+                libc::IP_FREEBIND,
+                freebind as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `IPV6_FREEBIND` option on this socket.
+    ///
+    /// This is an IPv6 counterpart of `IP_FREEBIND` socket option on
+    /// Android/Linux. For more information about this option, see
+    /// [`set_freebind`].
+    ///
+    /// [`set_freebind`]: crate::Socket::set_freebind
+    #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux"))))
+    )]
+    pub fn freebind_ipv6(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), libc::SOL_IPV6, libc::IPV6_FREEBIND)
+                .map(|freebind| freebind != 0)
+        }
+    }
+
+    /// Set value for the `IPV6_FREEBIND` option on this socket.
+    ///
+    /// This is an IPv6 counterpart of `IP_FREEBIND` socket option on
+    /// Android/Linux. For more information about this option, see
+    /// [`set_freebind`].
+    ///
+    /// [`set_freebind`]: crate::Socket::set_freebind
+    ///
+    /// # Examples
+    ///
+    /// On Linux:
+    ///
+    /// ```
+    /// use socket2::{Domain, Socket, Type};
+    /// use std::io::{self, Error, ErrorKind};
+    ///
+    /// fn enable_freebind(socket: &Socket) -> io::Result<()> {
+    ///     match socket.domain()? {
+    ///         Domain::IPV4 => socket.set_freebind(true)?,
+    ///         Domain::IPV6 => socket.set_freebind_ipv6(true)?,
+    ///         _ => return Err(Error::new(ErrorKind::Other, "unsupported domain")),
+    ///     };
+    ///     Ok(())
+    /// }
+    ///
+    /// # fn main() -> io::Result<()> {
+    /// #     let socket = Socket::new(Domain::IPV6, Type::STREAM, None)?;
+    /// #     enable_freebind(&socket)
+    /// # }
+    /// ```
+    #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux"))))
+    )]
+    pub fn set_freebind_ipv6(&self, freebind: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_IPV6,
+                libc::IPV6_FREEBIND,
+                freebind as c_int,
+            )
+        }
+    }
+
+    /// Get the value for the `SO_ORIGINAL_DST` option on this socket.
+    ///
+    /// This value contains the original destination IPv4 address of the connection
+    /// redirected using `iptables` `REDIRECT` or `TPROXY`.
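+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming `socket` is a connection that was
+    /// redirected by `iptables` (setting that up is outside the scope of
+    /// this example):
+    ///
+    /// ```no_run
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// # let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// let original = socket.original_dst()?;
+    /// println!("original destination: {:?}", original.as_socket());
+    /// # Ok(())
+    /// # }
+    /// ```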
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn original_dst(&self) -> io::Result<SockAddr> {
+        // Safety: `getsockopt` initialises the `SockAddr` for us.
+        unsafe {
+            SockAddr::try_init(|storage, len| {
+                syscall!(getsockopt(
+                    self.as_raw(),
+                    libc::SOL_IP,
+                    libc::SO_ORIGINAL_DST,
+                    storage.cast(),
+                    len
+                ))
+            })
+        }
+        .map(|(_, addr)| addr)
+    }
+
+    /// Get the value for the `IP6T_SO_ORIGINAL_DST` option on this socket.
+    ///
+    /// This value contains the original destination IPv6 address of the connection
+    /// redirected using `ip6tables` `REDIRECT` or `TPROXY`.
+    #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux"))))
+    )]
+    pub fn original_dst_ipv6(&self) -> io::Result<SockAddr> {
+        // Safety: `getsockopt` initialises the `SockAddr` for us.
+        unsafe {
+            SockAddr::try_init(|storage, len| {
+                syscall!(getsockopt(
+                    self.as_raw(),
+                    libc::SOL_IPV6,
+                    libc::IP6T_SO_ORIGINAL_DST,
+                    storage.cast(),
+                    len
+                ))
+            })
+        }
+        .map(|(_, addr)| addr)
+    }
+
+    /// Copies data between a `file` and this socket using the `sendfile(2)`
+    /// system call. Because this copying is done within the kernel,
+    /// `sendfile()` is more efficient than the combination of `read(2)` and
+    /// `write(2)`, which would require transferring data to and from user
+    /// space.
+    ///
+    /// Different OSs support different kinds of `file`s, see the OS
+    /// documentation for what kind of files are supported. Generally *regular*
+    /// files are supported by all OSs.
+    #[doc = man_links!(unix: sendfile(2))]
+    ///
+    /// The `offset` is the absolute offset into the `file` to use as starting
+    /// point.
+    ///
+    /// Depending on the OS this function *may* change the offset of `file`. For
+    /// the best results reset the offset of the file before using it again.
+    ///
+    /// The `length` determines how many bytes to send, where a length of `None`
+    /// means it will try to send all bytes.
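+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch that streams a file over a connected TCP socket (the
+    /// path and address are placeholders):
+    ///
+    /// ```no_run
+    /// use std::fs::File;
+    /// use socket2::{Domain, SockAddr, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// let addr: SockAddr = "127.0.0.1:8080".parse::<std::net::SocketAddr>().unwrap().into();
+    /// socket.connect(&addr)?;
+    /// let file = File::open("/tmp/payload.bin")?;
+    /// // Send the whole file, starting at offset 0.
+    /// let sent = socket.sendfile(&file, 0, None)?;
+    /// println!("sent {} bytes", sent);
+    /// # Ok(())
+    /// # }
+    /// ```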
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "aix",
+            target_os = "android",
+            target_os = "freebsd",
+            target_os = "ios",
+            target_os = "linux",
+            target_os = "macos",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "aix",
+                target_os = "android",
+                target_os = "freebsd",
+                target_os = "ios",
+                target_os = "linux",
+                target_os = "macos",
+                target_os = "tvos",
+                target_os = "watchos",
+            )
+        )))
+    )]
+    pub fn sendfile<F>(
+        &self,
+        file: &F,
+        offset: usize,
+        length: Option<NonZeroUsize>,
+    ) -> io::Result<usize>
+    where
+        F: AsRawFd,
+    {
+        self._sendfile(file.as_raw_fd(), offset as _, length)
+    }
+
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "ios",
+            target_os = "macos",
+            target_os = "tvos",
+            target_os = "watchos",
+        )
+    ))]
+    fn _sendfile(
+        &self,
+        file: RawFd,
+        offset: libc::off_t,
+        length: Option<NonZeroUsize>,
+    ) -> io::Result<usize> {
+        // On macOS `length` is a value-result parameter. It determines the number
+        // of bytes to write and returns the number of bytes written.
+        let mut length = match length {
+            Some(n) => n.get() as libc::off_t,
+            // A value of `0` means send all bytes.
+            None => 0,
+        };
+        syscall!(sendfile(
+            file,
+            self.as_raw(),
+            offset,
+            &mut length,
+            ptr::null_mut(),
+            0,
+        ))
+        .map(|_| length as usize)
+    }
+
+    #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))]
+    fn _sendfile(
+        &self,
+        file: RawFd,
+        offset: libc::off_t,
+        length: Option<NonZeroUsize>,
+    ) -> io::Result<usize> {
+        let count = match length {
+            Some(n) => n.get() as libc::size_t,
+            // The maximum the Linux kernel will write in a single call.
+            None => 0x7ffff000, // 2,147,479,552 bytes.
+        };
+        let mut offset = offset;
+        syscall!(sendfile(self.as_raw(), file, &mut offset, count)).map(|n| n as usize)
+    }
+
+    #[cfg(all(feature = "all", target_os = "freebsd"))]
+    fn _sendfile(
+        &self,
+        file: RawFd,
+        offset: libc::off_t,
+        length: Option<NonZeroUsize>,
+    ) -> io::Result<usize> {
+        let nbytes = match length {
+            Some(n) => n.get() as libc::size_t,
+            // A value of `0` means send all bytes.
+            None => 0,
+        };
+        let mut sbytes: libc::off_t = 0;
+        syscall!(sendfile(
+            file,
+            self.as_raw(),
+            offset,
+            nbytes,
+            ptr::null_mut(),
+            &mut sbytes,
+            0,
+        ))
+        .map(|_| sbytes as usize)
+    }
+
+    #[cfg(all(feature = "all", target_os = "aix"))]
+    fn _sendfile(
+        &self,
+        file: RawFd,
+        offset: libc::off_t,
+        length: Option<NonZeroUsize>,
+    ) -> io::Result<usize> {
+        let nbytes = match length {
+            Some(n) => n.get() as i64,
+            None => -1,
+        };
+        let mut params = libc::sf_parms {
+            header_data: ptr::null_mut(),
+            header_length: 0,
+            file_descriptor: file,
+            file_size: 0,
+            file_offset: offset as u64,
+            file_bytes: nbytes,
+            trailer_data: ptr::null_mut(),
+            trailer_length: 0,
+            bytes_sent: 0,
+        };
+        // AIX doesn't support SF_REUSE; the socket will be closed after a successful transmission.
+        syscall!(send_file(
+            &mut self.as_raw() as *mut _,
+            &mut params as *mut _,
+            libc::SF_CLOSE as libc::c_uint,
+        ))
+        .map(|_| params.bytes_sent as usize)
+    }
+
+    /// Set the value of the `TCP_USER_TIMEOUT` option on this socket.
+    ///
+    /// If set, this specifies the maximum amount of time that transmitted data may remain
+    /// unacknowledged or buffered data may remain untransmitted before TCP will forcibly close the
+    /// corresponding connection.
+    ///
+    /// Setting `timeout` to `None` or a zero duration causes the system default timeouts to
+    /// be used. If `timeout` in milliseconds is larger than `c_uint::MAX`, the timeout is clamped
+    /// to `c_uint::MAX`. For example, when `c_uint` is a 32-bit value, this limits the timeout to
+    /// approximately 49.71 days.
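+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the 30 second value is arbitrary:
+    ///
+    /// ```no_run
+    /// use std::time::Duration;
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
+    /// socket.set_tcp_user_timeout(Some(Duration::from_secs(30)))?;
+    /// assert_eq!(socket.tcp_user_timeout()?, Some(Duration::from_secs(30)));
+    /// # Ok(())
+    /// # }
+    /// ```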
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn set_tcp_user_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
+        let timeout = timeout.map_or(0, |to| {
+            min(to.as_millis(), libc::c_uint::MAX as u128) as libc::c_uint
+        });
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::IPPROTO_TCP,
+                libc::TCP_USER_TIMEOUT,
+                timeout,
+            )
+        }
+    }
+
+    /// Get the value of the `TCP_USER_TIMEOUT` option on this socket.
+    ///
+    /// For more information about this option, see [`set_tcp_user_timeout`].
+    ///
+    /// [`set_tcp_user_timeout`]: crate::Socket::set_tcp_user_timeout
+    #[cfg(all(
+        feature = "all",
+        any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(target_os = "android", target_os = "fuchsia", target_os = "linux")
+        )))
+    )]
+    pub fn tcp_user_timeout(&self) -> io::Result<Option<Duration>> {
+        unsafe {
+            getsockopt::<libc::c_uint>(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_USER_TIMEOUT)
+                .map(|millis| {
+                    if millis == 0 {
+                        None
+                    } else {
+                        Some(Duration::from_millis(millis as u64))
+                    }
+                })
+        }
+    }
+
+    /// Attach a Berkeley Packet Filter (BPF) to this socket.
+    ///
+    /// BPF allows a user-space program to attach a filter onto any socket
+    /// and allow or disallow certain types of data to come through the socket.
+    ///
+    /// For more information about this option, see [filter](https://www.kernel.org/doc/html/v5.12/networking/filter.html)
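+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch attaching a single-instruction classic BPF program
+    /// that accepts every packet (`BPF_RET | BPF_K` returning the maximum
+    /// length); a real filter would be more selective. It uses the `libc`
+    /// types already required by this function's signature:
+    ///
+    /// ```no_run
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
+    /// let accept_all = [libc::sock_filter {
+    ///     code: 0x06, // BPF_RET | BPF_K
+    ///     jt: 0,
+    ///     jf: 0,
+    ///     k: 0xffff_ffff, // accept the whole packet
+    /// }];
+    /// socket.attach_filter(&accept_all)?;
+    /// socket.detach_filter()?;
+    /// # Ok(())
+    /// # }
+    /// ```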
+    #[cfg(all(feature = "all", any(target_os = "linux", target_os = "android")))]
+    pub fn attach_filter(&self, filters: &[libc::sock_filter]) -> io::Result<()> {
+        let prog = libc::sock_fprog {
+            len: filters.len() as u16,
+            filter: filters.as_ptr() as *mut _,
+        };
+
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_SOCKET,
+                libc::SO_ATTACH_FILTER,
+                prog,
+            )
+        }
+    }
+
+    /// Detach the Berkeley Packet Filter (BPF) from this socket.
+    ///
+    /// For more information about this option, see [`attach_filter`]
+    ///
+    /// [`attach_filter`]: crate::Socket::attach_filter
+    #[cfg(all(feature = "all", any(target_os = "linux", target_os = "android")))]
+    pub fn detach_filter(&self) -> io::Result<()> {
+        unsafe { setsockopt(self.as_raw(), libc::SOL_SOCKET, libc::SO_DETACH_FILTER, 0) }
+    }
+
+    /// Gets the value for the `SO_COOKIE` option on this socket.
+    ///
+    /// The socket cookie is a unique, kernel-managed identifier tied to each socket.
+    /// Therefore, there is no corresponding `set` helper.
+    ///
+    /// For more information about this option, see [Linux patch](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5daab9db7b65df87da26fd8cfa695fb9546a1ddb)
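+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch that just prints the kernel-assigned cookie:
+    ///
+    /// ```no_run
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
+    /// println!("socket cookie: {}", socket.cookie()?);
+    /// # Ok(())
+    /// # }
+    /// ```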
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn cookie(&self) -> io::Result<u64> {
+        unsafe { getsockopt::<libc::c_ulonglong>(self.as_raw(), libc::SOL_SOCKET, libc::SO_COOKIE) }
+    }
+
+    /// Get the value of the `IPV6_TCLASS` option for this socket.
+    ///
+    /// For more information about this option, see [`set_tclass_v6`].
+    ///
+    /// [`set_tclass_v6`]: crate::Socket::set_tclass_v6
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "linux",
+            target_os = "macos",
+            target_os = "netbsd",
+            target_os = "openbsd"
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "android",
+                target_os = "dragonfly",
+                target_os = "freebsd",
+                target_os = "fuchsia",
+                target_os = "linux",
+                target_os = "macos",
+                target_os = "netbsd",
+                target_os = "openbsd"
+            )
+        )))
+    )]
+    pub fn tclass_v6(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt::<c_int>(self.as_raw(), IPPROTO_IPV6, libc::IPV6_TCLASS)
+                .map(|tclass| tclass as u32)
+        }
+    }
+
+    /// Set the value of the `IPV6_TCLASS` option for this socket.
+    ///
+    /// Specifies the traffic class field that is used in every packet
+    /// sent from this socket.
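+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch that marks outgoing traffic with DSCP AF21 (the
+    /// traffic-class byte `0x48` is just an illustrative value):
+    ///
+    /// ```no_run
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV6, Type::DGRAM, None)?;
+    /// socket.set_tclass_v6(0x48)?;
+    /// assert_eq!(socket.tclass_v6()?, 0x48);
+    /// # Ok(())
+    /// # }
+    /// ```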
+    #[cfg(all(
+        feature = "all",
+        any(
+            target_os = "android",
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "fuchsia",
+            target_os = "linux",
+            target_os = "macos",
+            target_os = "netbsd",
+            target_os = "openbsd"
+        )
+    ))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(
+            feature = "all",
+            any(
+                target_os = "android",
+                target_os = "dragonfly",
+                target_os = "freebsd",
+                target_os = "fuchsia",
+                target_os = "linux",
+                target_os = "macos",
+                target_os = "netbsd",
+                target_os = "openbsd"
+            )
+        )))
+    )]
+    pub fn set_tclass_v6(&self, tclass: u32) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                IPPROTO_IPV6,
+                libc::IPV6_TCLASS,
+                tclass as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `TCP_CONGESTION` option for this socket.
+    ///
+    /// For more information about this option, see [`set_tcp_congestion`].
+    ///
+    /// [`set_tcp_congestion`]: crate::Socket::set_tcp_congestion
+    #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux"))))
+    )]
+    pub fn tcp_congestion(&self) -> io::Result<Vec<u8>> {
+        let mut payload: [u8; TCP_CA_NAME_MAX] = [0; TCP_CA_NAME_MAX];
+        let mut len = payload.len() as libc::socklen_t;
+        syscall!(getsockopt(
+            self.as_raw(),
+            IPPROTO_TCP,
+            libc::TCP_CONGESTION,
+            payload.as_mut_ptr().cast(),
+            &mut len,
+        ))
+        .map(|_| payload[..len as usize].to_vec())
+    }
+
+    /// Set the value of the `TCP_CONGESTION` option for this socket.
+    ///
+    /// Specifies the TCP congestion control algorithm to use for this socket.
+    ///
+    /// The value must be a TCP congestion control algorithm name supported by
+    /// the platform; for example, Linux may support "reno" and "cubic".
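+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch that switches the socket to `cubic`, assuming the
+    /// platform ships that algorithm:
+    ///
+    /// ```no_run
+    /// use socket2::{Domain, Protocol, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM, Some(Protocol::TCP))?;
+    /// socket.set_tcp_congestion(b"cubic")?;
+    /// println!("algorithm: {:?}", socket.tcp_congestion()?);
+    /// # Ok(())
+    /// # }
+    /// ```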
+    #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))]
+    #[cfg_attr(
+        docsrs,
+        doc(cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux"))))
+    )]
+    pub fn set_tcp_congestion(&self, tcp_ca_name: &[u8]) -> io::Result<()> {
+        syscall!(setsockopt(
+            self.as_raw(),
+            IPPROTO_TCP,
+            libc::TCP_CONGESTION,
+            tcp_ca_name.as_ptr() as *const _,
+            tcp_ca_name.len() as libc::socklen_t,
+        ))
+        .map(|_| ())
+    }
+
+    /// Set value for the `DCCP_SOCKOPT_SERVICE` option on this socket.
+    ///
+    /// Sets the DCCP service. The specification mandates use of service codes.
+    /// If this socket option is not set, the socket will fall back to 0 (which
+    /// means that no meaningful service code is present). On active sockets
+    /// this is set before [`connect`]. On passive sockets up to 32 service
+    /// codes can be set before calling [`bind`].
+    ///
+    /// [`connect`]: crate::Socket::connect
+    /// [`bind`]: crate::Socket::bind
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn set_dccp_service(&self, code: u32) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_DCCP,
+                libc::DCCP_SOCKOPT_SERVICE,
+                code,
+            )
+        }
+    }
+
+    /// Get the value of the `DCCP_SOCKOPT_SERVICE` option on this socket.
+    ///
+    /// For more information about this option see [`set_dccp_service`]
+    ///
+    /// [`set_dccp_service`]: crate::Socket::set_dccp_service
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn dccp_service(&self) -> io::Result<u32> {
+        unsafe { getsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_SERVICE) }
+    }
+
+    /// Set value for the `DCCP_SOCKOPT_CCID` option on this socket.
+    ///
+    /// This option sets both the TX and RX CCIDs at the same time.
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn set_dccp_ccid(&self, ccid: u8) -> io::Result<()> {
+        unsafe { setsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_CCID, ccid) }
+    }
+
+    /// Get the value of the `DCCP_SOCKOPT_TX_CCID` option on this socket.
+    ///
+    /// For more information about this option see [`set_dccp_ccid`].
+    ///
+    /// [`set_dccp_ccid`]: crate::Socket::set_dccp_ccid
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn dccp_tx_ccid(&self) -> io::Result<u32> {
+        unsafe { getsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_TX_CCID) }
+    }
+
+    /// Get the value of the `DCCP_SOCKOPT_RX_CCID` option on this socket.
+    ///
+    /// For more information about this option see [`set_dccp_ccid`].
+    ///
+    /// [`set_dccp_ccid`]: crate::Socket::set_dccp_ccid
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn dccp_xx_ccid(&self) -> io::Result<u32> {
+        unsafe { getsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_RX_CCID) }
+    }
+
+    /// Set value for the `DCCP_SOCKOPT_SERVER_TIMEWAIT` option on this socket.
+    ///
+    /// Enables a listening socket to hold timewait state when closing the
+    /// connection. This option must be set after `accept` returns.
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn set_dccp_server_timewait(&self, hold_timewait: bool) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_DCCP,
+                libc::DCCP_SOCKOPT_SERVER_TIMEWAIT,
+                hold_timewait as c_int,
+            )
+        }
+    }
+
+    /// Get the value of the `DCCP_SOCKOPT_SERVER_TIMEWAIT` option on this socket.
+    ///
+    /// For more information see [`set_dccp_server_timewait`]
+    ///
+    /// [`set_dccp_server_timewait`]: crate::Socket::set_dccp_server_timewait
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn dccp_server_timewait(&self) -> io::Result<bool> {
+        unsafe {
+            getsockopt(
+                self.as_raw(),
+                libc::SOL_DCCP,
+                libc::DCCP_SOCKOPT_SERVER_TIMEWAIT,
+            )
+        }
+    }
+
+    /// Set value for the `DCCP_SOCKOPT_SEND_CSCOV` option on this socket.
+    ///
+    /// Both this option and `DCCP_SOCKOPT_RECV_CSCOV` are used for setting the
+    /// partial checksum coverage. The default is that checksums always cover
+    /// the entire packet and that only fully covered application data is
+    /// accepted by the receiver. Hence, when using this feature on the sender,
+    /// it must be enabled at the receiver too, with suitable choice of CsCov.
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn set_dccp_send_cscov(&self, level: u32) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_DCCP,
+                libc::DCCP_SOCKOPT_SEND_CSCOV,
+                level,
+            )
+        }
+    }
+
+    /// Get the value of the `DCCP_SOCKOPT_SEND_CSCOV` option on this socket.
+    ///
+    /// For more information on this option see [`set_dccp_send_cscov`].
+    ///
+    /// [`set_dccp_send_cscov`]: crate::Socket::set_dccp_send_cscov
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn dccp_send_cscov(&self) -> io::Result<u32> {
+        unsafe { getsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_SEND_CSCOV) }
+    }
+
+    /// Set the value of the `DCCP_SOCKOPT_RECV_CSCOV` option on this socket.
+    ///
+    /// This option is only useful when combined with [`set_dccp_send_cscov`].
+    ///
+    /// [`set_dccp_send_cscov`]: crate::Socket::set_dccp_send_cscov
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn set_dccp_recv_cscov(&self, level: u32) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_DCCP,
+                libc::DCCP_SOCKOPT_RECV_CSCOV,
+                level,
+            )
+        }
+    }
+
+    /// Get the value of the `DCCP_SOCKOPT_RECV_CSCOV` option on this socket.
+    ///
+    /// For more information on this option see [`set_dccp_recv_cscov`].
+    ///
+    /// [`set_dccp_recv_cscov`]: crate::Socket::set_dccp_recv_cscov
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn dccp_recv_cscov(&self) -> io::Result<u32> {
+        unsafe { getsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_RECV_CSCOV) }
+    }
+
+    /// Set value for the `DCCP_SOCKOPT_QPOLICY_TXQLEN` option on this socket.
+    ///
+    /// This option sets the maximum length of the output queue. A zero value is
+    /// interpreted as unbounded queue length.
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn set_dccp_qpolicy_txqlen(&self, length: u32) -> io::Result<()> {
+        unsafe {
+            setsockopt(
+                self.as_raw(),
+                libc::SOL_DCCP,
+                libc::DCCP_SOCKOPT_QPOLICY_TXQLEN,
+                length,
+            )
+        }
+    }
+
+    /// Get the value of the `DCCP_SOCKOPT_QPOLICY_TXQLEN` on this socket.
+    ///
+    /// For more information on this option see [`set_dccp_qpolicy_txqlen`].
+    ///
+    /// [`set_dccp_qpolicy_txqlen`]: crate::Socket::set_dccp_qpolicy_txqlen
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn dccp_qpolicy_txqlen(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt(
+                self.as_raw(),
+                libc::SOL_DCCP,
+                libc::DCCP_SOCKOPT_QPOLICY_TXQLEN,
+            )
+        }
+    }
+
+    /// Get the value of the `DCCP_SOCKOPT_AVAILABLE_CCIDS` option on this socket.
+    ///
+    /// Returns the list of CCIDs supported by the endpoint.
+    ///
+    /// The parameter `N` is the maximum number of CCIDs to retrieve. The
+    /// [documentation] recommends a minimum of four at the time of writing.
+    ///
+    /// [documentation]: https://www.kernel.org/doc/html/latest/networking/dccp.html
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn dccp_available_ccids<const N: usize>(&self) -> io::Result<CcidEndpoints<N>> {
+        let mut endpoints = [0; N];
+        let mut length = endpoints.len() as libc::socklen_t;
+        syscall!(getsockopt(
+            self.as_raw(),
+            libc::SOL_DCCP,
+            libc::DCCP_SOCKOPT_AVAILABLE_CCIDS,
+            endpoints.as_mut_ptr().cast(),
+            &mut length,
+        ))?;
+        Ok(CcidEndpoints { endpoints, length })
+    }
+
+    /// Get the value of the `DCCP_SOCKOPT_GET_CUR_MPS` option on this socket.
+    ///
+    /// This option retrieves the current maximum packet size (application
+    /// payload size) in bytes.
+    #[cfg(all(feature = "all", target_os = "linux"))]
+    #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+    pub fn dccp_cur_mps(&self) -> io::Result<u32> {
+        unsafe {
+            getsockopt(
+                self.as_raw(),
+                libc::SOL_DCCP,
+                libc::DCCP_SOCKOPT_GET_CUR_MPS,
+            )
+        }
+    }
+}
+
+/// See [`Socket::dccp_available_ccids`].
+#[cfg(all(feature = "all", target_os = "linux"))]
+#[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+#[derive(Debug)]
+pub struct CcidEndpoints<const N: usize> {
+    endpoints: [u8; N],
+    length: u32,
+}
+
+#[cfg(all(feature = "all", target_os = "linux"))]
+#[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))]
+impl<const N: usize> std::ops::Deref for CcidEndpoints<N> {
+    type Target = [u8];
+
+    fn deref(&self) -> &[u8] {
+        &self.endpoints[0..self.length as usize]
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(unix)))]
+impl AsFd for crate::Socket {
+    fn as_fd(&self) -> BorrowedFd<'_> {
+        // SAFETY: lifetime is bound by self.
+        unsafe { BorrowedFd::borrow_raw(self.as_raw()) }
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(unix)))]
+impl AsRawFd for crate::Socket {
+    fn as_raw_fd(&self) -> c_int {
+        self.as_raw()
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(unix)))]
+impl From<crate::Socket> for OwnedFd {
+    fn from(sock: crate::Socket) -> OwnedFd {
+        // SAFETY: sock.into_raw() always returns a valid fd.
+        unsafe { OwnedFd::from_raw_fd(sock.into_raw()) }
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(unix)))]
+impl IntoRawFd for crate::Socket {
+    fn into_raw_fd(self) -> c_int {
+        self.into_raw()
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(unix)))]
+impl From<OwnedFd> for crate::Socket {
+    fn from(fd: OwnedFd) -> crate::Socket {
+        // SAFETY: `OwnedFd` ensures the fd is valid.
+        unsafe { crate::Socket::from_raw_fd(fd.into_raw_fd()) }
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(unix)))]
+impl FromRawFd for crate::Socket {
+    unsafe fn from_raw_fd(fd: c_int) -> crate::Socket {
+        crate::Socket::from_raw(fd)
+    }
+}
+
+#[cfg(feature = "all")]
+from!(UnixStream, crate::Socket);
+#[cfg(feature = "all")]
+from!(UnixListener, crate::Socket);
+#[cfg(feature = "all")]
+from!(UnixDatagram, crate::Socket);
+#[cfg(feature = "all")]
+from!(crate::Socket, UnixStream);
+#[cfg(feature = "all")]
+from!(crate::Socket, UnixListener);
+#[cfg(feature = "all")]
+from!(crate::Socket, UnixDatagram);
+
+#[test]
+fn in_addr_conversion() {
+    let ip = Ipv4Addr::new(127, 0, 0, 1);
+    let raw = to_in_addr(&ip);
+    // NOTE: `in_addr` is packed on NetBSD and it's unsafe to borrow.
+    let a = raw.s_addr;
+    assert_eq!(a, u32::from_ne_bytes([127, 0, 0, 1]));
+    assert_eq!(from_in_addr(raw), ip);
+
+    let ip = Ipv4Addr::new(127, 34, 4, 12);
+    let raw = to_in_addr(&ip);
+    let a = raw.s_addr;
+    assert_eq!(a, u32::from_ne_bytes([127, 34, 4, 12]));
+    assert_eq!(from_in_addr(raw), ip);
+}
+
+#[test]
+fn in6_addr_conversion() {
+    let ip = Ipv6Addr::new(0x2000, 1, 2, 3, 4, 5, 6, 7);
+    let raw = to_in6_addr(&ip);
+    let want = [32, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7];
+    assert_eq!(raw.s6_addr, want);
+    assert_eq!(from_in6_addr(raw), ip);
+}
diff --git a/crates/socket2/src/sys/windows.rs b/crates/socket2/src/sys/windows.rs
new file mode 100644
index 0000000..4c5d987
--- /dev/null
+++ b/crates/socket2/src/sys/windows.rs
@@ -0,0 +1,1019 @@
+// Copyright 2015 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cmp::min;
+use std::io::{self, IoSlice};
+use std::marker::PhantomData;
+use std::mem::{self, size_of, MaybeUninit};
+use std::net::{self, Ipv4Addr, Ipv6Addr, Shutdown};
+use std::os::windows::io::{
+    AsRawSocket, AsSocket, BorrowedSocket, FromRawSocket, IntoRawSocket, OwnedSocket, RawSocket,
+};
+use std::path::Path;
+use std::sync::Once;
+use std::time::{Duration, Instant};
+use std::{process, ptr, slice};
+
+use windows_sys::Win32::Foundation::{SetHandleInformation, HANDLE, HANDLE_FLAG_INHERIT};
+#[cfg(feature = "all")]
+use windows_sys::Win32::Networking::WinSock::SO_PROTOCOL_INFOW;
+use windows_sys::Win32::Networking::WinSock::{
+    self, tcp_keepalive, FIONBIO, IN6_ADDR, IN6_ADDR_0, INVALID_SOCKET, IN_ADDR, IN_ADDR_0,
+    POLLERR, POLLHUP, POLLRDNORM, POLLWRNORM, SD_BOTH, SD_RECEIVE, SD_SEND, SIO_KEEPALIVE_VALS,
+    SOCKET_ERROR, WSABUF, WSAEMSGSIZE, WSAESHUTDOWN, WSAPOLLFD, WSAPROTOCOL_INFOW,
+    WSA_FLAG_NO_HANDLE_INHERIT, WSA_FLAG_OVERLAPPED,
+};
+use windows_sys::Win32::System::Threading::INFINITE;
+
+use crate::{MsgHdr, RecvFlags, SockAddr, TcpKeepalive, Type};
+
+#[allow(non_camel_case_types)]
+pub(crate) type c_int = std::os::raw::c_int;
+
+/// Fake MSG_TRUNC flag for the [`RecvFlags`] struct.
+///
+/// The flag is enabled when a `WSARecv[From]` call returns `WSAEMSGSIZE`. The
+/// value of the flag is defined by us.
+pub(crate) const MSG_TRUNC: c_int = 0x01;
+
+// Used in `Domain`.
+pub(crate) const AF_INET: c_int = windows_sys::Win32::Networking::WinSock::AF_INET as c_int;
+pub(crate) const AF_INET6: c_int = windows_sys::Win32::Networking::WinSock::AF_INET6 as c_int;
+pub(crate) const AF_UNIX: c_int = windows_sys::Win32::Networking::WinSock::AF_UNIX as c_int;
+pub(crate) const AF_UNSPEC: c_int = windows_sys::Win32::Networking::WinSock::AF_UNSPEC as c_int;
+// Used in `Type`.
+pub(crate) const SOCK_STREAM: c_int = windows_sys::Win32::Networking::WinSock::SOCK_STREAM as c_int;
+pub(crate) const SOCK_DGRAM: c_int = windows_sys::Win32::Networking::WinSock::SOCK_DGRAM as c_int;
+pub(crate) const SOCK_RAW: c_int = windows_sys::Win32::Networking::WinSock::SOCK_RAW as c_int;
+const SOCK_RDM: c_int = windows_sys::Win32::Networking::WinSock::SOCK_RDM as c_int;
+pub(crate) const SOCK_SEQPACKET: c_int =
+    windows_sys::Win32::Networking::WinSock::SOCK_SEQPACKET as c_int;
+// Used in `Protocol`.
+pub(crate) use windows_sys::Win32::Networking::WinSock::{
+    IPPROTO_ICMP, IPPROTO_ICMPV6, IPPROTO_TCP, IPPROTO_UDP,
+};
+// Used in `SockAddr`.
+pub(crate) use windows_sys::Win32::Networking::WinSock::{
+    SOCKADDR as sockaddr, SOCKADDR_IN as sockaddr_in, SOCKADDR_IN6 as sockaddr_in6,
+    SOCKADDR_STORAGE as sockaddr_storage,
+};
+#[allow(non_camel_case_types)]
+pub(crate) type sa_family_t = windows_sys::Win32::Networking::WinSock::ADDRESS_FAMILY;
+#[allow(non_camel_case_types)]
+pub(crate) type socklen_t = windows_sys::Win32::Networking::WinSock::socklen_t;
+// Used in `Socket`.
+#[cfg(feature = "all")]
+pub(crate) use windows_sys::Win32::Networking::WinSock::IP_HDRINCL;
+pub(crate) use windows_sys::Win32::Networking::WinSock::{
+    IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP, IPV6_MREQ as Ipv6Mreq,
+    IPV6_MULTICAST_HOPS, IPV6_MULTICAST_IF, IPV6_MULTICAST_LOOP, IPV6_RECVTCLASS,
+    IPV6_UNICAST_HOPS, IPV6_V6ONLY, IP_ADD_MEMBERSHIP, IP_ADD_SOURCE_MEMBERSHIP,
+    IP_DROP_MEMBERSHIP, IP_DROP_SOURCE_MEMBERSHIP, IP_MREQ as IpMreq,
+    IP_MREQ_SOURCE as IpMreqSource, IP_MULTICAST_IF, IP_MULTICAST_LOOP, IP_MULTICAST_TTL,
+    IP_RECVTOS, IP_TOS, IP_TTL, LINGER as linger, MSG_OOB, MSG_PEEK, SO_BROADCAST, SO_ERROR,
+    SO_KEEPALIVE, SO_LINGER, SO_OOBINLINE, SO_RCVBUF, SO_RCVTIMEO, SO_REUSEADDR, SO_SNDBUF,
+    SO_SNDTIMEO, SO_TYPE, TCP_NODELAY,
+};
+pub(crate) const IPPROTO_IP: c_int = windows_sys::Win32::Networking::WinSock::IPPROTO_IP as c_int;
+pub(crate) const SOL_SOCKET: c_int = windows_sys::Win32::Networking::WinSock::SOL_SOCKET as c_int;
+
+/// Type used in set/getsockopt to retrieve the `TCP_NODELAY` option.
+///
+/// NOTE: <https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-getsockopt>
+/// documents that options such as `TCP_NODELAY` and `SO_KEEPALIVE` expect a
+/// `BOOL` (alias for `c_int`, 4 bytes), however in practice this turns out to
+/// be false (or misleading) as a `BOOLEAN` (`c_uchar`, 1 byte) is returned by
+/// `getsockopt`.
+pub(crate) type Bool = windows_sys::Win32::Foundation::BOOLEAN;
+
+/// Maximum size of a buffer passed to system calls like `recv` and `send`.
+const MAX_BUF_LEN: usize = c_int::MAX as usize;
+
+/// Helper macro to execute a system call that returns an `io::Result`.
+macro_rules! syscall {
+    ($fn: ident ( $($arg: expr),* $(,)* ), $err_test: path, $err_value: expr) => {{
+        #[allow(unused_unsafe)]
+        let res = unsafe { windows_sys::Win32::Networking::WinSock::$fn($($arg, )*) };
+        if $err_test(&res, &$err_value) {
+            Err(io::Error::last_os_error())
+        } else {
+            Ok(res)
+        }
+    }};
+}
+
+impl_debug!(
+    crate::Domain,
+    self::AF_INET,
+    self::AF_INET6,
+    self::AF_UNIX,
+    self::AF_UNSPEC,
+);
+
+/// Windows only API.
+impl Type {
+    /// Our custom flag to set `WSA_FLAG_NO_HANDLE_INHERIT` on socket creation.
+    /// Trying to mimic `Type::cloexec` on windows.
+    const NO_INHERIT: c_int = 1 << ((size_of::<c_int>() * 8) - 1); // Last bit.
+
+    /// Set `WSA_FLAG_NO_HANDLE_INHERIT` on the socket.
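+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the created socket will not be inherited by child
+    /// processes:
+    ///
+    /// ```no_run
+    /// use socket2::{Domain, Socket, Type};
+    ///
+    /// # fn main() -> std::io::Result<()> {
+    /// let socket = Socket::new(Domain::IPV4, Type::STREAM.no_inherit(), None)?;
+    /// # drop(socket);
+    /// # Ok(())
+    /// # }
+    /// ```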
+    #[cfg(feature = "all")]
+    #[cfg_attr(docsrs, doc(cfg(all(windows, feature = "all"))))]
+    pub const fn no_inherit(self) -> Type {
+        self._no_inherit()
+    }
+
+    pub(crate) const fn _no_inherit(self) -> Type {
+        Type(self.0 | Type::NO_INHERIT)
+    }
+}
+
+impl_debug!(
+    crate::Type,
+    self::SOCK_STREAM,
+    self::SOCK_DGRAM,
+    self::SOCK_RAW,
+    self::SOCK_RDM,
+    self::SOCK_SEQPACKET,
+);
+
+impl_debug!(
+    crate::Protocol,
+    WinSock::IPPROTO_ICMP,
+    WinSock::IPPROTO_ICMPV6,
+    WinSock::IPPROTO_TCP,
+    WinSock::IPPROTO_UDP,
+);
+
+impl std::fmt::Debug for RecvFlags {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("RecvFlags")
+            .field("is_truncated", &self.is_truncated())
+            .finish()
+    }
+}
+
+#[repr(transparent)]
+pub struct MaybeUninitSlice<'a> {
+    vec: WSABUF,
+    _lifetime: PhantomData<&'a mut [MaybeUninit<u8>]>,
+}
+
+unsafe impl<'a> Send for MaybeUninitSlice<'a> {}
+
+unsafe impl<'a> Sync for MaybeUninitSlice<'a> {}
+
+impl<'a> MaybeUninitSlice<'a> {
+    pub fn new(buf: &'a mut [MaybeUninit<u8>]) -> MaybeUninitSlice<'a> {
+        assert!(buf.len() <= u32::MAX as usize);
+        MaybeUninitSlice {
+            vec: WSABUF {
+                len: buf.len() as u32,
+                buf: buf.as_mut_ptr().cast(),
+            },
+            _lifetime: PhantomData,
+        }
+    }
+
+    pub fn as_slice(&self) -> &[MaybeUninit<u8>] {
+        unsafe { slice::from_raw_parts(self.vec.buf.cast(), self.vec.len as usize) }
+    }
+
+    pub fn as_mut_slice(&mut self) -> &mut [MaybeUninit<u8>] {
+        unsafe { slice::from_raw_parts_mut(self.vec.buf.cast(), self.vec.len as usize) }
+    }
+}
+
+// Used in `MsgHdr`.
+pub(crate) use windows_sys::Win32::Networking::WinSock::WSAMSG as msghdr;
+
+pub(crate) fn set_msghdr_name(msg: &mut msghdr, name: &SockAddr) {
+    msg.name = name.as_ptr() as *mut _;
+    msg.namelen = name.len();
+}
+
+pub(crate) fn set_msghdr_iov(msg: &mut msghdr, ptr: *mut WSABUF, len: usize) {
+    msg.lpBuffers = ptr;
+    msg.dwBufferCount = min(len, u32::MAX as usize) as u32;
+}
+
+pub(crate) fn set_msghdr_control(msg: &mut msghdr, ptr: *mut u8, len: usize) {
+    msg.Control.buf = ptr;
+    msg.Control.len = len as u32;
+}
+
+pub(crate) fn set_msghdr_flags(msg: &mut msghdr, flags: c_int) {
+    msg.dwFlags = flags as u32;
+}
+
+pub(crate) fn msghdr_flags(msg: &msghdr) -> RecvFlags {
+    RecvFlags(msg.dwFlags as c_int)
+}
+
+fn init() {
+    static INIT: Once = Once::new();
+
+    INIT.call_once(|| {
+        // Initialize winsock through the standard library by just creating a
+        // dummy socket. Whether this is successful or not we drop the result as
+        // libstd will be sure to have initialized winsock.
+        let _ = net::UdpSocket::bind("127.0.0.1:34254");
+    });
+}
+
+pub(crate) type Socket = windows_sys::Win32::Networking::WinSock::SOCKET;
+
+pub(crate) unsafe fn socket_from_raw(socket: Socket) -> crate::socket::Inner {
+    crate::socket::Inner::from_raw_socket(socket as RawSocket)
+}
+
+pub(crate) fn socket_as_raw(socket: &crate::socket::Inner) -> Socket {
+    socket.as_raw_socket() as Socket
+}
+
+pub(crate) fn socket_into_raw(socket: crate::socket::Inner) -> Socket {
+    socket.into_raw_socket() as Socket
+}
+
+pub(crate) fn socket(family: c_int, mut ty: c_int, protocol: c_int) -> io::Result<Socket> {
+    init();
+
+    // Check if we set our custom flag.
+    let flags = if ty & Type::NO_INHERIT != 0 {
+        ty = ty & !Type::NO_INHERIT;
+        WSA_FLAG_NO_HANDLE_INHERIT
+    } else {
+        0
+    };
+
+    syscall!(
+        WSASocketW(
+            family,
+            ty,
+            protocol,
+            ptr::null_mut(),
+            0,
+            WSA_FLAG_OVERLAPPED | flags,
+        ),
+        PartialEq::eq,
+        INVALID_SOCKET
+    )
+}
+
+pub(crate) fn bind(socket: Socket, addr: &SockAddr) -> io::Result<()> {
+    syscall!(bind(socket, addr.as_ptr(), addr.len()), PartialEq::ne, 0).map(|_| ())
+}
+
+pub(crate) fn connect(socket: Socket, addr: &SockAddr) -> io::Result<()> {
+    syscall!(connect(socket, addr.as_ptr(), addr.len()), PartialEq::ne, 0).map(|_| ())
+}
+
+pub(crate) fn poll_connect(socket: &crate::Socket, timeout: Duration) -> io::Result<()> {
+    let start = Instant::now();
+
+    let mut fd_array = WSAPOLLFD {
+        fd: socket.as_raw(),
+        events: (POLLRDNORM | POLLWRNORM) as i16,
+        revents: 0,
+    };
+
+    loop {
+        let elapsed = start.elapsed();
+        if elapsed >= timeout {
+            return Err(io::ErrorKind::TimedOut.into());
+        }
+
+        let timeout = (timeout - elapsed).as_millis();
+        let timeout = clamp(timeout, 1, c_int::MAX as u128) as c_int;
+
+        match syscall!(
+            WSAPoll(&mut fd_array, 1, timeout),
+            PartialEq::eq,
+            SOCKET_ERROR
+        ) {
+            Ok(0) => return Err(io::ErrorKind::TimedOut.into()),
+            Ok(_) => {
+                // Error or hang up indicates an error (or failure to connect).
+                if (fd_array.revents & POLLERR as i16) != 0
+                    || (fd_array.revents & POLLHUP as i16) != 0
+                {
+                    match socket.take_error() {
+                        Ok(Some(err)) => return Err(err),
+                        Ok(None) => {
+                            return Err(io::Error::new(
+                                io::ErrorKind::Other,
+                                "no error set after POLLHUP",
+                            ))
+                        }
+                        Err(err) => return Err(err),
+                    }
+                }
+                return Ok(());
+            }
+            // Got interrupted, try again.
+            Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue,
+            Err(err) => return Err(err),
+        }
+    }
+}
+
+// TODO: use clamp from std lib, stable since 1.50.
+fn clamp<T>(value: T, min: T, max: T) -> T
+where
+    T: Ord,
+{
+    if value <= min {
+        min
+    } else if value >= max {
+        max
+    } else {
+        value
+    }
+}
+
+pub(crate) fn listen(socket: Socket, backlog: c_int) -> io::Result<()> {
+    syscall!(listen(socket, backlog), PartialEq::ne, 0).map(|_| ())
+}
+
+pub(crate) fn accept(socket: Socket) -> io::Result<(Socket, SockAddr)> {
+    // Safety: `accept` initialises the `SockAddr` for us.
+    unsafe {
+        SockAddr::try_init(|storage, len| {
+            syscall!(
+                accept(socket, storage.cast(), len),
+                PartialEq::eq,
+                INVALID_SOCKET
+            )
+        })
+    }
+}
+
+pub(crate) fn getsockname(socket: Socket) -> io::Result<SockAddr> {
+    // Safety: `getsockname` initialises the `SockAddr` for us.
+    unsafe {
+        SockAddr::try_init(|storage, len| {
+            syscall!(
+                getsockname(socket, storage.cast(), len),
+                PartialEq::eq,
+                SOCKET_ERROR
+            )
+        })
+    }
+    .map(|(_, addr)| addr)
+}
+
+pub(crate) fn getpeername(socket: Socket) -> io::Result<SockAddr> {
+    // Safety: `getpeername` initialises the `SockAddr` for us.
+    unsafe {
+        SockAddr::try_init(|storage, len| {
+            syscall!(
+                getpeername(socket, storage.cast(), len),
+                PartialEq::eq,
+                SOCKET_ERROR
+            )
+        })
+    }
+    .map(|(_, addr)| addr)
+}
+
+pub(crate) fn try_clone(socket: Socket) -> io::Result<Socket> {
+    let mut info: MaybeUninit<WSAPROTOCOL_INFOW> = MaybeUninit::uninit();
+    syscall!(
+        // NOTE: `process::id()` is the same as `GetCurrentProcessId`.
+        WSADuplicateSocketW(socket, process::id(), info.as_mut_ptr()),
+        PartialEq::eq,
+        SOCKET_ERROR
+    )?;
+    // Safety: `WSADuplicateSocketW` initialised `info` for us.
+    let mut info = unsafe { info.assume_init() };
+
+    syscall!(
+        WSASocketW(
+            info.iAddressFamily,
+            info.iSocketType,
+            info.iProtocol,
+            &mut info,
+            0,
+            WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT,
+        ),
+        PartialEq::eq,
+        INVALID_SOCKET
+    )
+}
+
+pub(crate) fn set_nonblocking(socket: Socket, nonblocking: bool) -> io::Result<()> {
+    let mut nonblocking = if nonblocking { 1 } else { 0 };
+    ioctlsocket(socket, FIONBIO, &mut nonblocking)
+}
+
+pub(crate) fn shutdown(socket: Socket, how: Shutdown) -> io::Result<()> {
+    let how = match how {
+        Shutdown::Write => SD_SEND,
+        Shutdown::Read => SD_RECEIVE,
+        Shutdown::Both => SD_BOTH,
+    } as i32;
+    syscall!(shutdown(socket, how), PartialEq::eq, SOCKET_ERROR).map(|_| ())
+}
+
+pub(crate) fn recv(socket: Socket, buf: &mut [MaybeUninit<u8>], flags: c_int) -> io::Result<usize> {
+    let res = syscall!(
+        recv(
+            socket,
+            buf.as_mut_ptr().cast(),
+            min(buf.len(), MAX_BUF_LEN) as c_int,
+            flags,
+        ),
+        PartialEq::eq,
+        SOCKET_ERROR
+    );
+    match res {
+        Ok(n) => Ok(n as usize),
+        Err(ref err) if err.raw_os_error() == Some(WSAESHUTDOWN as i32) => Ok(0),
+        Err(err) => Err(err),
+    }
+}
+
+pub(crate) fn recv_vectored(
+    socket: Socket,
+    bufs: &mut [crate::MaybeUninitSlice<'_>],
+    flags: c_int,
+) -> io::Result<(usize, RecvFlags)> {
+    let mut nread = 0;
+    let mut flags = flags as u32;
+    let res = syscall!(
+        WSARecv(
+            socket,
+            bufs.as_mut_ptr().cast(),
+            min(bufs.len(), u32::MAX as usize) as u32,
+            &mut nread,
+            &mut flags,
+            ptr::null_mut(),
+            None,
+        ),
+        PartialEq::eq,
+        SOCKET_ERROR
+    );
+    match res {
+        Ok(_) => Ok((nread as usize, RecvFlags(0))),
+        Err(ref err) if err.raw_os_error() == Some(WSAESHUTDOWN as i32) => Ok((0, RecvFlags(0))),
+        Err(ref err) if err.raw_os_error() == Some(WSAEMSGSIZE as i32) => {
+            Ok((nread as usize, RecvFlags(MSG_TRUNC)))
+        }
+        Err(err) => Err(err),
+    }
+}
+
+pub(crate) fn recv_from(
+    socket: Socket,
+    buf: &mut [MaybeUninit<u8>],
+    flags: c_int,
+) -> io::Result<(usize, SockAddr)> {
+    // Safety: `recvfrom` initialises the `SockAddr` for us.
+    unsafe {
+        SockAddr::try_init(|storage, addrlen| {
+            let res = syscall!(
+                recvfrom(
+                    socket,
+                    buf.as_mut_ptr().cast(),
+                    min(buf.len(), MAX_BUF_LEN) as c_int,
+                    flags,
+                    storage.cast(),
+                    addrlen,
+                ),
+                PartialEq::eq,
+                SOCKET_ERROR
+            );
+            match res {
+                Ok(n) => Ok(n as usize),
+                Err(ref err) if err.raw_os_error() == Some(WSAESHUTDOWN as i32) => Ok(0),
+                Err(err) => Err(err),
+            }
+        })
+    }
+}
+
+pub(crate) fn peek_sender(socket: Socket) -> io::Result<SockAddr> {
+    // Safety: `recvfrom` initialises the `SockAddr` for us.
+    let ((), sender) = unsafe {
+        SockAddr::try_init(|storage, addrlen| {
+            let res = syscall!(
+                recvfrom(
+                    socket,
+                    // Windows *appears* not to care if you pass a null pointer.
+                    ptr::null_mut(),
+                    0,
+                    MSG_PEEK,
+                    storage.cast(),
+                    addrlen,
+                ),
+                PartialEq::eq,
+                SOCKET_ERROR
+            );
+            match res {
+                Ok(_n) => Ok(()),
+                Err(e) => match e.raw_os_error() {
+                    Some(code) if code == (WSAESHUTDOWN as i32) || code == (WSAEMSGSIZE as i32) => {
+                        Ok(())
+                    }
+                    _ => Err(e),
+                },
+            }
+        })
+    }?;
+
+    Ok(sender)
+}
+
+pub(crate) fn recv_from_vectored(
+    socket: Socket,
+    bufs: &mut [crate::MaybeUninitSlice<'_>],
+    flags: c_int,
+) -> io::Result<(usize, RecvFlags, SockAddr)> {
+    // Safety: `recvfrom` initialises the `SockAddr` for us.
+    unsafe {
+        SockAddr::try_init(|storage, addrlen| {
+            let mut nread = 0;
+            let mut flags = flags as u32;
+            let res = syscall!(
+                WSARecvFrom(
+                    socket,
+                    bufs.as_mut_ptr().cast(),
+                    min(bufs.len(), u32::MAX as usize) as u32,
+                    &mut nread,
+                    &mut flags,
+                    storage.cast(),
+                    addrlen,
+                    ptr::null_mut(),
+                    None,
+                ),
+                PartialEq::eq,
+                SOCKET_ERROR
+            );
+            match res {
+                Ok(_) => Ok((nread as usize, RecvFlags(0))),
+                Err(ref err) if err.raw_os_error() == Some(WSAESHUTDOWN as i32) => {
+                    Ok((nread as usize, RecvFlags(0)))
+                }
+                Err(ref err) if err.raw_os_error() == Some(WSAEMSGSIZE as i32) => {
+                    Ok((nread as usize, RecvFlags(MSG_TRUNC)))
+                }
+                Err(err) => Err(err),
+            }
+        })
+    }
+    .map(|((n, recv_flags), addr)| (n, recv_flags, addr))
+}
+
+pub(crate) fn send(socket: Socket, buf: &[u8], flags: c_int) -> io::Result<usize> {
+    syscall!(
+        send(
+            socket,
+            buf.as_ptr().cast(),
+            min(buf.len(), MAX_BUF_LEN) as c_int,
+            flags,
+        ),
+        PartialEq::eq,
+        SOCKET_ERROR
+    )
+    .map(|n| n as usize)
+}
+
+pub(crate) fn send_vectored(
+    socket: Socket,
+    bufs: &[IoSlice<'_>],
+    flags: c_int,
+) -> io::Result<usize> {
+    let mut nsent = 0;
+    syscall!(
+        WSASend(
+            socket,
+            // FIXME: From the `WSASend` docs [1]:
+            // > For a Winsock application, once the WSASend function is called,
+            // > the system owns these buffers and the application may not
+            // > access them.
+            //
+            // So what we're doing is actually UB as `bufs` needs to be `&mut
+            // [IoSlice<'_>]`.
+            //
+            // Tracking issue: https://github.com/rust-lang/socket2-rs/issues/129.
+            //
+            // NOTE: `send_to_vectored` has the same problem.
+            //
+            // [1] https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsasend
+            bufs.as_ptr() as *mut _,
+            min(bufs.len(), u32::MAX as usize) as u32,
+            &mut nsent,
+            flags as u32,
+            std::ptr::null_mut(),
+            None,
+        ),
+        PartialEq::eq,
+        SOCKET_ERROR
+    )
+    .map(|_| nsent as usize)
+}
+
+pub(crate) fn send_to(
+    socket: Socket,
+    buf: &[u8],
+    addr: &SockAddr,
+    flags: c_int,
+) -> io::Result<usize> {
+    syscall!(
+        sendto(
+            socket,
+            buf.as_ptr().cast(),
+            min(buf.len(), MAX_BUF_LEN) as c_int,
+            flags,
+            addr.as_ptr(),
+            addr.len(),
+        ),
+        PartialEq::eq,
+        SOCKET_ERROR
+    )
+    .map(|n| n as usize)
+}
+
+pub(crate) fn send_to_vectored(
+    socket: Socket,
+    bufs: &[IoSlice<'_>],
+    addr: &SockAddr,
+    flags: c_int,
+) -> io::Result<usize> {
+    let mut nsent = 0;
+    syscall!(
+        WSASendTo(
+            socket,
+            // FIXME: Same problem as in `send_vectored`.
+            bufs.as_ptr() as *mut _,
+            bufs.len().min(u32::MAX as usize) as u32,
+            &mut nsent,
+            flags as u32,
+            addr.as_ptr(),
+            addr.len(),
+            ptr::null_mut(),
+            None,
+        ),
+        PartialEq::eq,
+        SOCKET_ERROR
+    )
+    .map(|_| nsent as usize)
+}
+
+pub(crate) fn sendmsg(socket: Socket, msg: &MsgHdr<'_, '_, '_>, flags: c_int) -> io::Result<usize> {
+    let mut nsent = 0;
+    syscall!(
+        WSASendMsg(
+            socket,
+            &msg.inner,
+            flags as u32,
+            &mut nsent,
+            ptr::null_mut(),
+            None,
+        ),
+        PartialEq::eq,
+        SOCKET_ERROR
+    )
+    .map(|_| nsent as usize)
+}
+
+/// Wrapper around `getsockopt` to deal with platform specific timeouts.
+pub(crate) fn timeout_opt(fd: Socket, lvl: c_int, name: i32) -> io::Result<Option<Duration>> {
+    unsafe { getsockopt(fd, lvl, name).map(from_ms) }
+}
+
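+// A zero value from Windows means "no timeout"; for example, 2500 maps to
+// `Some(Duration::new(2, 500_000_000))`, i.e. 2.5 seconds.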
+fn from_ms(duration: u32) -> Option<Duration> {
+    if duration == 0 {
+        None
+    } else {
+        let secs = duration / 1000;
+        let nsec = (duration % 1000) * 1000000;
+        Some(Duration::new(secs as u64, nsec as u32))
+    }
+}
+
+/// Wrapper around `setsockopt` to deal with platform specific timeouts.
+pub(crate) fn set_timeout_opt(
+    socket: Socket,
+    level: c_int,
+    optname: i32,
+    duration: Option<Duration>,
+) -> io::Result<()> {
+    let duration = into_ms(duration);
+    unsafe { setsockopt(socket, level, optname, duration) }
+}
+
+fn into_ms(duration: Option<Duration>) -> u32 {
+    // Note that a duration is a (u64, u32) (seconds, nanoseconds) pair, and the
+    // timeouts in windows APIs are typically u32 milliseconds. To translate, we
+    // have two pieces to take care of:
+    //
+    // * Sub-millisecond precision is truncated (`as_millis` rounds down).
+    // * Durations greater than u32::MAX milliseconds (about 50 days) are
+    //   clamped to INFINITE.
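+    //
+    // For example, `Some(Duration::from_millis(1500))` becomes 1500 and
+    // `None` becomes 0.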
+    duration.map_or(0, |duration| {
+        min(duration.as_millis(), INFINITE as u128) as u32
+    })
+}
+
+pub(crate) fn set_tcp_keepalive(socket: Socket, keepalive: &TcpKeepalive) -> io::Result<()> {
+    let mut keepalive = tcp_keepalive {
+        onoff: 1,
+        keepalivetime: into_ms(keepalive.time),
+        keepaliveinterval: into_ms(keepalive.interval),
+    };
+    let mut out = 0;
+    syscall!(
+        WSAIoctl(
+            socket,
+            SIO_KEEPALIVE_VALS,
+            &mut keepalive as *mut _ as *mut _,
+            size_of::<tcp_keepalive>() as _,
+            ptr::null_mut(),
+            0,
+            &mut out,
+            ptr::null_mut(),
+            None,
+        ),
+        PartialEq::eq,
+        SOCKET_ERROR
+    )
+    .map(|_| ())
+}
+
+/// Caller must ensure `T` is the correct type for `level` and `optname`.
+// NOTE: `optname` is actually `i32`, but all constants are `u32`.
+pub(crate) unsafe fn getsockopt<T>(socket: Socket, level: c_int, optname: i32) -> io::Result<T> {
+    let mut optval: MaybeUninit<T> = MaybeUninit::uninit();
+    let mut optlen = mem::size_of::<T>() as c_int;
+    syscall!(
+        getsockopt(
+            socket,
+            level as i32,
+            optname,
+            optval.as_mut_ptr().cast(),
+            &mut optlen,
+        ),
+        PartialEq::eq,
+        SOCKET_ERROR
+    )
+    .map(|_| {
+        debug_assert_eq!(optlen as usize, mem::size_of::<T>());
+        // Safety: `getsockopt` initialised `optval` for us.
+        optval.assume_init()
+    })
+}
+
+/// Caller must ensure `T` is the correct type for `level` and `optname`.
+// NOTE: `optname` is actually `i32`, but all constants are `u32`.
+pub(crate) unsafe fn setsockopt<T>(
+    socket: Socket,
+    level: c_int,
+    optname: i32,
+    optval: T,
+) -> io::Result<()> {
+    syscall!(
+        setsockopt(
+            socket,
+            level as i32,
+            optname,
+            (&optval as *const T).cast(),
+            mem::size_of::<T>() as c_int,
+        ),
+        PartialEq::eq,
+        SOCKET_ERROR
+    )
+    .map(|_| ())
+}
+
+fn ioctlsocket(socket: Socket, cmd: i32, payload: &mut u32) -> io::Result<()> {
+    syscall!(
+        ioctlsocket(socket, cmd, payload),
+        PartialEq::eq,
+        SOCKET_ERROR
+    )
+    .map(|_| ())
+}
+
+pub(crate) fn to_in_addr(addr: &Ipv4Addr) -> IN_ADDR {
+    IN_ADDR {
+        S_un: IN_ADDR_0 {
+            // `S_un` is stored as BE on all machines, and the array is in BE
+            // order. So the native endian conversion method is used so that
+            // it's never swapped.
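+            // For example, 127.0.0.1 is stored as the byte sequence
+            // [127, 0, 0, 1] regardless of host endianness.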
+            S_addr: u32::from_ne_bytes(addr.octets()),
+        },
+    }
+}
+
+pub(crate) fn from_in_addr(in_addr: IN_ADDR) -> Ipv4Addr {
+    Ipv4Addr::from(unsafe { in_addr.S_un.S_addr }.to_ne_bytes())
+}
+
+pub(crate) fn to_in6_addr(addr: &Ipv6Addr) -> IN6_ADDR {
+    IN6_ADDR {
+        u: IN6_ADDR_0 {
+            Byte: addr.octets(),
+        },
+    }
+}
+
+pub(crate) fn from_in6_addr(addr: IN6_ADDR) -> Ipv6Addr {
+    Ipv6Addr::from(unsafe { addr.u.Byte })
+}
+
+pub(crate) fn to_mreqn(
+    multiaddr: &Ipv4Addr,
+    interface: &crate::socket::InterfaceIndexOrAddress,
+) -> IpMreq {
+    IpMreq {
+        imr_multiaddr: to_in_addr(multiaddr),
+        // Per https://docs.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-ip_mreq#members:
+        //
+        // imr_interface
+        //
+        // The local IPv4 address of the interface or the interface index on
+        // which the multicast group should be joined or dropped. This value is
+        // in network byte order. If this member specifies an IPv4 address of
+        // 0.0.0.0, the default IPv4 multicast interface is used.
+        //
+        // To use an interface index of 1 would be the same as an IP address of
+        // 0.0.0.1.
+        imr_interface: match interface {
+            crate::socket::InterfaceIndexOrAddress::Index(interface) => {
+                to_in_addr(&(*interface).into())
+            }
+            crate::socket::InterfaceIndexOrAddress::Address(interface) => to_in_addr(interface),
+        },
+    }
+}
+
+#[allow(unsafe_op_in_unsafe_fn)]
+pub(crate) fn unix_sockaddr(path: &Path) -> io::Result<SockAddr> {
+    // SAFETY: a `sockaddr_storage` of all zeros is valid.
+    let mut storage = unsafe { mem::zeroed::<sockaddr_storage>() };
+    let len = {
+        let storage: &mut windows_sys::Win32::Networking::WinSock::SOCKADDR_UN =
+            unsafe { &mut *(&mut storage as *mut sockaddr_storage).cast() };
+
+        // Windows expects a UTF-8 path here even though Windows paths are
+        // usually UCS-2 encoded. If Rust exposed OsStr's Wtf8 encoded
+        // buffer, this could be used directly, relying on Windows to
+        // validate the path, but Rust hides this implementation detail.
+        //
+        // See <https://github.com/rust-lang/rust/pull/95290>.
+        let bytes = path
+            .to_str()
+            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "path must be valid UTF-8"))?
+            .as_bytes();
+
+        // Windows appears to allow non-null-terminated paths, but this is
+        // not documented, so do not rely on it yet.
+        //
+        // See <https://github.com/rust-lang/socket2/issues/331>.
+        if bytes.len() >= storage.sun_path.len() {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "path must be shorter than SUN_LEN",
+            ));
+        }
+
+        storage.sun_family = crate::sys::AF_UNIX as sa_family_t;
+        // `storage` was initialized to zero above, so the path is
+        // already null terminated.
+        storage.sun_path[..bytes.len()].copy_from_slice(bytes);
+
+        let base = storage as *const _ as usize;
+        let path = &storage.sun_path as *const _ as usize;
+        let sun_path_offset = path - base;
+        sun_path_offset + bytes.len() + 1
+    };
+    Ok(unsafe { SockAddr::new(storage, len as socklen_t) })
+}
+
+/// Windows only API.
+impl crate::Socket {
+    /// Sets `HANDLE_FLAG_INHERIT` using `SetHandleInformation`.
+    #[cfg(feature = "all")]
+    #[cfg_attr(docsrs, doc(cfg(all(windows, feature = "all"))))]
+    pub fn set_no_inherit(&self, no_inherit: bool) -> io::Result<()> {
+        self._set_no_inherit(no_inherit)
+    }
+
+    pub(crate) fn _set_no_inherit(&self, no_inherit: bool) -> io::Result<()> {
+        // NOTE: can't use `syscall!` because it expects the function in the
+        // `windows_sys::Win32::Networking::WinSock::` path.
+        let res = unsafe {
+            SetHandleInformation(
+                self.as_raw() as HANDLE,
+                HANDLE_FLAG_INHERIT,
+                !no_inherit as _,
+            )
+        };
+        if res == 0 {
+            // Zero means error.
+            Err(io::Error::last_os_error())
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Returns the [`Protocol`] of this socket by checking the `SO_PROTOCOL_INFOW`
+    /// option on this socket.
+    ///
+    /// [`Protocol`]: crate::Protocol
+    #[cfg(feature = "all")]
+    pub fn protocol(&self) -> io::Result<Option<crate::Protocol>> {
+        let info = unsafe {
+            getsockopt::<WSAPROTOCOL_INFOW>(self.as_raw(), SOL_SOCKET, SO_PROTOCOL_INFOW)?
+        };
+        match info.iProtocol {
+            0 => Ok(None),
+            p => Ok(Some(crate::Protocol::from(p))),
+        }
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(windows)))]
+impl AsSocket for crate::Socket {
+    fn as_socket(&self) -> BorrowedSocket<'_> {
+        // SAFETY: lifetime is bound by self.
+        unsafe { BorrowedSocket::borrow_raw(self.as_raw() as RawSocket) }
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(windows)))]
+impl AsRawSocket for crate::Socket {
+    fn as_raw_socket(&self) -> RawSocket {
+        self.as_raw() as RawSocket
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(windows)))]
+impl From<crate::Socket> for OwnedSocket {
+    fn from(sock: crate::Socket) -> OwnedSocket {
+        // SAFETY: sock.into_raw() always returns a valid fd.
+        unsafe { OwnedSocket::from_raw_socket(sock.into_raw() as RawSocket) }
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(windows)))]
+impl IntoRawSocket for crate::Socket {
+    fn into_raw_socket(self) -> RawSocket {
+        self.into_raw() as RawSocket
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(windows)))]
+impl From<OwnedSocket> for crate::Socket {
+    fn from(fd: OwnedSocket) -> crate::Socket {
+        // SAFETY: `OwnedFd` ensures the fd is valid.
+        unsafe { crate::Socket::from_raw_socket(fd.into_raw_socket()) }
+    }
+}
+
+#[cfg_attr(docsrs, doc(cfg(windows)))]
+impl FromRawSocket for crate::Socket {
+    unsafe fn from_raw_socket(socket: RawSocket) -> crate::Socket {
+        crate::Socket::from_raw(socket as Socket)
+    }
+}
+
+#[test]
+fn in_addr_conversion() {
+    let ip = Ipv4Addr::new(127, 0, 0, 1);
+    let raw = to_in_addr(&ip);
+    assert_eq!(unsafe { raw.S_un.S_addr }, 127 << 0 | 1 << 24);
+    assert_eq!(from_in_addr(raw), ip);
+
+    let ip = Ipv4Addr::new(127, 34, 4, 12);
+    let raw = to_in_addr(&ip);
+    assert_eq!(
+        unsafe { raw.S_un.S_addr },
+        127 << 0 | 34 << 8 | 4 << 16 | 12 << 24
+    );
+    assert_eq!(from_in_addr(raw), ip);
+}
+
+#[test]
+fn in6_addr_conversion() {
+    let ip = Ipv6Addr::new(0x2000, 1, 2, 3, 4, 5, 6, 7);
+    let raw = to_in6_addr(&ip);
+    let want = [
+        0x2000u16.to_be(),
+        1u16.to_be(),
+        2u16.to_be(),
+        3u16.to_be(),
+        4u16.to_be(),
+        5u16.to_be(),
+        6u16.to_be(),
+        7u16.to_be(),
+    ];
+    assert_eq!(unsafe { raw.u.Word }, want);
+    assert_eq!(from_in6_addr(raw), ip);
+}
diff --git a/crates/spin/.cargo-checksum.json b/crates/spin/.cargo-checksum.json
new file mode 100644
index 0000000..05961f1
--- /dev/null
+++ b/crates/spin/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"34b35704b01c68214b27cb20f0c4325b3ce6e40ce8fd5aa3d2b884697a27307b","Cargo.lock":"9d6481d9ffe412e6c4d9445d89c0fa3e33a14b61c56aa5c121d9f28656cacfe4","Cargo.toml":"d2a17df2fa85736364744ea8279c6aed7276cabcc63bca2d25667198504e4be7","LICENSE":"6ac8711fb340c62ce0a4ecd463342d3fa0e8e70de697c863a2e1c0c53006003c","README.md":"78f0456ba0f3fa490d2bf38d79c42b293d551a9951da5bef0e1e5043143abf78","benches/mutex.rs":"2fd79239798e88cab0fd982e04f91a44c7c63282f8703d85fbdd81a8a1e71917","examples/debug.rs":"0074651d78f8ed6d6c0274ae832c0d78de6c5c59423936f58e79cb3c98baea2c","script/doc-upload.cfg":"7383ee022197c1dade9a2e6888261def4a80df08ecb72b1b4db47d1e8a6ff8e0","src/barrier.rs":"f8569a601d870a713344747eb355e6d840db510a599709173ebd0ccba5fc4028","src/lazy.rs":"45cf9174ee607e8a9acfa652b95c9b0ac71d98e89e73b9850041c79c5f3a726e","src/lib.rs":"85fa19874da6518e7ccd0d8bd3a0549fba52afd820789797e0221af1ac62196a","src/mutex.rs":"530039838850a0e71f4af42b9a7cffa0f922874e8f00001d36d67876842ed68f","src/mutex/fair.rs":"74cf2a0b8ab8e7a95564d97e8a66fc5d3d2641dd655de56a94ae4a93458f82c3","src/mutex/spin.rs":"a11e42d58e87a195e5e2110b46bda766f5a62a0c0b75a2b163f1ce37745bf337","src/mutex/ticket.rs":"1711bb02de48cf92f950b8f31946d5003225542ab6521333be1f525b76710f26","src/once.rs":"e4289220d505b7a15e8f9fb3f0d032ee57df6a5ee1d50123d4187e1afc6fb98b","src/relax.rs":"919c345bf55c3b2f9a7d9cd4aec2b2b3db68b6dc40ee2e4de4f6f32027abc290","src/rwlock.rs":"ba3b3dd20cdcd97691f042bcdd6d352e52601db7dc0cc7053c01a5873754b196"},"package":"6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"}
\ No newline at end of file
diff --git a/crates/spin/Android.bp b/crates/spin/Android.bp
new file mode 100644
index 0000000..1abef28
--- /dev/null
+++ b/crates/spin/Android.bp
@@ -0,0 +1,86 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_spin_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_spin_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-MIT"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libspin",
+    host_supported: true,
+    crate_name: "spin",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.9.8",
+    crate_root: "src/lib.rs",
+    edition: "2015",
+    features: [
+        "mutex",
+        "once",
+        "spin_mutex",
+        "std",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+    min_sdk_version: "29",
+}
+
+rust_test {
+    name: "spin_test_src_lib",
+    host_supported: true,
+    crate_name: "spin",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.9.8",
+    crate_root: "src/lib.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2015",
+    features: [
+        "mutex",
+        "once",
+        "spin_mutex",
+        "std",
+    ],
+    rustlibs: ["libcriterion"],
+}
+
+rust_library_rlib {
+    name: "libspin_nostd",
+    crate_name: "spin",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.9.8",
+    crate_root: "src/lib.rs",
+    edition: "2015",
+    features: [
+        "mutex",
+        "once",
+        "spin_mutex",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    prefer_rlib: true,
+    no_stdlibs: true,
+    stdlibs: [
+        "libcompiler_builtins.rust_sysroot",
+        "libcore.rust_sysroot",
+    ],
+    product_available: true,
+    vendor_available: true,
+    min_sdk_version: "29",
+}
diff --git a/crates/spin/CHANGELOG.md b/crates/spin/CHANGELOG.md
new file mode 100644
index 0000000..09f1f68
--- /dev/null
+++ b/crates/spin/CHANGELOG.md
@@ -0,0 +1,146 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+# Unreleased
+
+### Added
+
+### Changed
+
+### Fixed
+
+# [0.9.8] - 2023-04-03
+
+### Fixed
+
+- Unsoundness in `Once::try_call_once` caused by an `Err(_)` result
+
+# [0.9.7] - 2023-03-27
+
+### Fixed
+
+- Relaxed accidentally restricted `Send`/`Sync` bounds for `Mutex` guards
+
+# [0.9.6] - 2023-03-13
+
+### Fixed
+
+- Relaxed accidentally restricted `Send`/`Sync` bounds for `RwLock` guards
+
+# [0.9.5] - 2023-02-07
+
+### Added
+
+- `FairMutex`, a new mutex implementation that reduces writer starvation.
+- An MSRV policy: Rust 1.38 is currently required
+
+### Changed
+
+- The crate's CI now has full MIRI integration, further improving the confidence you can have in the implementation.
+
+### Fixed
+
+- Ensured that the crate's abstractions comply with stacked borrows rules.
+- Unsoundness in the `RwLock` that could be triggered via a reader overflow
+- Relaxed various `Send`/`Sync` bound requirements to make the crate more flexible
+
+# [0.9.4] - 2022-07-14
+
+### Fixed
+
+- Fixed unsoundness in `RwLock` on reader overflow
+- Relaxed `Send`/`Sync` bounds for `SpinMutex` and `TicketMutex` (doesn't affect `Mutex` itself)
+
+# [0.9.3] - 2022-04-17
+
+### Added
+
+- Implemented `Default` for `Once`
+- `Once::try_call_once`
+
+### Fixed
+
+- Fixed bug that caused `Once::call_once` to incorrectly fail
+
+# [0.9.2] - 2021-07-09
+
+### Changed
+
+- Improved `Once` performance by reducing the memory footprint of internal state to one byte
+
+### Fixed
+
+- Improved performance of `Once` by relaxing ordering guarantees and removing redundant checks
+
+# [0.9.1] - 2021-06-21
+
+### Added
+
+- Default type parameter on `Once` for better ergonomics
+
+# [0.9.0] - 2021-03-18
+
+### Changed
+
+- Placed all major API features behind feature flags
+
+### Fixed
+
+- A compilation bug with the `lock_api` feature
+
+# [0.8.0] - 2021-03-15
+
+### Added
+
+- `Once::get_unchecked`
+- `RelaxStrategy` trait with type parameter on all locks to support switching between relax strategies
+
+### Changed
+
+- `lock_api1` feature is now named `lock_api`
+
+# [0.7.1] - 2021-01-12
+
+### Fixed
+
+- Prevented `Once` leaking the inner value upon drop
+
+# [0.7.0] - 2020-10-18
+
+### Added
+
+- `Once::initialized`
+- `Once::get_mut`
+- `Once::try_into_inner`
+- `Once::poll`
+- `RwLock`, `Mutex` and `Once` now implement `From<T>`
+- `Lazy` type for lazy initialization
+- `TicketMutex`, an alternative mutex implementation
+- `std` feature flag to enable thread yielding instead of spinning
+- `Mutex::is_locked`/`SpinMutex::is_locked`/`TicketMutex::is_locked`
+- `Barrier`
+
+### Changed
+
+- `Once::wait` now spins even if initialization has not yet started
+- `Guard::leak` is now an associated function instead of a method
+- Improved the performance of `SpinMutex` by relaxing unnecessarily conservative
+  ordering requirements
+
+# [0.6.0] - 2020-10-08
+
+### Added
+
+- More dynamic `Send`/`Sync` bounds for lock guards
+- `lock_api` compatibility
+- `Guard::leak` methods
+- `RwLock::reader_count` and `RwLock::writer_count`
+- `Display` implementation for guard types
+
+### Changed
+
+- Made `Debug` impls of lock guards just show the inner type like `std`
diff --git a/crates/spin/Cargo.lock b/crates/spin/Cargo.lock
new file mode 100644
index 0000000..d8bb6cb
--- /dev/null
+++ b/crates/spin/Cargo.lock
@@ -0,0 +1,623 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "anes"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi 0.1.19",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bumpalo"
+version = "3.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535"
+
+[[package]]
+name = "cast"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "ciborium"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f"
+dependencies = [
+ "ciborium-io",
+ "ciborium-ll",
+ "serde",
+]
+
+[[package]]
+name = "ciborium-io"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369"
+
+[[package]]
+name = "ciborium-ll"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b"
+dependencies = [
+ "ciborium-io",
+ "half",
+]
+
+[[package]]
+name = "clap"
+version = "3.2.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
+dependencies = [
+ "bitflags",
+ "clap_lex",
+ "indexmap",
+ "textwrap",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "criterion"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb"
+dependencies = [
+ "anes",
+ "atty",
+ "cast",
+ "ciborium",
+ "clap",
+ "criterion-plot",
+ "itertools",
+ "lazy_static",
+ "num-traits",
+ "oorandom",
+ "plotters",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "tinytemplate",
+ "walkdir",
+]
+
+[[package]]
+name = "criterion-plot"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
+dependencies = [
+ "cast",
+ "itertools",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
+dependencies = [
+ "cfg-if",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695"
+dependencies = [
+ "autocfg",
+ "cfg-if",
+ "crossbeam-utils",
+ "memoffset",
+ "scopeguard",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "either"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
+
+[[package]]
+name = "half"
+version = "1.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
+
+[[package]]
+name = "js-sys"
+version = "0.3.61"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "libc"
+version = "0.2.140"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c"
+
+[[package]]
+name = "lock_api"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "memoffset"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
+dependencies = [
+ "hermit-abi 0.2.6",
+ "libc",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.17.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
+
+[[package]]
+name = "oorandom"
+version = "11.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
+
+[[package]]
+name = "os_str_bytes"
+version = "6.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267"
+
+[[package]]
+name = "plotters"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97"
+dependencies = [
+ "num-traits",
+ "plotters-backend",
+ "plotters-svg",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "plotters-backend"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142"
+
+[[package]]
+name = "plotters-svg"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f"
+dependencies = [
+ "plotters-backend",
+]
+
+[[package]]
+name = "portable-atomic"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f602a0d1e09a48e4f8e8b4d4042e32807c3676da31f2ecabeac9f96226ec6c45"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rayon"
+version = "1.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
+dependencies = [
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
+dependencies = [
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-utils",
+ "num_cpus",
+]
+
+[[package]]
+name = "regex"
+version = "1.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
+dependencies = [
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
+
+[[package]]
+name = "ryu"
+version = "1.0.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041"
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "serde"
+version = "1.0.159"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.159"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.13",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744"
+dependencies = [
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "spin"
+version = "0.9.8"
+dependencies = [
+ "criterion",
+ "lock_api",
+ "portable-atomic",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c9da457c5285ac1f936ebd076af6dac17a61cfe7826f2076b4d015cf47bc8ec"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
+
+[[package]]
+name = "tinytemplate"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4"
+
+[[package]]
+name = "walkdir"
+version = "2.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698"
+dependencies = [
+ "same-file",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d"
+
+[[package]]
+name = "web-sys"
+version = "0.3.61"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/crates/spin/Cargo.toml b/crates/spin/Cargo.toml
new file mode 100644
index 0000000..ff0d151
--- /dev/null
+++ b/crates/spin/Cargo.toml
@@ -0,0 +1,80 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+rust-version = "1.38"
+name = "spin"
+version = "0.9.8"
+authors = [
+    "Mathijs van de Nes <git@mathijs.vd-nes.nl>",
+    "John Ericson <git@JohnEricson.me>",
+    "Joshua Barretto <joshua.s.barretto@gmail.com>",
+]
+description = "Spin-based synchronization primitives"
+readme = "README.md"
+keywords = [
+    "spinlock",
+    "mutex",
+    "rwlock",
+]
+license = "MIT"
+repository = "https://github.com/mvdnes/spin-rs.git"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = [
+    "--cfg",
+    "docsrs",
+]
+
+[[bench]]
+name = "mutex"
+harness = false
+required-features = ["ticket_mutex"]
+
+[dependencies.lock_api_crate]
+version = "0.4"
+optional = true
+package = "lock_api"
+
+[dependencies.portable-atomic]
+version = "1"
+optional = true
+default-features = false
+
+[dev-dependencies.criterion]
+version = "0.4"
+
+[features]
+barrier = ["mutex"]
+default = [
+    "lock_api",
+    "mutex",
+    "spin_mutex",
+    "rwlock",
+    "once",
+    "lazy",
+    "barrier",
+]
+fair_mutex = ["mutex"]
+lazy = ["once"]
+lock_api = ["lock_api_crate"]
+mutex = []
+once = []
+portable_atomic = ["portable-atomic"]
+rwlock = []
+spin_mutex = ["mutex"]
+std = []
+ticket_mutex = ["mutex"]
+use_ticket_mutex = [
+    "mutex",
+    "ticket_mutex",
+]
diff --git a/crates/spin/LICENSE b/crates/spin/LICENSE
new file mode 100644
index 0000000..b2d7f7b
--- /dev/null
+++ b/crates/spin/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mathijs van de Nes
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/crates/spin/METADATA b/crates/spin/METADATA
new file mode 100644
index 0000000..b1dd979
--- /dev/null
+++ b/crates/spin/METADATA
@@ -0,0 +1,20 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update external/rust/crates/spin
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
+
+name: "spin"
+description: "Spin-based synchronization primitives"
+third_party {
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2024
+    month: 2
+    day: 5
+  }
+  homepage: "https://crates.io/crates/spin"
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/spin/spin-0.9.8.crate"
+    version: "0.9.8"
+  }
+}
diff --git a/crates/spin/MODULE_LICENSE_MIT b/crates/spin/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/spin/MODULE_LICENSE_MIT
diff --git a/crates/spin/README.md b/crates/spin/README.md
new file mode 100644
index 0000000..7fd3780
--- /dev/null
+++ b/crates/spin/README.md
@@ -0,0 +1,143 @@
+# spin-rs
+
+[![Crates.io version](https://img.shields.io/crates/v/spin.svg)](https://crates.io/crates/spin)
+[![docs.rs](https://docs.rs/spin/badge.svg)](https://docs.rs/spin/)
+[![Build Status](https://travis-ci.org/mvdnes/spin-rs.svg)](https://travis-ci.org/mvdnes/spin-rs)
+
+Spin-based synchronization primitives.
+
+This crate provides [spin-based](https://en.wikipedia.org/wiki/Spinlock)
+versions of the primitives in `std::sync`. Because synchronization is done
+through spinning, the primitives are suitable for use in `no_std` environments.
+
+Before deciding to use `spin`, we recommend reading
+[this superb blog post](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html)
+by [@matklad](https://github.com/matklad/) that discusses the pros and cons of
+spinlocks. If you have access to `std`, it's likely that the primitives in
+`std::sync` will serve you better except in very specific circumstances.
+
+## Features
+
+- `Mutex`, `RwLock`, `Once`, `Lazy` and `Barrier` equivalents
+- Support for `no_std` environments
+- [`lock_api`](https://crates.io/crates/lock_api) compatibility
+- Upgradeable `RwLock` guards
+- Guards can be sent and shared between threads
+- Guard leaking
+- Ticket locks
+- Different strategies for dealing with contention
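+
+For instance, a minimal sketch of the upgradeable `RwLock` guards mentioned above
+(assuming the default features):
+
+```rust
+fn main() {
+    let lock = spin::RwLock::new(5);
+    // Take an upgradeable read guard, then upgrade it to a write guard.
+    let readable = lock.upgradeable_read();
+    assert_eq!(*readable, 5);
+    let mut writer = readable.upgrade();
+    *writer = 6;
+    drop(writer);
+    assert_eq!(*lock.read(), 6);
+}
+```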
+
+## Usage
+
+Include the following under the `[dependencies]` section in your `Cargo.toml` file.
+
+```toml
+spin = "x.y"
+```
+
+## Example
+
+When calling `lock` on a `Mutex` you will get a guard value that provides access
+to the data. When this guard is dropped, the mutex will become available again.
+
+```rust
+extern crate spin;
+use std::{sync::Arc, thread};
+
+fn main() {
+    let counter = Arc::new(spin::Mutex::new(0));
+
+    let thread = thread::spawn({
+        let counter = counter.clone();
+        move || {
+            for _ in 0..100 {
+                *counter.lock() += 1;
+            }
+        }
+    });
+
+    for _ in 0..100 {
+        *counter.lock() += 1;
+    }
+
+    thread.join().unwrap();
+
+    assert_eq!(*counter.lock(), 200);
+}
+```
+
+## Feature flags
+
+The crate comes with a few feature flags that you may wish to use.
+
+- `mutex` enables the `Mutex` type.
+
+- `spin_mutex` enables the `SpinMutex` type.
+
+- `ticket_mutex` enables the `TicketMutex` type.
+
+- `use_ticket_mutex` switches to a ticket lock for the implementation of `Mutex`. This
+  is recommended only on targets for which ordinary spinning locks perform very badly
+  because it will change the implementation used by other crates that depend on `spin`.
+
+- `rwlock` enables the `RwLock` type.
+
+- `once` enables the `Once` type.
+
+- `lazy` enables the `Lazy` type.
+
+- `barrier` enables the `Barrier` type.
+
+- `lock_api` enables support for [`lock_api`](https://crates.io/crates/lock_api)
+
+- `std` enables support for thread yielding instead of spinning.
+
+- `portable_atomic` enables usage of the `portable-atomic` crate
+  to support platforms without native atomic operations (Cortex-M0, etc.).
+  The `portable_atomic_unsafe_assume_single_core` cfg or `critical-section` feature
+  of the `portable-atomic` crate must also be set by the final binary crate.
+
+  When using the cfg, this can be done by adapting the following snippet to the `.cargo/config` file:
+  ```toml
+  [target.<target>]
+  rustflags = [ "--cfg", "portable_atomic_unsafe_assume_single_core" ]
+  ```
+  Note that this cfg is unsafe by nature, and enabling it for multicore systems is unsound.
+
+  When using the `critical-section` feature, you need to provide a critical-section
+  implementation that is sound for your system by implementing an unsafe trait.
+  See [the documentation for the `portable-atomic` crate](https://docs.rs/portable-atomic/latest/portable_atomic/#optional-cfg)
+  for more information.
+
+## Remarks
+
+It is often desirable to have a lock shared between threads. Wrapping the lock in an
+`std::sync::Arc` is one route through which this might be achieved.
+
+Locks provide zero-overhead access to their data when accessed through a mutable
+reference by using their `get_mut` methods.
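+
+As a minimal sketch of this (assuming the default `Mutex` type):
+
+```rust
+fn main() {
+    let mut lock = spin::Mutex::new(0u32);
+    // Through a mutable reference, the data can be reached without locking.
+    *lock.get_mut() += 1;
+    assert_eq!(*lock.lock(), 1);
+}
+```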
+
+The behaviour of these locks is similar to that of their namesakes in `std::sync`. They
+differ in the following ways:
+
+- Locks will not be poisoned in case of failure.
+- Threads will not yield to the OS scheduler when they encounter a lock that cannot be
+  accessed. Instead, they will 'spin' in a busy loop until the lock becomes available.
+
+Many of the feature flags listed above are enabled by default. If you're writing a
+library, we recommend disabling those that you don't use to avoid increasing compilation
+time for your crate's users. You can do this like so:
+
+```toml
+[dependencies]
+spin = { version = "x.y", default-features = false, features = [...] }
+```
+
+## Minimum Safe Rust Version (MSRV)
+
+This crate is guaranteed to compile on a Minimum Safe Rust Version (MSRV) of 1.38.0 and above.
+This version will not be changed without a minor version bump.
+
+## License
+
+`spin` is distributed under the MIT License (see `LICENSE`).
diff --git a/crates/spin/TEST_MAPPING b/crates/spin/TEST_MAPPING
new file mode 100644
index 0000000..c028b97
--- /dev/null
+++ b/crates/spin/TEST_MAPPING
@@ -0,0 +1,27 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/quiche"
+    },
+    {
+      "path": "external/rust/crates/ring"
+    },
+    {
+      "path": "external/rust/crates/webpki"
+    },
+    {
+      "path": "packages/modules/DnsResolver"
+    }
+  ],
+  "presubmit": [
+    {
+      "name": "spin_test_src_lib"
+    }
+  ],
+  "presubmit-rust": [
+    {
+      "name": "spin_test_src_lib"
+    }
+  ]
+}
diff --git a/crates/spin/benches/mutex.rs b/crates/spin/benches/mutex.rs
new file mode 100644
index 0000000..83897bb
--- /dev/null
+++ b/crates/spin/benches/mutex.rs
@@ -0,0 +1,126 @@
+#[macro_use]
+extern crate criterion;
+
+use criterion::{Criterion, Bencher, black_box};
+use std::{
+    ops::DerefMut,
+    sync::Arc,
+};
+
+trait Mutex<T>: Send + Sync + 'static {
+    type Guard<'a>: DerefMut<Target = T> where Self: 'a;
+    fn new(x: T) -> Self;
+    fn lock(&self) -> Self::Guard<'_>;
+}
+
+impl<T: Send + 'static> Mutex<T> for spin::mutex::SpinMutex<T> {
+    type Guard<'a> = spin::mutex::SpinMutexGuard<'a, T> where Self: 'a;
+    fn new(x: T) -> Self { spin::mutex::SpinMutex::new(x) }
+    fn lock(&self) -> Self::Guard<'_> { self.lock() }
+}
+
+impl<T: Send + 'static> Mutex<T> for spin::mutex::TicketMutex<T> {
+    type Guard<'a> = spin::mutex::TicketMutexGuard<'a, T> where Self: 'a;
+    fn new(x: T) -> Self { spin::mutex::TicketMutex::new(x) }
+    fn lock(&self) -> Self::Guard<'_> { self.lock() }
+}
+
+impl<T: Send + 'static> Mutex<T> for std::sync::Mutex<T> {
+    type Guard<'a> = std::sync::MutexGuard<'a, T> where Self: 'a;
+    fn new(x: T) -> Self { std::sync::Mutex::new(x) }
+    fn lock(&self) -> Self::Guard<'_> { self.lock().unwrap() }
+}
+
+fn gen_create<M: Mutex<u32>>(b: &mut Bencher) {
+    b.iter(|| {
+        let n = black_box(42);
+        M::new(n)
+    });
+}
+
+fn gen_lock_unlock<M: Mutex<u32>>(b: &mut Bencher) {
+    let m = M::new(0);
+    b.iter(|| {
+        let mut m = m.lock();
+        *m = m.wrapping_add(1);
+        drop(m);
+    });
+}
+
+fn gen_lock_unlock_read_contention<M: Mutex<u32>>(b: &mut Bencher) {
+    let m = Arc::new(M::new(0));
+    let thread = std::thread::spawn({
+        let m = m.clone();
+        move || {
+            while Arc::strong_count(&m) > 1 {
+                for _ in 0..1000 {
+                    black_box(*m.lock());
+                }
+            }
+        }
+    });
+    b.iter(|| {
+        let mut m = m.lock();
+        *m = m.wrapping_add(1);
+        drop(m);
+    });
+    drop(m);
+    thread.join().unwrap();
+}
+
+fn gen_lock_unlock_write_contention<M: Mutex<u32>>(b: &mut Bencher) {
+    let m = Arc::new(M::new(0));
+    let thread = std::thread::spawn({
+        let m = m.clone();
+        move || {
+            while Arc::strong_count(&m) > 1 {
+                for _ in 0..1000 {
+                    let mut m = m.lock();
+                    *m = m.wrapping_add(1);
+                    drop(m);
+                }
+            }
+        }
+    });
+    b.iter(|| {
+        let mut m = m.lock();
+        *m = m.wrapping_add(1);
+        drop(m);
+    });
+    drop(m);
+    thread.join().unwrap();
+}
+
+fn create(b: &mut Criterion) {
+    b.bench_function("create-spin-spinmutex", |b| gen_create::<spin::mutex::SpinMutex<u32>>(b));
+    b.bench_function("create-spin-ticketmutex", |b| gen_create::<spin::mutex::TicketMutex<u32>>(b));
+    b.bench_function("create-std", |b| gen_create::<std::sync::Mutex<u32>>(b));
+}
+
+fn lock_unlock(b: &mut Criterion) {
+    b.bench_function("lock_unlock-spin-spinmutex", |b| gen_lock_unlock::<spin::mutex::SpinMutex<u32>>(b));
+    b.bench_function("lock_unlock-spin-ticketmutex", |b| gen_lock_unlock::<spin::mutex::TicketMutex<u32>>(b));
+    b.bench_function("lock_unlock-std", |b| gen_lock_unlock::<std::sync::Mutex<u32>>(b));
+}
+
+fn lock_unlock_read_contention(b: &mut Criterion) {
+    b.bench_function("lock_unlock_read_contention-spin-spinmutex", |b| gen_lock_unlock_read_contention::<spin::mutex::SpinMutex<u32>>(b));
+    b.bench_function("lock_unlock_read_contention-spin-ticketmutex", |b| gen_lock_unlock_read_contention::<spin::mutex::TicketMutex<u32>>(b));
+    b.bench_function("lock_unlock_read_contention-std", |b| gen_lock_unlock_read_contention::<std::sync::Mutex<u32>>(b));
+}
+
+fn lock_unlock_write_contention(b: &mut Criterion) {
+    b.bench_function("lock_unlock_write_contention-spin-spinmutex", |b| gen_lock_unlock_write_contention::<spin::mutex::SpinMutex<u32>>(b));
+    b.bench_function("lock_unlock_write_contention-spin-ticketmutex", |b| gen_lock_unlock_write_contention::<spin::mutex::TicketMutex<u32>>(b));
+    b.bench_function("lock_unlock_write_contention-std", |b| gen_lock_unlock_write_contention::<std::sync::Mutex<u32>>(b));
+}
+
+criterion_group!(
+    mutex,
+    create,
+    lock_unlock,
+    lock_unlock_read_contention,
+    lock_unlock_write_contention,
+);
+
+criterion_main!(mutex);
diff --git a/crates/spin/cargo_embargo.json b/crates/spin/cargo_embargo.json
new file mode 100644
index 0000000..928ccdf
--- /dev/null
+++ b/crates/spin/cargo_embargo.json
@@ -0,0 +1,43 @@
+{
+  "min_sdk_version": "29",
+  "run_cargo": false,
+  "variants": [
+    {
+      "features": [
+        "once",
+        "mutex",
+        "spin_mutex",
+        "std"
+      ],
+      "tests": true
+    },
+    {
+      "features": [
+        "once",
+        "mutex",
+        "spin_mutex"
+      ],
+      "module_name_overrides": {
+        "libspin": "libspin_nostd"
+      },
+      "package": {
+        "spin": {
+          "force_rlib": true,
+          "host_supported": false,
+          "no_std": true
+        }
+      }
+    },
+    {
+      "generate_androidbp": false,
+      "generate_rulesmk": true,
+      "features": [
+      ],
+      "package": {
+        "spin": {
+          "no_std": true
+        }
+      }
+    }
+  ]
+}
diff --git a/crates/spin/examples/debug.rs b/crates/spin/examples/debug.rs
new file mode 100644
index 0000000..64654f6
--- /dev/null
+++ b/crates/spin/examples/debug.rs
@@ -0,0 +1,21 @@
+extern crate spin;
+
+fn main() {
+    let mutex = spin::Mutex::new(42);
+    println!("{:?}", mutex);
+    {
+        let x = mutex.lock();
+        println!("{:?}, {:?}", mutex, *x);
+    }
+
+    let rwlock = spin::RwLock::new(42);
+    println!("{:?}", rwlock);
+    {
+        let x = rwlock.read();
+        println!("{:?}, {:?}", rwlock, *x);
+    }
+    {
+        let x = rwlock.write();
+        println!("{:?}, {:?}", rwlock, *x);
+    }
+}
diff --git a/crates/spin/patches/disable_panic_tests.patch b/crates/spin/patches/disable_panic_tests.patch
new file mode 100644
index 0000000..52610fb
--- /dev/null
+++ b/crates/spin/patches/disable_panic_tests.patch
@@ -0,0 +1,48 @@
+diff --git a/src/mutex/spin.rs b/src/mutex/spin.rs
+index 60be1e8..36d65fd 100644
+--- a/src/mutex/spin.rs
++++ b/src/mutex/spin.rs
+@@ -432,6 +432,7 @@ mod tests {
+     }
+ 
+     #[test]
++    #[ignore = "Android uses panic_abort"]
+     fn test_mutex_arc_access_in_unwind() {
+         let arc = Arc::new(SpinMutex::new(1));
+         let arc2 = arc.clone();
+diff --git a/src/mutex/ticket.rs b/src/mutex/ticket.rs
+index df36e95..4186fb8 100644
+--- a/src/mutex/ticket.rs
++++ b/src/mutex/ticket.rs
+@@ -428,6 +428,7 @@ mod tests {
+     }
+ 
+     #[test]
++    #[ignore = "Android uses panic_abort"]
+     fn test_mutex_arc_access_in_unwind() {
+         let arc = Arc::new(TicketMutex::new(1));
+         let arc2 = arc.clone();
+diff --git a/src/once.rs b/src/once.rs
+index 5d4b451..ad60405 100644
+--- a/src/once.rs
++++ b/src/once.rs
+@@ -384,6 +384,7 @@ mod tests {
+     }
+ 
+     #[test]
++    #[ignore = "Android uses panic_abort"]
+     fn panic() {
+         use ::std::panic;
+ 
+diff --git a/src/rwlock.rs b/src/rwlock.rs
+index 5c009cf..ed50407 100644
+--- a/src/rwlock.rs
++++ b/src/rwlock.rs
+@@ -932,6 +932,7 @@ mod tests {
+     }
+ 
+     #[test]
++    #[ignore = "Android uses panic_abort"]
+     fn test_rw_access_in_unwind() {
+         let arc = Arc::new(RwLock::new(1));
+         let arc2 = arc.clone();
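The ignored tests above all catch a panic via stack unwinding; Android builds its Rust test binaries with panic=abort, so the unwind never happens and the process dies instead. A minimal stand-alone sketch of the difference (not part of the vendored crate):

    use std::panic;

    fn main() {
        // With the default panic=unwind this prints `caught: true`; with
        // `-C panic=abort` (as on Android) the process aborts inside the
        // closure and never reaches the println!, which is why the tests
        // above are marked #[ignore].
        let result = panic::catch_unwind(|| {
            panic!("boom");
        });
        println!("caught: {}", result.is_err());
    }
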
diff --git a/crates/spin/rules.mk b/crates/spin/rules.mk
new file mode 100644
index 0000000..31e0128
--- /dev/null
+++ b/crates/spin/rules.mk
@@ -0,0 +1,17 @@
+# This file is generated by cargo_embargo.
+# Do not modify this file after the LOCAL_DIR line
+# because the changes will be overridden on upgrade.
+# Content before the first line starting with LOCAL_DIR is preserved.
+# DO NOT SUBMIT: Add license before submitting.
+LOCAL_DIR := $(GET_LOCAL_DIR)
+MODULE := $(LOCAL_DIR)
+MODULE_CRATE_NAME := spin
+MODULE_RUST_CRATE_TYPES := rlib
+MODULE_SRCS := $(LOCAL_DIR)/src/lib.rs
+MODULE_ADD_IMPLICIT_DEPS := false
+MODULE_RUST_EDITION := 2015
+MODULE_LIBRARY_DEPS := \
+	trusty/user/base/lib/libcompiler_builtins-rust \
+	trusty/user/base/lib/libcore-rust
+
+include make/library.mk
diff --git a/crates/spin/script/doc-upload.cfg b/crates/spin/script/doc-upload.cfg
new file mode 100644
index 0000000..c6dfbdc
--- /dev/null
+++ b/crates/spin/script/doc-upload.cfg
@@ -0,0 +1,3 @@
+PROJECT_NAME=spin-rs
+DOCS_REPO=mvdnes/rust-docs.git
+DOC_RUST_VERSION=stable
diff --git a/crates/spin/src/barrier.rs b/crates/spin/src/barrier.rs
new file mode 100644
index 0000000..c3a1c92
--- /dev/null
+++ b/crates/spin/src/barrier.rs
@@ -0,0 +1,239 @@
+//! Synchronization primitive allowing multiple threads to synchronize the
+//! beginning of some computation.
+//!
+//! Implementation adapted from the 'Barrier' type of the standard library. See:
+//! <https://doc.rust-lang.org/std/sync/struct.Barrier.html>
+//!
+//! Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+//! file at the top-level directory of this distribution and at
+//! <http://rust-lang.org/COPYRIGHT>.
+//!
+//! Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+//! <http://www.apache.org/licenses/LICENSE-2.0>> or the MIT license
+//! <LICENSE-MIT or <http://opensource.org/licenses/MIT>>, at your
+//! option. This file may not be copied, modified, or distributed
+//! except according to those terms.
+
+use crate::{mutex::Mutex, RelaxStrategy, Spin};
+
+/// A primitive that synchronizes the execution of multiple threads.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+/// use std::sync::Arc;
+/// use std::thread;
+///
+/// let mut handles = Vec::with_capacity(10);
+/// let barrier = Arc::new(spin::Barrier::new(10));
+/// for _ in 0..10 {
+///     let c = barrier.clone();
+///     // The same messages will be printed together.
+///     // You will NOT see any interleaving.
+///     handles.push(thread::spawn(move|| {
+///         println!("before wait");
+///         c.wait();
+///         println!("after wait");
+///     }));
+/// }
+/// // Wait for other threads to finish.
+/// for handle in handles {
+///     handle.join().unwrap();
+/// }
+/// ```
+pub struct Barrier<R = Spin> {
+    lock: Mutex<BarrierState, R>,
+    num_threads: usize,
+}
+
+// The inner state of a double barrier
+struct BarrierState {
+    count: usize,
+    generation_id: usize,
+}
+
+/// A `BarrierWaitResult` is returned by [`wait`] when all threads in the [`Barrier`]
+/// have rendezvoused.
+///
+/// [`wait`]: struct.Barrier.html#method.wait
+/// [`Barrier`]: struct.Barrier.html
+///
+/// # Examples
+///
+/// ```
+/// use spin;
+///
+/// let barrier = spin::Barrier::new(1);
+/// let barrier_wait_result = barrier.wait();
+/// ```
+pub struct BarrierWaitResult(bool);
+
+impl<R: RelaxStrategy> Barrier<R> {
+    /// Blocks the current thread until all threads have rendezvoused here.
+    ///
+    /// Barriers are re-usable after all threads have rendezvoused once, and can
+    /// be used continuously.
+    ///
+    /// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that
+    /// returns `true` from [`is_leader`] when returning from this function, and
+    /// all other threads will receive a result that will return `false` from
+    /// [`is_leader`].
+    ///
+    /// [`BarrierWaitResult`]: struct.BarrierWaitResult.html
+    /// [`is_leader`]: struct.BarrierWaitResult.html#method.is_leader
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin;
+    /// use std::sync::Arc;
+    /// use std::thread;
+    ///
+    /// let mut handles = Vec::with_capacity(10);
+    /// let barrier = Arc::new(spin::Barrier::new(10));
+    /// for _ in 0..10 {
+    ///     let c = barrier.clone();
+    ///     // The same messages will be printed together.
+    ///     // You will NOT see any interleaving.
+    ///     handles.push(thread::spawn(move|| {
+    ///         println!("before wait");
+    ///         c.wait();
+    ///         println!("after wait");
+    ///     }));
+    /// }
+    /// // Wait for other threads to finish.
+    /// for handle in handles {
+    ///     handle.join().unwrap();
+    /// }
+    /// ```
+    pub fn wait(&self) -> BarrierWaitResult {
+        let mut lock = self.lock.lock();
+        lock.count += 1;
+
+        if lock.count < self.num_threads {
+            // not the leader
+            let local_gen = lock.generation_id;
+
+            while local_gen == lock.generation_id && lock.count < self.num_threads {
+                drop(lock);
+                R::relax();
+                lock = self.lock.lock();
+            }
+            BarrierWaitResult(false)
+        } else {
+            // this thread is the leader,
+            //   and is responsible for incrementing the generation
+            lock.count = 0;
+            lock.generation_id = lock.generation_id.wrapping_add(1);
+            BarrierWaitResult(true)
+        }
+    }
+}
+
+impl<R> Barrier<R> {
+    /// Creates a new barrier that can block a given number of threads.
+    ///
+    /// A barrier will block `n`-1 threads which call [`wait`] and then wake up
+    /// all threads at once when the `n`th thread calls [`wait`]. A Barrier created
+    /// with n = 0 will behave identically to one created with n = 1.
+    ///
+    /// [`wait`]: #method.wait
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin;
+    ///
+    /// let barrier = spin::Barrier::new(10);
+    /// ```
+    pub const fn new(n: usize) -> Self {
+        Self {
+            lock: Mutex::new(BarrierState {
+                count: 0,
+                generation_id: 0,
+            }),
+            num_threads: n,
+        }
+    }
+}
+
+impl BarrierWaitResult {
+    /// Returns whether this thread from [`wait`] is the "leader thread".
+    ///
+    /// Only one thread will have `true` returned from their result, all other
+    /// threads will have `false` returned.
+    ///
+    /// [`wait`]: struct.Barrier.html#method.wait
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin;
+    ///
+    /// let barrier = spin::Barrier::new(1);
+    /// let barrier_wait_result = barrier.wait();
+    /// println!("{:?}", barrier_wait_result.is_leader());
+    /// ```
+    pub fn is_leader(&self) -> bool {
+        self.0
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::prelude::v1::*;
+
+    use std::sync::mpsc::{channel, TryRecvError};
+    use std::sync::Arc;
+    use std::thread;
+
+    type Barrier = super::Barrier;
+
+    fn use_barrier(n: usize, barrier: Arc<Barrier>) {
+        let (tx, rx) = channel();
+
+        let mut ts = Vec::new();
+        for _ in 0..n - 1 {
+            let c = barrier.clone();
+            let tx = tx.clone();
+            ts.push(thread::spawn(move || {
+                tx.send(c.wait().is_leader()).unwrap();
+            }));
+        }
+
+        // At this point, all spawned threads should be blocked,
+        // so we shouldn't get anything from the port
+        assert!(match rx.try_recv() {
+            Err(TryRecvError::Empty) => true,
+            _ => false,
+        });
+
+        let mut leader_found = barrier.wait().is_leader();
+
+        // Now, the barrier is cleared and we should get data.
+        for _ in 0..n - 1 {
+            if rx.recv().unwrap() {
+                assert!(!leader_found);
+                leader_found = true;
+            }
+        }
+        assert!(leader_found);
+
+        for t in ts {
+            t.join().unwrap();
+        }
+    }
+
+    #[test]
+    fn test_barrier() {
+        const N: usize = 10;
+
+        let barrier = Arc::new(Barrier::new(N));
+
+        use_barrier(N, barrier.clone());
+
+        // use barrier twice to ensure it is reusable
+        use_barrier(N, barrier.clone());
+    }
+}
diff --git a/crates/spin/src/lazy.rs b/crates/spin/src/lazy.rs
new file mode 100644
index 0000000..6e5efe4
--- /dev/null
+++ b/crates/spin/src/lazy.rs
@@ -0,0 +1,118 @@
+//! Synchronization primitives for lazy evaluation.
+//!
+//! Implementation adapted from the `SyncLazy` type of the standard library. See:
+//! <https://doc.rust-lang.org/std/lazy/struct.SyncLazy.html>
+
+use crate::{once::Once, RelaxStrategy, Spin};
+use core::{cell::Cell, fmt, ops::Deref};
+
+/// A value which is initialized on the first access.
+///
+/// This type is a thread-safe `Lazy`, and can be used in statics.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashMap;
+/// use spin::Lazy;
+///
+/// static HASHMAP: Lazy<HashMap<i32, String>> = Lazy::new(|| {
+///     println!("initializing");
+///     let mut m = HashMap::new();
+///     m.insert(13, "Spica".to_string());
+///     m.insert(74, "Hoyten".to_string());
+///     m
+/// });
+///
+/// fn main() {
+///     println!("ready");
+///     std::thread::spawn(|| {
+///         println!("{:?}", HASHMAP.get(&13));
+///     }).join().unwrap();
+///     println!("{:?}", HASHMAP.get(&74));
+///
+///     // Prints:
+///     //   ready
+///     //   initializing
+///     //   Some("Spica")
+///     //   Some("Hoyten")
+/// }
+/// ```
+pub struct Lazy<T, F = fn() -> T, R = Spin> {
+    cell: Once<T, R>,
+    init: Cell<Option<F>>,
+}
+
+impl<T: fmt::Debug, F, R> fmt::Debug for Lazy<T, F, R> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Lazy")
+            .field("cell", &self.cell)
+            .field("init", &"..")
+            .finish()
+    }
+}
+
+// We never create a `&F` from a `&Lazy<T, F>` so it is fine
+// to not impl `Sync` for `F`
+// we do create a `&mut Option<F>` in `force`, but this is
+// properly synchronized, so it only happens once
+// so it also does not contribute to this impl.
+unsafe impl<T, F: Send> Sync for Lazy<T, F> where Once<T>: Sync {}
+// auto-derived `Send` impl is OK.
+
+impl<T, F, R> Lazy<T, F, R> {
+    /// Creates a new lazy value with the given initializing
+    /// function.
+    pub const fn new(f: F) -> Self {
+        Self {
+            cell: Once::new(),
+            init: Cell::new(Some(f)),
+        }
+    }
+    /// Retrieves a mutable pointer to the inner data.
+    ///
+    /// This is especially useful when interfacing with low level code or FFI where the caller
+    /// explicitly knows that it has exclusive access to the inner data. Note that reading from
+    /// this pointer is UB until initialized or directly written to.
+    pub fn as_mut_ptr(&self) -> *mut T {
+        self.cell.as_mut_ptr()
+    }
+}
+
+impl<T, F: FnOnce() -> T, R: RelaxStrategy> Lazy<T, F, R> {
+    /// Forces the evaluation of this lazy value and
+    /// returns a reference to the result. This is equivalent
+    /// to the `Deref` impl, but is explicit.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin::Lazy;
+    ///
+    /// let lazy = Lazy::new(|| 92);
+    ///
+    /// assert_eq!(Lazy::force(&lazy), &92);
+    /// assert_eq!(&*lazy, &92);
+    /// ```
+    pub fn force(this: &Self) -> &T {
+        this.cell.call_once(|| match this.init.take() {
+            Some(f) => f(),
+            None => panic!("Lazy instance has previously been poisoned"),
+        })
+    }
+}
+
+impl<T, F: FnOnce() -> T, R: RelaxStrategy> Deref for Lazy<T, F, R> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        Self::force(self)
+    }
+}
+
+impl<T: Default, R> Default for Lazy<T, fn() -> T, R> {
+    /// Creates a new lazy value using `Default` as the initializing function.
+    fn default() -> Self {
+        Self::new(T::default)
+    }
+}
diff --git a/crates/spin/src/lib.rs b/crates/spin/src/lib.rs
new file mode 100644
index 0000000..50768bc
--- /dev/null
+++ b/crates/spin/src/lib.rs
@@ -0,0 +1,221 @@
+#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+#![deny(missing_docs)]
+
+//! This crate provides [spin-based](https://en.wikipedia.org/wiki/Spinlock) versions of the
+//! primitives in `std::sync` and `std::lazy`. Because synchronization is done through spinning,
+//! the primitives are suitable for use in `no_std` environments.
+//!
+//! # Features
+//!
+//! - `Mutex`, `RwLock`, `Once`/`SyncOnceCell`, and `SyncLazy` equivalents
+//!
+//! - Support for `no_std` environments
+//!
+//! - [`lock_api`](https://crates.io/crates/lock_api) compatibility
+//!
+//! - Upgradeable `RwLock` guards
+//!
+//! - Guards can be sent and shared between threads
+//!
+//! - Guard leaking
+//!
+//! - Ticket locks
+//!
+//! - Different strategies for dealing with contention
+//!
+//! # Relationship with `std::sync`
+//!
+//! While `spin` is not a drop-in replacement for `std::sync` (and
+//! [should not be considered as such](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html))
+//! an effort is made to keep this crate reasonably consistent with `std::sync`.
+//!
+//! Many of the types defined in this crate have 'additional capabilities' when compared to `std::sync`:
+//!
+//! - Because spinning does not depend on the thread-driven model of `std::sync`, guards ([`MutexGuard`],
+//!   [`RwLockReadGuard`], [`RwLockWriteGuard`], etc.) may be sent and shared between threads.
+//!
+//! - [`RwLockUpgradableGuard`] supports being upgraded into a [`RwLockWriteGuard`].
+//!
+//! - Guards support [leaking](https://doc.rust-lang.org/nomicon/leaking.html).
+//!
+//! - [`Once`] owns the value returned by its `call_once` initializer.
+//!
+//! - [`RwLock`] supports counting readers and writers.
+//!
+//! Conversely, the types in this crate do not have some of the features `std::sync` has:
+//!
+//! - Locks do not track [panic poisoning](https://doc.rust-lang.org/nomicon/poisoning.html).
+//!
+//! ## Feature flags
+//!
+//! The crate comes with a few feature flags that you may wish to use.
+//!
+//! - `lock_api` enables support for [`lock_api`](https://crates.io/crates/lock_api)
+//!
+//! - `ticket_mutex` uses a ticket lock for the implementation of `Mutex`
+//!
+//! - `fair_mutex` enables a fairer implementation of `Mutex` that uses eventual fairness to avoid
+//!   starvation
+//!
+//! - `std` enables support for thread yielding instead of spinning
+
+#[cfg(any(test, feature = "std"))]
+extern crate core;
+
+#[cfg(feature = "portable_atomic")]
+extern crate portable_atomic;
+
+#[cfg(not(feature = "portable_atomic"))]
+use core::sync::atomic;
+#[cfg(feature = "portable_atomic")]
+use portable_atomic as atomic;
+
+#[cfg(feature = "barrier")]
+#[cfg_attr(docsrs, doc(cfg(feature = "barrier")))]
+pub mod barrier;
+#[cfg(feature = "lazy")]
+#[cfg_attr(docsrs, doc(cfg(feature = "lazy")))]
+pub mod lazy;
+#[cfg(feature = "mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
+pub mod mutex;
+#[cfg(feature = "once")]
+#[cfg_attr(docsrs, doc(cfg(feature = "once")))]
+pub mod once;
+pub mod relax;
+#[cfg(feature = "rwlock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+pub mod rwlock;
+
+#[cfg(feature = "mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
+pub use mutex::MutexGuard;
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+pub use relax::Yield;
+pub use relax::{RelaxStrategy, Spin};
+#[cfg(feature = "rwlock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+pub use rwlock::RwLockReadGuard;
+
+// Avoid confusing inference errors by aliasing away the relax strategy parameter. Users that need to use a different
+// relax strategy can do so by accessing the types through their fully-qualified path. This is a little bit horrible
+// but sadly adding a default type parameter is *still* a breaking change in Rust (for understandable reasons).
+
+/// A primitive that synchronizes the execution of multiple threads. See [`barrier::Barrier`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "barrier")]
+#[cfg_attr(docsrs, doc(cfg(feature = "barrier")))]
+pub type Barrier = crate::barrier::Barrier;
+
+/// A value which is initialized on the first access. See [`lazy::Lazy`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "lazy")]
+#[cfg_attr(docsrs, doc(cfg(feature = "lazy")))]
+pub type Lazy<T, F = fn() -> T> = crate::lazy::Lazy<T, F>;
+
+/// A primitive that synchronizes the execution of multiple threads. See [`mutex::Mutex`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
+pub type Mutex<T> = crate::mutex::Mutex<T>;
+
+/// A primitive that provides lazy one-time initialization. See [`once::Once`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "once")]
+#[cfg_attr(docsrs, doc(cfg(feature = "once")))]
+pub type Once<T = ()> = crate::once::Once<T>;
+
+/// A lock that provides data access to either one writer or many readers. See [`rwlock::RwLock`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "rwlock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+pub type RwLock<T> = crate::rwlock::RwLock<T>;
+
+/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`]. See
+/// [`rwlock::RwLockUpgradableGuard`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "rwlock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+pub type RwLockUpgradableGuard<'a, T> = crate::rwlock::RwLockUpgradableGuard<'a, T>;
+
+/// A guard that provides mutable data access. See [`rwlock::RwLockWriteGuard`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "rwlock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+pub type RwLockWriteGuard<'a, T> = crate::rwlock::RwLockWriteGuard<'a, T>;
+
+/// Spin synchronisation primitives, but compatible with [`lock_api`](https://crates.io/crates/lock_api).
+#[cfg(feature = "lock_api")]
+#[cfg_attr(docsrs, doc(cfg(feature = "lock_api")))]
+pub mod lock_api {
+    /// A lock that provides mutually exclusive data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "mutex")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
+    pub type Mutex<T> = lock_api_crate::Mutex<crate::Mutex<()>, T>;
+
+    /// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "mutex")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
+    pub type MutexGuard<'a, T> = lock_api_crate::MutexGuard<'a, crate::Mutex<()>, T>;
+
+    /// A lock that provides data access to either one writer or many readers (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "rwlock")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+    pub type RwLock<T> = lock_api_crate::RwLock<crate::RwLock<()>, T>;
+
+    /// A guard that provides immutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "rwlock")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+    pub type RwLockReadGuard<'a, T> = lock_api_crate::RwLockReadGuard<'a, crate::RwLock<()>, T>;
+
+    /// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "rwlock")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+    pub type RwLockWriteGuard<'a, T> = lock_api_crate::RwLockWriteGuard<'a, crate::RwLock<()>, T>;
+
+    /// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`] (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "rwlock")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+    pub type RwLockUpgradableReadGuard<'a, T> =
+        lock_api_crate::RwLockUpgradableReadGuard<'a, crate::RwLock<()>, T>;
+}
+
+/// In the event of an invalid operation, it's best to abort the current process.
+#[cfg(feature = "fair_mutex")]
+fn abort() -> ! {
+    #[cfg(not(feature = "std"))]
+    {
+        // Panicking while panicking is defined by Rust to result in an abort.
+        struct Panic;
+
+        impl Drop for Panic {
+            fn drop(&mut self) {
+                panic!("aborting due to invalid operation");
+            }
+        }
+
+        let _panic = Panic;
+        panic!("aborting due to invalid operation");
+    }
+
+    #[cfg(feature = "std")]
+    {
+        std::process::abort();
+    }
+}
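The alias comment above points users who need a different relax strategy at the fully-qualified types. A minimal sketch of that pattern, assuming the `spin_mutex` and `std` features from the first cargo_embargo variant; the `YieldMutex` alias is illustrative and not part of the crate:

    use spin::mutex::SpinMutex;
    use spin::relax::Yield;

    // A mutex that yields to the OS scheduler while contended instead of
    // busy-spinning (only available with the `std` feature).
    type YieldMutex<T> = SpinMutex<T, Yield>;

    fn main() {
        let m = YieldMutex::new(0u32);
        *m.lock() += 1;
        assert_eq!(*m.lock(), 1);
    }
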
diff --git a/crates/spin/src/mutex.rs b/crates/spin/src/mutex.rs
new file mode 100644
index 0000000..e333d8a
--- /dev/null
+++ b/crates/spin/src/mutex.rs
@@ -0,0 +1,340 @@
+//! Locks that have the same behaviour as a mutex.
+//!
+//! The [`Mutex`] in the root of the crate can be configured using the `ticket_mutex` feature.
+//! If it's enabled, [`TicketMutex`] and [`TicketMutexGuard`] will be re-exported as [`Mutex`]
+//! and [`MutexGuard`], otherwise the [`SpinMutex`] and guard will be re-exported.
+//!
+//! `ticket_mutex` is disabled by default.
+//!
+//! [`Mutex`]: ../struct.Mutex.html
+//! [`MutexGuard`]: ../struct.MutexGuard.html
+//! [`TicketMutex`]: ./struct.TicketMutex.html
+//! [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html
+//! [`SpinMutex`]: ./struct.SpinMutex.html
+//! [`SpinMutexGuard`]: ./struct.SpinMutexGuard.html
+
+#[cfg(feature = "spin_mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))]
+pub mod spin;
+#[cfg(feature = "spin_mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))]
+pub use self::spin::{SpinMutex, SpinMutexGuard};
+
+#[cfg(feature = "ticket_mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))]
+pub mod ticket;
+#[cfg(feature = "ticket_mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))]
+pub use self::ticket::{TicketMutex, TicketMutexGuard};
+
+#[cfg(feature = "fair_mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))]
+pub mod fair;
+#[cfg(feature = "fair_mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))]
+pub use self::fair::{FairMutex, FairMutexGuard, Starvation};
+
+use crate::{RelaxStrategy, Spin};
+use core::{
+    fmt,
+    ops::{Deref, DerefMut},
+};
+
+#[cfg(all(not(feature = "spin_mutex"), not(feature = "use_ticket_mutex")))]
+compile_error!("The `mutex` feature flag was used (perhaps through another feature?) without either `spin_mutex` or `use_ticket_mutex`. One of these is required.");
+
+#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))]
+type InnerMutex<T, R> = self::spin::SpinMutex<T, R>;
+#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))]
+type InnerMutexGuard<'a, T> = self::spin::SpinMutexGuard<'a, T>;
+
+#[cfg(feature = "use_ticket_mutex")]
+type InnerMutex<T, R> = self::ticket::TicketMutex<T, R>;
+#[cfg(feature = "use_ticket_mutex")]
+type InnerMutexGuard<'a, T> = self::ticket::TicketMutexGuard<'a, T>;
+
+/// A spin-based lock providing mutually exclusive access to data.
+///
+/// The implementation uses either a ticket mutex or a regular spin mutex depending on whether the `spin_mutex` or
+/// `ticket_mutex` feature flag is enabled.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::Mutex::new(0);
+///
+/// // Modify the data
+/// *lock.lock() = 2;
+///
+/// // Read the data
+/// let answer = *lock.lock();
+/// assert_eq!(answer, 2);
+/// ```
+///
+/// # Thread safety example
+///
+/// ```
+/// use spin;
+/// use std::sync::{Arc, Barrier};
+///
+/// let thread_count = 1000;
+/// let spin_mutex = Arc::new(spin::Mutex::new(0));
+///
+/// // We use a barrier to ensure the readout happens after all writing
+/// let barrier = Arc::new(Barrier::new(thread_count + 1));
+///
+/// # let mut ts = Vec::new();
+/// for _ in (0..thread_count) {
+///     let my_barrier = barrier.clone();
+///     let my_lock = spin_mutex.clone();
+/// # let t =
+///     std::thread::spawn(move || {
+///         let mut guard = my_lock.lock();
+///         *guard += 1;
+///
+///         // Release the lock to prevent a deadlock
+///         drop(guard);
+///         my_barrier.wait();
+///     });
+/// # ts.push(t);
+/// }
+///
+/// barrier.wait();
+///
+/// let answer = { *spin_mutex.lock() };
+/// assert_eq!(answer, thread_count);
+///
+/// # for t in ts {
+/// #     t.join().unwrap();
+/// # }
+/// ```
+pub struct Mutex<T: ?Sized, R = Spin> {
+    inner: InnerMutex<T, R>,
+}
+
+unsafe impl<T: ?Sized + Send, R> Sync for Mutex<T, R> {}
+unsafe impl<T: ?Sized + Send, R> Send for Mutex<T, R> {}
+
+/// A generic guard that will protect some data access and
+/// uses either a ticket lock or a normal spin mutex.
+///
+/// For more info see [`TicketMutexGuard`] or [`SpinMutexGuard`].
+///
+/// [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html
+/// [`SpinMutexGuard`]: ./struct.SpinMutexGuard.html
+pub struct MutexGuard<'a, T: 'a + ?Sized> {
+    inner: InnerMutexGuard<'a, T>,
+}
+
+impl<T, R> Mutex<T, R> {
+    /// Creates a new [`Mutex`] wrapping the supplied data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use spin::Mutex;
+    ///
+    /// static MUTEX: Mutex<()> = Mutex::new(());
+    ///
+    /// fn demo() {
+    ///     let lock = MUTEX.lock();
+    ///     // do something with lock
+    ///     drop(lock);
+    /// }
+    /// ```
+    #[inline(always)]
+    pub const fn new(value: T) -> Self {
+        Self {
+            inner: InnerMutex::new(value),
+        }
+    }
+
+    /// Consumes this [`Mutex`] and unwraps the underlying data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::Mutex::new(42);
+    /// assert_eq!(42, lock.into_inner());
+    /// ```
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.inner.into_inner()
+    }
+}
+
+impl<T: ?Sized, R: RelaxStrategy> Mutex<T, R> {
+    /// Locks the [`Mutex`] and returns a guard that permits access to the inner data.
+    ///
+    /// The returned value may be dereferenced for data access
+    /// and the lock will be dropped when the guard falls out of scope.
+    ///
+    /// ```
+    /// let lock = spin::Mutex::new(0);
+    /// {
+    ///     let mut data = lock.lock();
+    ///     // The lock is now locked and the data can be accessed
+    ///     *data += 1;
+    ///     // The lock is implicitly dropped at the end of the scope
+    /// }
+    /// ```
+    #[inline(always)]
+    pub fn lock(&self) -> MutexGuard<T> {
+        MutexGuard {
+            inner: self.inner.lock(),
+        }
+    }
+}
+
+impl<T: ?Sized, R> Mutex<T, R> {
+    /// Returns `true` if the lock is currently held.
+    ///
+    /// # Safety
+    ///
+    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
+    #[inline(always)]
+    pub fn is_locked(&self) -> bool {
+        self.inner.is_locked()
+    }
+
+    /// Force unlock this [`Mutex`].
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if the lock is not held by the current
+    /// thread. However, this can be useful in some instances for exposing the
+    /// lock to FFI that doesn't know how to deal with RAII.
+    #[inline(always)]
+    pub unsafe fn force_unlock(&self) {
+        self.inner.force_unlock()
+    }
+
+    /// Try to lock this [`Mutex`], returning a lock guard if successful.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::Mutex::new(42);
+    ///
+    /// let maybe_guard = lock.try_lock();
+    /// assert!(maybe_guard.is_some());
+    ///
+    /// // `maybe_guard` is still held, so the second call fails
+    /// let maybe_guard2 = lock.try_lock();
+    /// assert!(maybe_guard2.is_none());
+    /// ```
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<MutexGuard<T>> {
+        self.inner
+            .try_lock()
+            .map(|guard| MutexGuard { inner: guard })
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the [`Mutex`] mutably, and a mutable reference is guaranteed to be exclusive in Rust,
+    /// no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As such,
+    /// this is a 'zero-cost' operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut lock = spin::Mutex::new(0);
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.lock(), 10);
+    /// ```
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.inner.get_mut()
+    }
+}
+
+impl<T: ?Sized + fmt::Debug, R> fmt::Debug for Mutex<T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&self.inner, f)
+    }
+}
+
+impl<T: ?Sized + Default, R> Default for Mutex<T, R> {
+    fn default() -> Self {
+        Self::new(Default::default())
+    }
+}
+
+impl<T, R> From<T> for Mutex<T, R> {
+    fn from(data: T) -> Self {
+        Self::new(data)
+    }
+}
+
+impl<'a, T: ?Sized> MutexGuard<'a, T> {
+    /// Leak the lock guard, yielding a mutable reference to the underlying data.
+    ///
+    /// Note that this function will permanently lock the original [`Mutex`].
+    ///
+    /// ```
+    /// let mylock = spin::Mutex::new(0);
+    ///
+    /// let data: &mut i32 = spin::MutexGuard::leak(mylock.lock());
+    ///
+    /// *data = 1;
+    /// assert_eq!(*data, 1);
+    /// ```
+    #[inline(always)]
+    pub fn leak(this: Self) -> &'a mut T {
+        InnerMutexGuard::leak(this.inner)
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        &*self.inner
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut *self.inner
+    }
+}
+
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for Mutex<(), R> {
+    type GuardMarker = lock_api_crate::GuardSend;
+
+    const INIT: Self = Self::new(());
+
+    fn lock(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(Self::lock(self));
+    }
+
+    fn try_lock(&self) -> bool {
+        // Prevent guard destructor running
+        Self::try_lock(self).map(core::mem::forget).is_some()
+    }
+
+    unsafe fn unlock(&self) {
+        self.force_unlock();
+    }
+
+    fn is_locked(&self) -> bool {
+        self.inner.is_locked()
+    }
+}
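A minimal usage sketch of the `lock_api` compatibility wired up in lib.rs and through the `RawMutex` impl above; it assumes the `lock_api` feature is enabled on top of the mutex features already listed in cargo_embargo.json (the Android variants leave it off):

    use spin::lock_api::Mutex;

    fn main() {
        // `spin::lock_api::Mutex<T>` is `lock_api::Mutex` parameterised by the
        // spin-based `RawMutex` impl, so the guard API comes from `lock_api`.
        let m: Mutex<Vec<i32>> = Mutex::new(Vec::new());
        m.lock().push(42);
        assert_eq!(m.lock().len(), 1);
    }
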
diff --git a/crates/spin/src/mutex/fair.rs b/crates/spin/src/mutex/fair.rs
new file mode 100644
index 0000000..db07ad6
--- /dev/null
+++ b/crates/spin/src/mutex/fair.rs
@@ -0,0 +1,735 @@
+//! A spinning mutex with a fairer unlock algorithm.
+//!
+//! This mutex is similar to the `SpinMutex` in that it uses spinning to avoid
+//! context switches. However, it uses a fairer unlock algorithm that avoids
+//! starvation of threads that are waiting for the lock.
+
+use crate::{
+    atomic::{AtomicUsize, Ordering},
+    RelaxStrategy, Spin,
+};
+use core::{
+    cell::UnsafeCell,
+    fmt,
+    marker::PhantomData,
+    mem::ManuallyDrop,
+    ops::{Deref, DerefMut},
+};
+
+// The lowest bit of `lock` is used to indicate whether the mutex is locked or not. The rest of the bits are used to
+// store the number of starving threads.
+const LOCKED: usize = 1;
+const STARVED: usize = 2;
+
+/// Number chosen by fair roll of the dice, adjust as needed.
+const STARVATION_SPINS: usize = 1024;
+
+/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data, but with a fairer
+/// algorithm.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::mutex::FairMutex::<_>::new(0);
+///
+/// // Modify the data
+/// *lock.lock() = 2;
+///
+/// // Read the data
+/// let answer = *lock.lock();
+/// assert_eq!(answer, 2);
+/// ```
+///
+/// # Thread safety example
+///
+/// ```
+/// use spin;
+/// use std::sync::{Arc, Barrier};
+///
+/// let thread_count = 1000;
+/// let spin_mutex = Arc::new(spin::mutex::FairMutex::<_>::new(0));
+///
+/// // We use a barrier to ensure the readout happens after all writing
+/// let barrier = Arc::new(Barrier::new(thread_count + 1));
+///
+/// for _ in (0..thread_count) {
+///     let my_barrier = barrier.clone();
+///     let my_lock = spin_mutex.clone();
+///     std::thread::spawn(move || {
+///         let mut guard = my_lock.lock();
+///         *guard += 1;
+///
+///         // Release the lock to prevent a deadlock
+///         drop(guard);
+///         my_barrier.wait();
+///     });
+/// }
+///
+/// barrier.wait();
+///
+/// let answer = { *spin_mutex.lock() };
+/// assert_eq!(answer, thread_count);
+/// ```
+pub struct FairMutex<T: ?Sized, R = Spin> {
+    phantom: PhantomData<R>,
+    pub(crate) lock: AtomicUsize,
+    data: UnsafeCell<T>,
+}
+
+/// A guard that provides mutable data access.
+///
+/// When the guard falls out of scope it will release the lock.
+pub struct FairMutexGuard<'a, T: ?Sized + 'a> {
+    lock: &'a AtomicUsize,
+    data: *mut T,
+}
+
+/// A handle that indicates that we have been trying to acquire the lock for a while.
+///
+/// This handle is used to prevent starvation.
+pub struct Starvation<'a, T: ?Sized + 'a, R> {
+    lock: &'a FairMutex<T, R>,
+}
+
+/// Indicates whether a lock was rejected due to the lock being held by another thread or due to starvation.
+#[derive(Debug)]
+pub enum LockRejectReason {
+    /// The lock was rejected due to the lock being held by another thread.
+    Locked,
+
+    /// The lock was rejected due to starvation.
+    Starved,
+}
+
+// Same unsafe impls as `std::sync::Mutex`
+unsafe impl<T: ?Sized + Send, R> Sync for FairMutex<T, R> {}
+unsafe impl<T: ?Sized + Send, R> Send for FairMutex<T, R> {}
+
+unsafe impl<T: ?Sized + Sync> Sync for FairMutexGuard<'_, T> {}
+unsafe impl<T: ?Sized + Send> Send for FairMutexGuard<'_, T> {}
+
+impl<T, R> FairMutex<T, R> {
+    /// Creates a new [`FairMutex`] wrapping the supplied data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use spin::mutex::FairMutex;
+    ///
+    /// static MUTEX: FairMutex<()> = FairMutex::<_>::new(());
+    ///
+    /// fn demo() {
+    ///     let lock = MUTEX.lock();
+    ///     // do something with lock
+    ///     drop(lock);
+    /// }
+    /// ```
+    #[inline(always)]
+    pub const fn new(data: T) -> Self {
+        FairMutex {
+            lock: AtomicUsize::new(0),
+            data: UnsafeCell::new(data),
+            phantom: PhantomData,
+        }
+    }
+
+    /// Consumes this [`FairMutex`] and unwraps the underlying data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::mutex::FairMutex::<_>::new(42);
+    /// assert_eq!(42, lock.into_inner());
+    /// ```
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        // We know statically that there are no outstanding references to
+        // `self` so there's no need to lock.
+        let FairMutex { data, .. } = self;
+        data.into_inner()
+    }
+
+    /// Returns a mutable pointer to the underlying data.
+    ///
+    /// This is mostly meant to be used for applications which require manual unlocking, but where
+    /// storing both the lock and the pointer to the inner data gets inefficient.
+    ///
+    /// # Example
+    /// ```
+    /// let lock = spin::mutex::FairMutex::<_>::new(42);
+    ///
+    /// unsafe {
+    ///     core::mem::forget(lock.lock());
+    ///
+    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
+    ///     lock.as_mut_ptr().write(58);
+    ///
+    ///     lock.force_unlock();
+    /// }
+    ///
+    /// assert_eq!(*lock.lock(), 58);
+    ///
+    /// ```
+    #[inline(always)]
+    pub fn as_mut_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+impl<T: ?Sized, R: RelaxStrategy> FairMutex<T, R> {
+    /// Locks the [`FairMutex`] and returns a guard that permits access to the inner data.
+    ///
+    /// The returned value may be dereferenced for data access
+    /// and the lock will be dropped when the guard falls out of scope.
+    ///
+    /// ```
+    /// let lock = spin::mutex::FairMutex::<_>::new(0);
+    /// {
+    ///     let mut data = lock.lock();
+    ///     // The lock is now locked and the data can be accessed
+    ///     *data += 1;
+    ///     // The lock is implicitly dropped at the end of the scope
+    /// }
+    /// ```
+    #[inline(always)]
+    pub fn lock(&self) -> FairMutexGuard<T> {
+        // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
+        // when called in a loop.
+        let mut spins = 0;
+        while self
+            .lock
+            .compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed)
+            .is_err()
+        {
+            // Wait until the lock looks unlocked before retrying
+            while self.is_locked() {
+                R::relax();
+
+                // If we've been spinning for a while, switch to a fairer strategy that will prevent
+                // newer users from stealing our lock from us.
+                if spins > STARVATION_SPINS {
+                    return self.starve().lock();
+                }
+                spins += 1;
+            }
+        }
+
+        FairMutexGuard {
+            lock: &self.lock,
+            data: unsafe { &mut *self.data.get() },
+        }
+    }
+}
+
+impl<T: ?Sized, R> FairMutex<T, R> {
+    /// Returns `true` if the lock is currently held.
+    ///
+    /// # Safety
+    ///
+    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
+    #[inline(always)]
+    pub fn is_locked(&self) -> bool {
+        self.lock.load(Ordering::Relaxed) & LOCKED != 0
+    }
+
+    /// Force unlock this [`FairMutex`].
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if the lock is not held by the current
+    /// thread. However, this can be useful in some instances for exposing the
+    /// lock to FFI that doesn't know how to deal with RAII.
+    #[inline(always)]
+    pub unsafe fn force_unlock(&self) {
+        self.lock.fetch_and(!LOCKED, Ordering::Release);
+    }
+
+    /// Try to lock this [`FairMutex`], returning a lock guard if successful.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::mutex::FairMutex::<_>::new(42);
+    ///
+    /// let maybe_guard = lock.try_lock();
+    /// assert!(maybe_guard.is_some());
+    ///
+    /// // `maybe_guard` is still held, so the second call fails
+    /// let maybe_guard2 = lock.try_lock();
+    /// assert!(maybe_guard2.is_none());
+    /// ```
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<FairMutexGuard<T>> {
+        self.try_lock_starver().ok()
+    }
+
+    /// Tries to lock this [`FairMutex`] and returns a result that indicates whether the lock was
+    /// rejected due to a starver or not.
+    #[inline(always)]
+    pub fn try_lock_starver(&self) -> Result<FairMutexGuard<T>, LockRejectReason> {
+        match self
+            .lock
+            .compare_exchange(0, LOCKED, Ordering::Acquire, Ordering::Relaxed)
+            .unwrap_or_else(|x| x)
+        {
+            0 => Ok(FairMutexGuard {
+                lock: &self.lock,
+                data: unsafe { &mut *self.data.get() },
+            }),
+            LOCKED => Err(LockRejectReason::Locked),
+            _ => Err(LockRejectReason::Starved),
+        }
+    }
+
+    /// Indicates that the current user has been waiting for the lock for a while
+    /// and that the lock should yield to this thread over a newly arriving thread.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::mutex::FairMutex::<_>::new(42);
+    ///
+    /// // Lock the mutex to simulate it being used by another user.
+    /// let guard1 = lock.lock();
+    ///
+    /// // Try to lock the mutex.
+    /// let guard2 = lock.try_lock();
+    /// assert!(guard2.is_none());
+    ///
+    /// // Wait for a while.
+    /// wait_for_a_while();
+    ///
+    /// // We are now starved, indicate as such.
+    /// let starve = lock.starve();
+    ///
+    /// // Once the lock is released, another user trying to lock it will
+    /// // fail.
+    /// drop(guard1);
+    /// let guard3 = lock.try_lock();
+    /// assert!(guard3.is_none());
+    ///
+    /// // However, we will be able to lock it.
+    /// let guard4 = starve.try_lock();
+    /// assert!(guard4.is_ok());
+    ///
+    /// # fn wait_for_a_while() {}
+    /// ```
+    pub fn starve(&self) -> Starvation<'_, T, R> {
+        // Add a new starver to the state.
+        if self.lock.fetch_add(STARVED, Ordering::Relaxed) > (core::isize::MAX - 1) as usize {
+            // In the event of a potential lock overflow, abort.
+            crate::abort();
+        }
+
+        Starvation { lock: self }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the [`FairMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
+    /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
+    /// such, this is a 'zero-cost' operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut lock = spin::mutex::FairMutex::<_>::new(0);
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.lock(), 10);
+    /// ```
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        // We know statically that there are no other references to `self`, so
+        // there's no need to lock the inner mutex.
+        unsafe { &mut *self.data.get() }
+    }
+}
+
+impl<T: ?Sized + fmt::Debug, R> fmt::Debug for FairMutex<T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        struct LockWrapper<'a, T: ?Sized + fmt::Debug>(Option<FairMutexGuard<'a, T>>);
+
+        impl<T: ?Sized + fmt::Debug> fmt::Debug for LockWrapper<'_, T> {
+            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                match &self.0 {
+                    Some(guard) => fmt::Debug::fmt(guard, f),
+                    None => f.write_str("<locked>"),
+                }
+            }
+        }
+
+        f.debug_struct("FairMutex")
+            .field("data", &LockWrapper(self.try_lock()))
+            .finish()
+    }
+}
+
+impl<T: ?Sized + Default, R> Default for FairMutex<T, R> {
+    fn default() -> Self {
+        Self::new(Default::default())
+    }
+}
+
+impl<T, R> From<T> for FairMutex<T, R> {
+    fn from(data: T) -> Self {
+        Self::new(data)
+    }
+}
+
+impl<'a, T: ?Sized> FairMutexGuard<'a, T> {
+    /// Leak the lock guard, yielding a mutable reference to the underlying data.
+    ///
+    /// Note that this function will permanently lock the original [`FairMutex`].
+    ///
+    /// ```
+    /// let mylock = spin::mutex::FairMutex::<_>::new(0);
+    ///
+    /// let data: &mut i32 = spin::mutex::FairMutexGuard::leak(mylock.lock());
+    ///
+    /// *data = 1;
+    /// assert_eq!(*data, 1);
+    /// ```
+    #[inline(always)]
+    pub fn leak(this: Self) -> &'a mut T {
+        // Use ManuallyDrop to avoid stacked-borrow invalidation
+        let mut this = ManuallyDrop::new(this);
+        // We know statically that only we are referencing data
+        unsafe { &mut *this.data }
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for FairMutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Display> fmt::Display for FairMutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized> Deref for FairMutexGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        // We know statically that only we are referencing data
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for FairMutexGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // We know statically that only we are referencing data
+        unsafe { &mut *self.data }
+    }
+}
+
+impl<'a, T: ?Sized> Drop for FairMutexGuard<'a, T> {
+    /// The dropping of the MutexGuard will release the lock it was created from.
+    fn drop(&mut self) {
+        self.lock.fetch_and(!LOCKED, Ordering::Release);
+    }
+}
+
+impl<'a, T: ?Sized, R> Starvation<'a, T, R> {
+    /// Attempts to lock the mutex if we are the only starving user.
+    ///
+    /// This allows another user to lock the mutex if they are starving as well.
+    pub fn try_lock_fair(self) -> Result<FairMutexGuard<'a, T>, Self> {
+        // Try to lock the mutex.
+        if self
+            .lock
+            .lock
+            .compare_exchange(
+                STARVED,
+                STARVED | LOCKED,
+                Ordering::Acquire,
+                Ordering::Relaxed,
+            )
+            .is_ok()
+        {
+            // We are the only starving user, lock the mutex.
+            Ok(FairMutexGuard {
+                lock: &self.lock.lock,
+                data: self.lock.data.get(),
+            })
+        } else {
+            // Another user is starving, fail.
+            Err(self)
+        }
+    }
+
+    /// Attempts to lock the mutex.
+    ///
+    /// If the lock is currently held by another thread, this will return `None`.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::mutex::FairMutex::<_>::new(42);
+    ///
+    /// // Lock the mutex to simulate it being used by another user.
+    /// let guard1 = lock.lock();
+    ///
+    /// // Try to lock the mutex.
+    /// let guard2 = lock.try_lock();
+    /// assert!(guard2.is_none());
+    ///
+    /// // Wait for a while.
+    /// wait_for_a_while();
+    ///
+    /// // We are now starved, indicate as such.
+    /// let starve = lock.starve();
+    ///
+    /// // Once the lock is released, another user trying to lock it will
+    /// // fail.
+    /// drop(guard1);
+    /// let guard3 = lock.try_lock();
+    /// assert!(guard3.is_none());
+    ///
+    /// // However, we will be able to lock it.
+    /// let guard4 = starve.try_lock();
+    /// assert!(guard4.is_ok());
+    ///
+    /// # fn wait_for_a_while() {}
+    /// ```
+    pub fn try_lock(self) -> Result<FairMutexGuard<'a, T>, Self> {
+        // Try to lock the mutex.
+        if self.lock.lock.fetch_or(LOCKED, Ordering::Acquire) & LOCKED == 0 {
+            // We have successfully locked the mutex.
+            // By dropping `self` here, we decrement the starvation count.
+            Ok(FairMutexGuard {
+                lock: &self.lock.lock,
+                data: self.lock.data.get(),
+            })
+        } else {
+            Err(self)
+        }
+    }
+}
+
+impl<'a, T: ?Sized, R: RelaxStrategy> Starvation<'a, T, R> {
+    /// Locks the mutex.
+    pub fn lock(mut self) -> FairMutexGuard<'a, T> {
+        // Try to lock the mutex.
+        loop {
+            match self.try_lock() {
+                Ok(lock) => return lock,
+                Err(starve) => self = starve,
+            }
+
+            // Relax until the lock is released.
+            while self.lock.is_locked() {
+                R::relax();
+            }
+        }
+    }
+}
+
+impl<'a, T: ?Sized, R> Drop for Starvation<'a, T, R> {
+    fn drop(&mut self) {
+        // As there is no longer a user being starved, we decrement the starver count.
+        self.lock.lock.fetch_sub(STARVED, Ordering::Release);
+    }
+}
+
+impl fmt::Display for LockRejectReason {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            LockRejectReason::Locked => write!(f, "locked"),
+            LockRejectReason::Starved => write!(f, "starved"),
+        }
+    }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for LockRejectReason {}
+
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for FairMutex<(), R> {
+    type GuardMarker = lock_api_crate::GuardSend;
+
+    const INIT: Self = Self::new(());
+
+    fn lock(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(Self::lock(self));
+    }
+
+    fn try_lock(&self) -> bool {
+        // Prevent guard destructor running
+        Self::try_lock(self).map(core::mem::forget).is_some()
+    }
+
+    unsafe fn unlock(&self) {
+        self.force_unlock();
+    }
+
+    fn is_locked(&self) -> bool {
+        Self::is_locked(self)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::prelude::v1::*;
+
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
+    use std::thread;
+
+    type FairMutex<T> = super::FairMutex<T>;
+
+    #[derive(Eq, PartialEq, Debug)]
+    struct NonCopy(i32);
+
+    #[test]
+    fn smoke() {
+        let m = FairMutex::<_>::new(());
+        drop(m.lock());
+        drop(m.lock());
+    }
+
+    #[test]
+    fn lots_and_lots() {
+        static M: FairMutex<()> = FairMutex::<_>::new(());
+        static mut CNT: u32 = 0;
+        const J: u32 = 1000;
+        const K: u32 = 3;
+
+        fn inc() {
+            for _ in 0..J {
+                unsafe {
+                    let _g = M.lock();
+                    CNT += 1;
+                }
+            }
+        }
+
+        let (tx, rx) = channel();
+        for _ in 0..K {
+            let tx2 = tx.clone();
+            thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            });
+            let tx2 = tx.clone();
+            thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            });
+        }
+
+        drop(tx);
+        for _ in 0..2 * K {
+            rx.recv().unwrap();
+        }
+        assert_eq!(unsafe { CNT }, J * K * 2);
+    }
+
+    #[test]
+    fn try_lock() {
+        let mutex = FairMutex::<_>::new(42);
+
+        // First lock succeeds
+        let a = mutex.try_lock();
+        assert_eq!(a.as_ref().map(|r| **r), Some(42));
+
+        // Additional lock fails
+        let b = mutex.try_lock();
+        assert!(b.is_none());
+
+        // After dropping lock, it succeeds again
+        ::core::mem::drop(a);
+        let c = mutex.try_lock();
+        assert_eq!(c.as_ref().map(|r| **r), Some(42));
+    }
+
+    #[test]
+    fn test_into_inner() {
+        let m = FairMutex::<_>::new(NonCopy(10));
+        assert_eq!(m.into_inner(), NonCopy(10));
+    }
+
+    #[test]
+    fn test_into_inner_drop() {
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = FairMutex::<_>::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = m.into_inner();
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+
+    #[test]
+    fn test_mutex_arc_nested() {
+        // Tests nested mutexes and access
+        // to underlying data.
+        let arc = Arc::new(FairMutex::<_>::new(1));
+        let arc2 = Arc::new(FairMutex::<_>::new(arc));
+        let (tx, rx) = channel();
+        let _t = thread::spawn(move || {
+            let lock = arc2.lock();
+            let lock2 = lock.lock();
+            assert_eq!(*lock2, 1);
+            tx.send(()).unwrap();
+        });
+        rx.recv().unwrap();
+    }
+
+    #[test]
+    fn test_mutex_arc_access_in_unwind() {
+        let arc = Arc::new(FairMutex::<_>::new(1));
+        let arc2 = arc.clone();
+        let _ = thread::spawn(move || -> () {
+            struct Unwinder {
+                i: Arc<FairMutex<i32>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    *self.i.lock() += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        })
+        .join();
+        let lock = arc.lock();
+        assert_eq!(*lock, 2);
+    }
+
+    #[test]
+    fn test_mutex_unsized() {
+        let mutex: &FairMutex<[i32]> = &FairMutex::<_>::new([1, 2, 3]);
+        {
+            let b = &mut *mutex.lock();
+            b[0] = 4;
+            b[2] = 5;
+        }
+        let comp: &[i32] = &[4, 2, 5];
+        assert_eq!(&*mutex.lock(), comp);
+    }
+
+    #[test]
+    fn test_mutex_force_lock() {
+        let lock = FairMutex::<_>::new(());
+        ::std::mem::forget(lock.lock());
+        unsafe {
+            lock.force_unlock();
+        }
+        assert!(lock.try_lock().is_some());
+    }
+}
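A small sketch of the starvation API documented above (`starve`, `try_lock_starver`, `LockRejectReason`); it assumes the `fair_mutex` feature on top of the features enabled in cargo_embargo.json, which leave it off:

    use spin::mutex::fair::{FairMutex, LockRejectReason};

    fn main() {
        let lock = FairMutex::<_>::new(0u32);

        // Simulate another user currently holding the lock.
        let held = lock.lock();
        // Register this caller as starved; newcomers will now be turned away.
        let starving = lock.starve();
        drop(held);

        // A fresh arrival is rejected with the Starved reason, not Locked.
        match lock.try_lock_starver() {
            Err(LockRejectReason::Starved) => {}
            other => panic!("unexpected result: {:?}", other),
        }

        // The starving waiter itself still acquires the lock.
        *starving.lock() += 1;
        assert_eq!(*lock.lock(), 1);
    }
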
diff --git a/crates/spin/src/mutex/spin.rs b/crates/spin/src/mutex/spin.rs
new file mode 100644
index 0000000..561d765
--- /dev/null
+++ b/crates/spin/src/mutex/spin.rs
@@ -0,0 +1,544 @@
+//! A naïve spinning mutex.
+//!
+//! Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case
+//! latency is theoretically infinite.
+
+use crate::{
+    atomic::{AtomicBool, Ordering},
+    RelaxStrategy, Spin,
+};
+use core::{
+    cell::UnsafeCell,
+    fmt,
+    marker::PhantomData,
+    mem::ManuallyDrop,
+    ops::{Deref, DerefMut},
+};
+
+/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::mutex::SpinMutex::<_>::new(0);
+///
+/// // Modify the data
+/// *lock.lock() = 2;
+///
+/// // Read the data
+/// let answer = *lock.lock();
+/// assert_eq!(answer, 2);
+/// ```
+///
+/// # Thread safety example
+///
+/// ```
+/// use spin;
+/// use std::sync::{Arc, Barrier};
+///
+/// let thread_count = 1000;
+/// let spin_mutex = Arc::new(spin::mutex::SpinMutex::<_>::new(0));
+///
+/// // We use a barrier to ensure the readout happens after all writing
+/// let barrier = Arc::new(Barrier::new(thread_count + 1));
+///
+/// # let mut ts = Vec::new();
+/// for _ in (0..thread_count) {
+///     let my_barrier = barrier.clone();
+///     let my_lock = spin_mutex.clone();
+/// # let t =
+///     std::thread::spawn(move || {
+///         let mut guard = my_lock.lock();
+///         *guard += 1;
+///
+///         // Release the lock to prevent a deadlock
+///         drop(guard);
+///         my_barrier.wait();
+///     });
+/// # ts.push(t);
+/// }
+///
+/// barrier.wait();
+///
+/// let answer = { *spin_mutex.lock() };
+/// assert_eq!(answer, thread_count);
+///
+/// # for t in ts {
+/// #     t.join().unwrap();
+/// # }
+/// ```
+pub struct SpinMutex<T: ?Sized, R = Spin> {
+    phantom: PhantomData<R>,
+    pub(crate) lock: AtomicBool,
+    data: UnsafeCell<T>,
+}
+
+/// A guard that provides mutable data access.
+///
+/// When the guard falls out of scope it will release the lock.
+pub struct SpinMutexGuard<'a, T: ?Sized + 'a> {
+    lock: &'a AtomicBool,
+    data: *mut T,
+}
+
+// Same unsafe impls as `std::sync::Mutex`
+unsafe impl<T: ?Sized + Send, R> Sync for SpinMutex<T, R> {}
+unsafe impl<T: ?Sized + Send, R> Send for SpinMutex<T, R> {}
+
+unsafe impl<T: ?Sized + Sync> Sync for SpinMutexGuard<'_, T> {}
+unsafe impl<T: ?Sized + Send> Send for SpinMutexGuard<'_, T> {}
+
+impl<T, R> SpinMutex<T, R> {
+    /// Creates a new [`SpinMutex`] wrapping the supplied data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use spin::mutex::SpinMutex;
+    ///
+    /// static MUTEX: SpinMutex<()> = SpinMutex::<_>::new(());
+    ///
+    /// fn demo() {
+    ///     let lock = MUTEX.lock();
+    ///     // do something with lock
+    ///     drop(lock);
+    /// }
+    /// ```
+    #[inline(always)]
+    pub const fn new(data: T) -> Self {
+        SpinMutex {
+            lock: AtomicBool::new(false),
+            data: UnsafeCell::new(data),
+            phantom: PhantomData,
+        }
+    }
+
+    /// Consumes this [`SpinMutex`] and unwraps the underlying data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
+    /// assert_eq!(42, lock.into_inner());
+    /// ```
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        // We know statically that there are no outstanding references to
+        // `self` so there's no need to lock.
+        let SpinMutex { data, .. } = self;
+        data.into_inner()
+    }
+
+    /// Returns a mutable pointer to the underlying data.
+    ///
+    /// This is mostly meant to be used for applications which require manual unlocking, but where
+    /// storing both the lock and the pointer to the inner data gets inefficient.
+    ///
+    /// # Example
+    /// ```
+    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
+    ///
+    /// unsafe {
+    ///     core::mem::forget(lock.lock());
+    ///
+    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
+    ///     lock.as_mut_ptr().write(58);
+    ///
+    ///     lock.force_unlock();
+    /// }
+    ///
+    /// assert_eq!(*lock.lock(), 58);
+    ///
+    /// ```
+    #[inline(always)]
+    pub fn as_mut_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+impl<T: ?Sized, R: RelaxStrategy> SpinMutex<T, R> {
+    /// Locks the [`SpinMutex`] and returns a guard that permits access to the inner data.
+    ///
+    /// The returned value may be dereferenced for data access
+    /// and the lock will be dropped when the guard falls out of scope.
+    ///
+    /// ```
+    /// let lock = spin::mutex::SpinMutex::<_>::new(0);
+    /// {
+    ///     let mut data = lock.lock();
+    ///     // The lock is now locked and the data can be accessed
+    ///     *data += 1;
+    ///     // The lock is implicitly dropped at the end of the scope
+    /// }
+    /// ```
+    #[inline(always)]
+    pub fn lock(&self) -> SpinMutexGuard<T> {
+        // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
+        // when called in a loop.
+        while self
+            .lock
+            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .is_err()
+        {
+            // Wait until the lock looks unlocked before retrying
+            while self.is_locked() {
+                R::relax();
+            }
+        }
+
+        SpinMutexGuard {
+            lock: &self.lock,
+            data: unsafe { &mut *self.data.get() },
+        }
+    }
+}
+
+impl<T: ?Sized, R> SpinMutex<T, R> {
+    /// Returns `true` if the lock is currently held.
+    ///
+    /// # Safety
+    ///
+    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
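+    ///
+    /// # Example
+    ///
+    /// A minimal usage sketch (mirrors the `is_locked` test pattern used elsewhere in this crate):
+    ///
+    /// ```
+    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
+    /// assert!(!lock.is_locked());
+    ///
+    /// let guard = lock.lock();
+    /// assert!(lock.is_locked());
+    ///
+    /// drop(guard);
+    /// assert!(!lock.is_locked());
+    /// ```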
+    #[inline(always)]
+    pub fn is_locked(&self) -> bool {
+        self.lock.load(Ordering::Relaxed)
+    }
+
+    /// Force unlock this [`SpinMutex`].
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if the lock is not held by the current
+    /// thread. However, this can be useful in some instances for exposing the
+    /// lock to FFI that doesn't know how to deal with RAII.
+    #[inline(always)]
+    pub unsafe fn force_unlock(&self) {
+        self.lock.store(false, Ordering::Release);
+    }
+
+    /// Try to lock this [`SpinMutex`], returning a lock guard if successful.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
+    ///
+    /// let maybe_guard = lock.try_lock();
+    /// assert!(maybe_guard.is_some());
+    ///
+    /// // `maybe_guard` is still held, so the second call fails
+    /// let maybe_guard2 = lock.try_lock();
+    /// assert!(maybe_guard2.is_none());
+    /// ```
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<SpinMutexGuard<T>> {
+        // The reason for using a strong compare_exchange is explained here:
+        // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
+        if self
+            .lock
+            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+        {
+            Some(SpinMutexGuard {
+                lock: &self.lock,
+                data: unsafe { &mut *self.data.get() },
+            })
+        } else {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the [`SpinMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
+    /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
+    /// such, this is a 'zero-cost' operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut lock = spin::mutex::SpinMutex::<_>::new(0);
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.lock(), 10);
+    /// ```
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        // We know statically that there are no other references to `self`, so
+        // there's no need to lock the inner mutex.
+        unsafe { &mut *self.data.get() }
+    }
+}
+
+impl<T: ?Sized + fmt::Debug, R> fmt::Debug for SpinMutex<T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => write!(f, "Mutex {{ data: ")
+                .and_then(|()| (&*guard).fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Mutex {{ <locked> }}"),
+        }
+    }
+}
+
+impl<T: ?Sized + Default, R> Default for SpinMutex<T, R> {
+    fn default() -> Self {
+        Self::new(Default::default())
+    }
+}
+
+impl<T, R> From<T> for SpinMutex<T, R> {
+    fn from(data: T) -> Self {
+        Self::new(data)
+    }
+}
+
+impl<'a, T: ?Sized> SpinMutexGuard<'a, T> {
+    /// Leak the lock guard, yielding a mutable reference to the underlying data.
+    ///
+    /// Note that this function will permanently lock the original [`SpinMutex`].
+    ///
+    /// ```
+    /// let mylock = spin::mutex::SpinMutex::<_>::new(0);
+    ///
+    /// let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
+    ///
+    /// *data = 1;
+    /// assert_eq!(*data, 1);
+    /// ```
+    #[inline(always)]
+    pub fn leak(this: Self) -> &'a mut T {
+        // Use ManuallyDrop to avoid stacked-borrow invalidation
+        let mut this = ManuallyDrop::new(this);
+        // We know statically that only we are referencing data
+        unsafe { &mut *this.data }
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for SpinMutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Display> fmt::Display for SpinMutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized> Deref for SpinMutexGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        // We know statically that only we are referencing data
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for SpinMutexGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // We know statically that only we are referencing data
+        unsafe { &mut *self.data }
+    }
+}
+
+impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T> {
+    /// The dropping of the MutexGuard will release the lock it was created from.
+    fn drop(&mut self) {
+        self.lock.store(false, Ordering::Release);
+    }
+}
+
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for SpinMutex<(), R> {
+    type GuardMarker = lock_api_crate::GuardSend;
+
+    const INIT: Self = Self::new(());
+
+    fn lock(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(Self::lock(self));
+    }
+
+    fn try_lock(&self) -> bool {
+        // Prevent guard destructor running
+        Self::try_lock(self).map(core::mem::forget).is_some()
+    }
+
+    unsafe fn unlock(&self) {
+        self.force_unlock();
+    }
+
+    fn is_locked(&self) -> bool {
+        Self::is_locked(self)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::prelude::v1::*;
+
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
+    use std::thread;
+
+    type SpinMutex<T> = super::SpinMutex<T>;
+
+    #[derive(Eq, PartialEq, Debug)]
+    struct NonCopy(i32);
+
+    #[test]
+    fn smoke() {
+        let m = SpinMutex::<_>::new(());
+        drop(m.lock());
+        drop(m.lock());
+    }
+
+    #[test]
+    fn lots_and_lots() {
+        static M: SpinMutex<()> = SpinMutex::<_>::new(());
+        static mut CNT: u32 = 0;
+        const J: u32 = 1000;
+        const K: u32 = 3;
+
+        fn inc() {
+            for _ in 0..J {
+                unsafe {
+                    let _g = M.lock();
+                    CNT += 1;
+                }
+            }
+        }
+
+        let (tx, rx) = channel();
+        let mut ts = Vec::new();
+        for _ in 0..K {
+            let tx2 = tx.clone();
+            ts.push(thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            }));
+            let tx2 = tx.clone();
+            ts.push(thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            }));
+        }
+
+        drop(tx);
+        for _ in 0..2 * K {
+            rx.recv().unwrap();
+        }
+        assert_eq!(unsafe { CNT }, J * K * 2);
+
+        for t in ts {
+            t.join().unwrap();
+        }
+    }
+
+    #[test]
+    fn try_lock() {
+        let mutex = SpinMutex::<_>::new(42);
+
+        // First lock succeeds
+        let a = mutex.try_lock();
+        assert_eq!(a.as_ref().map(|r| **r), Some(42));
+
+        // Additional lock fails
+        let b = mutex.try_lock();
+        assert!(b.is_none());
+
+        // After dropping lock, it succeeds again
+        ::core::mem::drop(a);
+        let c = mutex.try_lock();
+        assert_eq!(c.as_ref().map(|r| **r), Some(42));
+    }
+
+    #[test]
+    fn test_into_inner() {
+        let m = SpinMutex::<_>::new(NonCopy(10));
+        assert_eq!(m.into_inner(), NonCopy(10));
+    }
+
+    #[test]
+    fn test_into_inner_drop() {
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = SpinMutex::<_>::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = m.into_inner();
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+
+    #[test]
+    fn test_mutex_arc_nested() {
+        // Tests nested mutexes and access
+        // to underlying data.
+        let arc = Arc::new(SpinMutex::<_>::new(1));
+        let arc2 = Arc::new(SpinMutex::<_>::new(arc));
+        let (tx, rx) = channel();
+        let t = thread::spawn(move || {
+            let lock = arc2.lock();
+            let lock2 = lock.lock();
+            assert_eq!(*lock2, 1);
+            tx.send(()).unwrap();
+        });
+        rx.recv().unwrap();
+        t.join().unwrap();
+    }
+
+    #[test]
+    #[ignore = "Android uses panic_abort"]
+    fn test_mutex_arc_access_in_unwind() {
+        let arc = Arc::new(SpinMutex::<_>::new(1));
+        let arc2 = arc.clone();
+        let _ = thread::spawn(move || -> () {
+            struct Unwinder {
+                i: Arc<SpinMutex<i32>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    *self.i.lock() += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        })
+        .join();
+        let lock = arc.lock();
+        assert_eq!(*lock, 2);
+    }
+
+    #[test]
+    fn test_mutex_unsized() {
+        let mutex: &SpinMutex<[i32]> = &SpinMutex::<_>::new([1, 2, 3]);
+        {
+            let b = &mut *mutex.lock();
+            b[0] = 4;
+            b[2] = 5;
+        }
+        let comp: &[i32] = &[4, 2, 5];
+        assert_eq!(&*mutex.lock(), comp);
+    }
+
+    #[test]
+    fn test_mutex_force_lock() {
+        let lock = SpinMutex::<_>::new(());
+        ::std::mem::forget(lock.lock());
+        unsafe {
+            lock.force_unlock();
+        }
+        assert!(lock.try_lock().is_some());
+    }
+}
diff --git a/crates/spin/src/mutex/ticket.rs b/crates/spin/src/mutex/ticket.rs
new file mode 100644
index 0000000..01b905e
--- /dev/null
+++ b/crates/spin/src/mutex/ticket.rs
@@ -0,0 +1,538 @@
+//! A ticket-based mutex.
+//!
+//! Waiting threads take a 'ticket' from the lock in the order they arrive and gain access to the lock when their
+//! ticket is next in the queue. Best-case latency is slightly worse than a regular spinning mutex, but worst-case
+//! latency is infinitely better. Waiting threads simply need to wait for all threads that come before them in the
+//! queue to finish.
+
+use crate::{
+    atomic::{AtomicUsize, Ordering},
+    RelaxStrategy, Spin,
+};
+use core::{
+    cell::UnsafeCell,
+    fmt,
+    marker::PhantomData,
+    ops::{Deref, DerefMut},
+};
+
+/// A spin-based [ticket lock](https://en.wikipedia.org/wiki/Ticket_lock) providing mutually exclusive access to data.
+///
+/// A ticket lock is analogous to a queue management system for lock requests. When a thread tries to take a lock, it
+/// is assigned a 'ticket'. It then spins until its ticket becomes next in line. When the lock guard is released, the
+/// next ticket will be processed.
+///
+/// Ticket locks significantly reduce the worst-case performance of locking at the cost of slightly higher average-time
+/// overhead.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::mutex::TicketMutex::<_>::new(0);
+///
+/// // Modify the data
+/// *lock.lock() = 2;
+///
+/// // Read the data
+/// let answer = *lock.lock();
+/// assert_eq!(answer, 2);
+/// ```
+///
+/// # Thread safety example
+///
+/// ```
+/// use spin;
+/// use std::sync::{Arc, Barrier};
+///
+/// let thread_count = 1000;
+/// let spin_mutex = Arc::new(spin::mutex::TicketMutex::<_>::new(0));
+///
+/// // We use a barrier to ensure the readout happens after all writing
+/// let barrier = Arc::new(Barrier::new(thread_count + 1));
+///
+/// for _ in (0..thread_count) {
+///     let my_barrier = barrier.clone();
+///     let my_lock = spin_mutex.clone();
+///     std::thread::spawn(move || {
+///         let mut guard = my_lock.lock();
+///         *guard += 1;
+///
+///         // Release the lock to prevent a deadlock
+///         drop(guard);
+///         my_barrier.wait();
+///     });
+/// }
+///
+/// barrier.wait();
+///
+/// let answer = { *spin_mutex.lock() };
+/// assert_eq!(answer, thread_count);
+/// ```
+pub struct TicketMutex<T: ?Sized, R = Spin> {
+    phantom: PhantomData<R>,
+    next_ticket: AtomicUsize,
+    next_serving: AtomicUsize,
+    data: UnsafeCell<T>,
+}
+
+/// A guard that protects some data.
+///
+/// When the guard is dropped, the next ticket will be processed.
+pub struct TicketMutexGuard<'a, T: ?Sized + 'a> {
+    next_serving: &'a AtomicUsize,
+    ticket: usize,
+    data: &'a mut T,
+}
+
+unsafe impl<T: ?Sized + Send, R> Sync for TicketMutex<T, R> {}
+unsafe impl<T: ?Sized + Send, R> Send for TicketMutex<T, R> {}
+
+impl<T, R> TicketMutex<T, R> {
+    /// Creates a new [`TicketMutex`] wrapping the supplied data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use spin::mutex::TicketMutex;
+    ///
+    /// static MUTEX: TicketMutex<()> = TicketMutex::<_>::new(());
+    ///
+    /// fn demo() {
+    ///     let lock = MUTEX.lock();
+    ///     // do something with lock
+    ///     drop(lock);
+    /// }
+    /// ```
+    #[inline(always)]
+    pub const fn new(data: T) -> Self {
+        Self {
+            phantom: PhantomData,
+            next_ticket: AtomicUsize::new(0),
+            next_serving: AtomicUsize::new(0),
+            data: UnsafeCell::new(data),
+        }
+    }
+
+    /// Consumes this [`TicketMutex`] and unwraps the underlying data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::mutex::TicketMutex::<_>::new(42);
+    /// assert_eq!(42, lock.into_inner());
+    /// ```
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.data.into_inner()
+    }
+
+    /// Returns a mutable pointer to the underlying data.
+    ///
+    /// This is mostly meant to be used for applications which require manual unlocking, but where
+    /// storing both the lock and the pointer to the inner data gets inefficient.
+    ///
+    /// # Example
+    /// ```
+    /// let lock = spin::mutex::TicketMutex::<_>::new(42);
+    ///
+    /// unsafe {
+    ///     core::mem::forget(lock.lock());
+    ///
+    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
+    ///     lock.as_mut_ptr().write(58);
+    ///
+    ///     lock.force_unlock();
+    /// }
+    ///
+    /// assert_eq!(*lock.lock(), 58);
+    ///
+    /// ```
+    #[inline(always)]
+    pub fn as_mut_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+impl<T: ?Sized + fmt::Debug, R> fmt::Debug for TicketMutex<T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => write!(f, "Mutex {{ data: ")
+                .and_then(|()| (&*guard).fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Mutex {{ <locked> }}"),
+        }
+    }
+}
+
+impl<T: ?Sized, R: RelaxStrategy> TicketMutex<T, R> {
+    /// Locks the [`TicketMutex`] and returns a guard that permits access to the inner data.
+    ///
+    /// The returned guard may be dereferenced for data access
+    /// and the lock will be dropped when the guard falls out of scope.
+    ///
+    /// ```
+    /// let lock = spin::mutex::TicketMutex::<_>::new(0);
+    /// {
+    ///     let mut data = lock.lock();
+    ///     // The lock is now locked and the data can be accessed
+    ///     *data += 1;
+    ///     // The lock is implicitly dropped at the end of the scope
+    /// }
+    /// ```
+    #[inline(always)]
+    pub fn lock(&self) -> TicketMutexGuard<T> {
+        let ticket = self.next_ticket.fetch_add(1, Ordering::Relaxed);
+
+        while self.next_serving.load(Ordering::Acquire) != ticket {
+            R::relax();
+        }
+
+        TicketMutexGuard {
+            next_serving: &self.next_serving,
+            ticket,
+            // Safety
+            // We know that we are the next ticket to be served,
+            // so there's no other thread accessing the data.
+            //
+            // Every other thread has another ticket number so it's
+            // definitely stuck in the spin loop above.
+            data: unsafe { &mut *self.data.get() },
+        }
+    }
+}
+
+impl<T: ?Sized, R> TicketMutex<T, R> {
+    /// Returns `true` if the lock is currently held.
+    ///
+    /// # Safety
+    ///
+    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
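+    ///
+    /// # Example
+    ///
+    /// A minimal usage sketch (mirrors the `is_locked` test below):
+    ///
+    /// ```
+    /// let mutex = spin::mutex::TicketMutex::<_>::new(());
+    /// assert!(!mutex.is_locked());
+    ///
+    /// let guard = mutex.lock();
+    /// assert!(mutex.is_locked());
+    ///
+    /// drop(guard);
+    /// assert!(!mutex.is_locked());
+    /// ```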
+    #[inline(always)]
+    pub fn is_locked(&self) -> bool {
+        let ticket = self.next_ticket.load(Ordering::Relaxed);
+        self.next_serving.load(Ordering::Relaxed) != ticket
+    }
+
+    /// Force unlock this [`TicketMutex`], by serving the next ticket.
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if the lock is not held by the current
+    /// thread. However, this can be useful in some instances for exposing the
+    /// lock to FFI that doesn't know how to deal with RAII.
+    #[inline(always)]
+    pub unsafe fn force_unlock(&self) {
+        self.next_serving.fetch_add(1, Ordering::Release);
+    }
+
+    /// Try to lock this [`TicketMutex`], returning a lock guard if successful.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::mutex::TicketMutex::<_>::new(42);
+    ///
+    /// let maybe_guard = lock.try_lock();
+    /// assert!(maybe_guard.is_some());
+    ///
+    /// // `maybe_guard` is still held, so the second call fails
+    /// let maybe_guard2 = lock.try_lock();
+    /// assert!(maybe_guard2.is_none());
+    /// ```
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<TicketMutexGuard<T>> {
+        let ticket = self
+            .next_ticket
+            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |ticket| {
+                if self.next_serving.load(Ordering::Acquire) == ticket {
+                    Some(ticket + 1)
+                } else {
+                    None
+                }
+            });
+
+        ticket.ok().map(|ticket| TicketMutexGuard {
+            next_serving: &self.next_serving,
+            ticket,
+            // Safety
+            // We have a ticket that is equal to the next_serving ticket, so we know:
+            // - that no other thread can have the same ticket id as this thread
+            // - that we are the next one to be served so we have exclusive access to the data
+            data: unsafe { &mut *self.data.get() },
+        })
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the [`TicketMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
+    /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
+    /// such, this is a 'zero-cost' operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut lock = spin::mutex::TicketMutex::<_>::new(0);
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.lock(), 10);
+    /// ```
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        // Safety:
+        // We know that there are no other references to `self`,
+        // so it's safe to return an exclusive reference to the data.
+        unsafe { &mut *self.data.get() }
+    }
+}
+
+impl<T: ?Sized + Default, R> Default for TicketMutex<T, R> {
+    fn default() -> Self {
+        Self::new(Default::default())
+    }
+}
+
+impl<T, R> From<T> for TicketMutex<T, R> {
+    fn from(data: T) -> Self {
+        Self::new(data)
+    }
+}
+
+impl<'a, T: ?Sized> TicketMutexGuard<'a, T> {
+    /// Leak the lock guard, yielding a mutable reference to the underlying data.
+    ///
+    /// Note that this function will permanently lock the original [`TicketMutex`].
+    ///
+    /// ```
+    /// let mylock = spin::mutex::TicketMutex::<_>::new(0);
+    ///
+    /// let data: &mut i32 = spin::mutex::TicketMutexGuard::leak(mylock.lock());
+    ///
+    /// *data = 1;
+    /// assert_eq!(*data, 1);
+    /// ```
+    #[inline(always)]
+    pub fn leak(this: Self) -> &'a mut T {
+        let data = this.data as *mut _; // Keep it in pointer form temporarily to avoid double-aliasing
+        core::mem::forget(this);
+        unsafe { &mut *data }
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for TicketMutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Display> fmt::Display for TicketMutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized> Deref for TicketMutexGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        self.data
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for TicketMutexGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        self.data
+    }
+}
+
+impl<'a, T: ?Sized> Drop for TicketMutexGuard<'a, T> {
+    fn drop(&mut self) {
+        let new_ticket = self.ticket + 1;
+        self.next_serving.store(new_ticket, Ordering::Release);
+    }
+}
+
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for TicketMutex<(), R> {
+    type GuardMarker = lock_api_crate::GuardSend;
+
+    const INIT: Self = Self::new(());
+
+    fn lock(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(Self::lock(self));
+    }
+
+    fn try_lock(&self) -> bool {
+        // Prevent guard destructor running
+        Self::try_lock(self).map(core::mem::forget).is_some()
+    }
+
+    unsafe fn unlock(&self) {
+        self.force_unlock();
+    }
+
+    fn is_locked(&self) -> bool {
+        Self::is_locked(self)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::prelude::v1::*;
+
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
+    use std::thread;
+
+    type TicketMutex<T> = super::TicketMutex<T>;
+
+    #[derive(Eq, PartialEq, Debug)]
+    struct NonCopy(i32);
+
+    #[test]
+    fn smoke() {
+        let m = TicketMutex::<_>::new(());
+        drop(m.lock());
+        drop(m.lock());
+    }
+
+    #[test]
+    fn lots_and_lots() {
+        static M: TicketMutex<()> = TicketMutex::<_>::new(());
+        static mut CNT: u32 = 0;
+        const J: u32 = 1000;
+        const K: u32 = 3;
+
+        fn inc() {
+            for _ in 0..J {
+                unsafe {
+                    let _g = M.lock();
+                    CNT += 1;
+                }
+            }
+        }
+
+        let (tx, rx) = channel();
+        for _ in 0..K {
+            let tx2 = tx.clone();
+            thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            });
+            let tx2 = tx.clone();
+            thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            });
+        }
+
+        drop(tx);
+        for _ in 0..2 * K {
+            rx.recv().unwrap();
+        }
+        assert_eq!(unsafe { CNT }, J * K * 2);
+    }
+
+    #[test]
+    fn try_lock() {
+        let mutex = TicketMutex::<_>::new(42);
+
+        // First lock succeeds
+        let a = mutex.try_lock();
+        assert_eq!(a.as_ref().map(|r| **r), Some(42));
+
+        // Additional lock fails
+        let b = mutex.try_lock();
+        assert!(b.is_none());
+
+        // After dropping lock, it succeeds again
+        ::core::mem::drop(a);
+        let c = mutex.try_lock();
+        assert_eq!(c.as_ref().map(|r| **r), Some(42));
+    }
+
+    #[test]
+    fn test_into_inner() {
+        let m = TicketMutex::<_>::new(NonCopy(10));
+        assert_eq!(m.into_inner(), NonCopy(10));
+    }
+
+    #[test]
+    fn test_into_inner_drop() {
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = TicketMutex::<_>::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = m.into_inner();
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+
+    #[test]
+    fn test_mutex_arc_nested() {
+        // Tests nested mutexes and access
+        // to underlying data.
+        let arc = Arc::new(TicketMutex::<_>::new(1));
+        let arc2 = Arc::new(TicketMutex::<_>::new(arc));
+        let (tx, rx) = channel();
+        let _t = thread::spawn(move || {
+            let lock = arc2.lock();
+            let lock2 = lock.lock();
+            assert_eq!(*lock2, 1);
+            tx.send(()).unwrap();
+        });
+        rx.recv().unwrap();
+    }
+
+    #[test]
+    #[ignore = "Android uses panic_abort"]
+    fn test_mutex_arc_access_in_unwind() {
+        let arc = Arc::new(TicketMutex::<_>::new(1));
+        let arc2 = arc.clone();
+        let _ = thread::spawn(move || -> () {
+            struct Unwinder {
+                i: Arc<TicketMutex<i32>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    *self.i.lock() += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        })
+        .join();
+        let lock = arc.lock();
+        assert_eq!(*lock, 2);
+    }
+
+    #[test]
+    fn test_mutex_unsized() {
+        let mutex: &TicketMutex<[i32]> = &TicketMutex::<_>::new([1, 2, 3]);
+        {
+            let b = &mut *mutex.lock();
+            b[0] = 4;
+            b[2] = 5;
+        }
+        let comp: &[i32] = &[4, 2, 5];
+        assert_eq!(&*mutex.lock(), comp);
+    }
+
+    #[test]
+    fn is_locked() {
+        let mutex = TicketMutex::<_>::new(());
+        assert!(!mutex.is_locked());
+        let lock = mutex.lock();
+        assert!(mutex.is_locked());
+        drop(lock);
+        assert!(!mutex.is_locked());
+    }
+}
diff --git a/crates/spin/src/once.rs b/crates/spin/src/once.rs
new file mode 100644
index 0000000..b4202d4
--- /dev/null
+++ b/crates/spin/src/once.rs
@@ -0,0 +1,790 @@
+//! Synchronization primitives for one-time evaluation.
+
+use crate::{
+    atomic::{AtomicU8, Ordering},
+    RelaxStrategy, Spin,
+};
+use core::{cell::UnsafeCell, fmt, marker::PhantomData, mem::MaybeUninit};
+
+/// A primitive that provides lazy one-time initialization.
+///
+/// Unlike its `std::sync` equivalent, this is generalized such that the closure returns a
+/// value to be stored by the [`Once`] (`std::sync::Once` can be trivially emulated with
+/// `Once`).
+///
+/// Because [`Once::new`] is `const`, this primitive may be used to safely initialize statics.
+///
+/// # Examples
+///
+/// ```
+/// use spin;
+///
+/// static START: spin::Once = spin::Once::new();
+///
+/// START.call_once(|| {
+///     // run initialization here
+/// });
+/// ```
+pub struct Once<T = (), R = Spin> {
+    phantom: PhantomData<R>,
+    status: AtomicStatus,
+    data: UnsafeCell<MaybeUninit<T>>,
+}
+
+impl<T, R> Default for Once<T, R> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<T: fmt::Debug, R> fmt::Debug for Once<T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.get() {
+            Some(s) => write!(f, "Once {{ data: ")
+                .and_then(|()| s.fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Once {{ <uninitialized> }}"),
+        }
+    }
+}
+
+// Same unsafe impls as `std::sync::RwLock`, because this also allows for
+// concurrent reads.
+unsafe impl<T: Send + Sync, R> Sync for Once<T, R> {}
+unsafe impl<T: Send, R> Send for Once<T, R> {}
+
+mod status {
+    use super::*;
+
+    // SAFETY: This structure has an invariant, namely that the inner atomic u8 must *always* have
+    // a value for which there exists a valid Status. This means that users of this API must only
+    // be allowed to load and store `Status`es.
+    #[repr(transparent)]
+    pub struct AtomicStatus(AtomicU8);
+
+    // Four states that a Once can be in, encoded into the lower bits of `status` in
+    // the Once structure.
+    #[repr(u8)]
+    #[derive(Clone, Copy, Debug, PartialEq)]
+    pub enum Status {
+        Incomplete = 0x00,
+        Running = 0x01,
+        Complete = 0x02,
+        Panicked = 0x03,
+    }
+    impl Status {
+        // Construct a status from an inner u8 integer.
+        //
+        // # Safety
+        //
+        // For this to be safe, the inner number must have a valid corresponding enum variant.
+        unsafe fn new_unchecked(inner: u8) -> Self {
+            core::mem::transmute(inner)
+        }
+    }
+
+    impl AtomicStatus {
+        #[inline(always)]
+        pub const fn new(status: Status) -> Self {
+            // SAFETY: We got the value directly from status, so transmuting back is fine.
+            Self(AtomicU8::new(status as u8))
+        }
+        #[inline(always)]
+        pub fn load(&self, ordering: Ordering) -> Status {
+            // SAFETY: We know that the inner integer must have been constructed from a Status in
+            // the first place.
+            unsafe { Status::new_unchecked(self.0.load(ordering)) }
+        }
+        #[inline(always)]
+        pub fn store(&self, status: Status, ordering: Ordering) {
+            // SAFETY: While not directly unsafe, this is safe because the value was retrieved from
+            // a status, thus making transmutation safe.
+            self.0.store(status as u8, ordering);
+        }
+        #[inline(always)]
+        pub fn compare_exchange(
+            &self,
+            old: Status,
+            new: Status,
+            success: Ordering,
+            failure: Ordering,
+        ) -> Result<Status, Status> {
+            match self
+                .0
+                .compare_exchange(old as u8, new as u8, success, failure)
+            {
+                // SAFETY: A compare exchange will always return a value that was later stored into
+                // the atomic u8, but due to the invariant that it must be a valid Status, we know
+                // that both Ok(_) and Err(_) will be safely transmutable.
+                Ok(ok) => Ok(unsafe { Status::new_unchecked(ok) }),
+                Err(err) => Err(unsafe { Status::new_unchecked(err) }),
+            }
+        }
+        #[inline(always)]
+        pub fn get_mut(&mut self) -> &mut Status {
+            // SAFETY: Since we know that the u8 inside must be a valid Status, we can safely cast
+            // it to a &mut Status.
+            unsafe { &mut *((self.0.get_mut() as *mut u8).cast::<Status>()) }
+        }
+    }
+}
+use self::status::{AtomicStatus, Status};
+
+impl<T, R: RelaxStrategy> Once<T, R> {
+    /// Performs an initialization routine once and only once. The given closure
+    /// will be executed if this is the first time `call_once` has been called,
+    /// and otherwise the routine will *not* be invoked.
+    ///
+    /// This method will block the calling thread if another initialization
+    /// routine is currently running.
+    ///
+    /// When this function returns, it is guaranteed that some initialization
+    /// has run and completed (it may not be the closure specified). The
+    /// returned pointer will point to the result from the closure that was
+    /// run.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the [`Once`] previously panicked while attempting
+    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
+    /// primitives.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin;
+    ///
+    /// static INIT: spin::Once<usize> = spin::Once::new();
+    ///
+    /// fn get_cached_val() -> usize {
+    ///     *INIT.call_once(expensive_computation)
+    /// }
+    ///
+    /// fn expensive_computation() -> usize {
+    ///     // ...
+    /// # 2
+    /// }
+    /// ```
+    pub fn call_once<F: FnOnce() -> T>(&self, f: F) -> &T {
+        match self.try_call_once(|| Ok::<T, core::convert::Infallible>(f())) {
+            Ok(x) => x,
+            Err(void) => match void {},
+        }
+    }
+
+    /// This method is similar to `call_once`, but allows the given closure to
+    /// fail, and leaves the `Once` in an uninitialized state if it does.
+    ///
+    /// This method will block the calling thread if another initialization
+    /// routine is currently running.
+    ///
+    /// When this function returns without error, it is guaranteed that some
+    /// initialization has run and completed (it may not be the closure
+    /// specified). The returned reference will point to the result from the
+    /// closure that was run.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the [`Once`] previously panicked while attempting
+    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
+    /// primitives.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin;
+    ///
+    /// static INIT: spin::Once<usize> = spin::Once::new();
+    ///
+    /// fn get_cached_val() -> Result<usize, String> {
+    ///     INIT.try_call_once(expensive_fallible_computation).map(|x| *x)
+    /// }
+    ///
+    /// fn expensive_fallible_computation() -> Result<usize, String> {
+    ///     // ...
+    /// # Ok(2)
+    /// }
+    /// ```
+    pub fn try_call_once<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
+        if let Some(value) = self.get() {
+            Ok(value)
+        } else {
+            self.try_call_once_slow(f)
+        }
+    }
+
+    #[cold]
+    fn try_call_once_slow<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
+        loop {
+            let xchg = self.status.compare_exchange(
+                Status::Incomplete,
+                Status::Running,
+                Ordering::Acquire,
+                Ordering::Acquire,
+            );
+
+            match xchg {
+                Ok(_must_be_state_incomplete) => {
+                    // Impl is defined after the match for readability
+                }
+                Err(Status::Panicked) => panic!("Once panicked"),
+                Err(Status::Running) => match self.poll() {
+                    Some(v) => return Ok(v),
+                    None => continue,
+                },
+                Err(Status::Complete) => {
+                    return Ok(unsafe {
+                        // SAFETY: The status is Complete
+                        self.force_get()
+                    });
+                }
+                Err(Status::Incomplete) => {
+                    // The compare_exchange failed, so this shouldn't ever be reached. However,
+                    // if we decide to switch to compare_exchange_weak it will be safer to leave
+                    // this here than to hit an `unreachable!()`.
+                    continue;
+                }
+            }
+
+            // The compare-exchange succeeded, so we shall initialize it.
+
+            // We use a guard (Finish) to catch panics caused by builder
+            let finish = Finish {
+                status: &self.status,
+            };
+            let val = match f() {
+                Ok(val) => val,
+                Err(err) => {
+                    // If an error occurs, clean up everything and leave.
+                    core::mem::forget(finish);
+                    self.status.store(Status::Incomplete, Ordering::Release);
+                    return Err(err);
+                }
+            };
+            unsafe {
+                // SAFETY:
+                // `UnsafeCell`/deref: currently the only accessor, mutably
+                // and immutably by cas exclusion.
+                // `write`: pointer comes from `MaybeUninit`.
+                (*self.data.get()).as_mut_ptr().write(val);
+            };
+            // If there were to be a panic with unwind enabled, the code would
+            // short-circuit and never reach the point where it writes the inner data.
+            // The destructor for Finish will run, and poison the Once to ensure that other
+            // threads accessing it do not exhibit unwanted behavior, if there were to be
+            // any inconsistency in data structures caused by the panicking thread.
+            //
+            // However, f() is expected in the general case not to panic. In that case, we
+            // simply forget the guard, bypassing its destructor. We could theoretically
+            // clear a flag instead, but this eliminates the call to the destructor at
+            // compile time, and unconditionally poisons during an eventual panic, if
+            // unwinding is enabled.
+            core::mem::forget(finish);
+
+            // SAFETY: Release is required here, so that all memory accesses done in the
+            // closure when initializing, become visible to other threads that perform Acquire
+            // loads.
+            //
+            // And, we also know that the changes this thread has done will not magically
+            // disappear from our cache, so it does not need to be AcqRel.
+            self.status.store(Status::Complete, Ordering::Release);
+
+            // This next line is mainly an optimization.
+            return unsafe { Ok(self.force_get()) };
+        }
+    }
+
+    /// Spins until the [`Once`] contains a value.
+    ///
+    /// Note that in releases prior to `0.7`, this function had the behaviour of [`Once::poll`].
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the [`Once`] previously panicked while attempting
+    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
+    /// primitives.
+    pub fn wait(&self) -> &T {
+        loop {
+            match self.poll() {
+                Some(x) => break x,
+                None => R::relax(),
+            }
+        }
+    }
+
+    /// Like [`Once::get`], but will spin if the [`Once`] is in the process of being
+    /// initialized. If initialization has not even begun, `None` will be returned.
+    ///
+    /// Note that in releases prior to `0.7`, this function was named `wait`.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the [`Once`] previously panicked while attempting
+    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
+    /// primitives.
+    pub fn poll(&self) -> Option<&T> {
+        loop {
+            // SAFETY: Acquire is safe here, because if the status is COMPLETE, then we want to make
+            // sure that all memory accesses done while initializing that value are visible when
+            // we return a reference to the inner data after this load.
+            match self.status.load(Ordering::Acquire) {
+                Status::Incomplete => return None,
+                Status::Running => R::relax(), // We spin
+                Status::Complete => return Some(unsafe { self.force_get() }),
+                Status::Panicked => panic!("Once previously poisoned by a panicked thread"),
+            }
+        }
+    }
+}
+
+impl<T, R> Once<T, R> {
+    /// Initialization constant of [`Once`].
+    #[allow(clippy::declare_interior_mutable_const)]
+    pub const INIT: Self = Self {
+        phantom: PhantomData,
+        status: AtomicStatus::new(Status::Incomplete),
+        data: UnsafeCell::new(MaybeUninit::uninit()),
+    };
+
+    /// Creates a new [`Once`].
+    pub const fn new() -> Self {
+        Self::INIT
+    }
+
+    /// Creates a new initialized [`Once`].
+    pub const fn initialized(data: T) -> Self {
+        Self {
+            phantom: PhantomData,
+            status: AtomicStatus::new(Status::Complete),
+            data: UnsafeCell::new(MaybeUninit::new(data)),
+        }
+    }
+
+    /// Retrieve a pointer to the inner data.
+    ///
+    /// While this method itself is safe, accessing the pointer before the [`Once`] has been
+    /// initialized is UB, unless the data has already been written through a pointer previously
+    /// obtained from this method.
+    pub fn as_mut_ptr(&self) -> *mut T {
+        // SAFETY:
+        // * MaybeUninit<T> always has exactly the same layout as T
+        self.data.get().cast::<T>()
+    }
+
+    /// Get a reference to the initialized instance. Must only be called once COMPLETE.
+    unsafe fn force_get(&self) -> &T {
+        // SAFETY:
+        // * `UnsafeCell`/inner deref: data never changes again
+        // * `MaybeUninit`/outer deref: data was initialized
+        &*(*self.data.get()).as_ptr()
+    }
+
+    /// Get a reference to the initialized instance. Must only be called once COMPLETE.
+    unsafe fn force_get_mut(&mut self) -> &mut T {
+        // SAFETY:
+        // * `UnsafeCell`/inner deref: data never changes again
+        // * `MaybeUninit`/outer deref: data was initialized
+        &mut *(*self.data.get()).as_mut_ptr()
+    }
+
+    /// Get a reference to the initialized instance. Must only be called once COMPLETE.
+    unsafe fn force_into_inner(self) -> T {
+        // SAFETY:
+        // * `UnsafeCell`/inner deref: data never changes again
+        // * `MaybeUninit`/outer deref: data was initialized
+        (*self.data.get()).as_ptr().read()
+    }
+
+    /// Returns a reference to the inner value if the [`Once`] has been initialized.
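+    ///
+    /// # Example
+    ///
+    /// A minimal usage sketch:
+    ///
+    /// ```
+    /// let once = spin::Once::<usize>::new();
+    /// assert!(once.get().is_none());
+    ///
+    /// once.call_once(|| 2);
+    /// assert_eq!(once.get(), Some(&2));
+    /// ```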
+    pub fn get(&self) -> Option<&T> {
+        // SAFETY: Just as with `poll`, Acquire is safe here because we want to be able to see the
+        // nonatomic stores done when initializing, once we have loaded and checked the status.
+        match self.status.load(Ordering::Acquire) {
+            Status::Complete => Some(unsafe { self.force_get() }),
+            _ => None,
+        }
+    }
+
+    /// Returns a reference to the inner value on the unchecked assumption that the [`Once`] has been initialized.
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
+    /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
+    /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
+    /// checking initialization is unacceptable and the `Once` has already been initialized.
+    pub unsafe fn get_unchecked(&self) -> &T {
+        debug_assert_eq!(
+            self.status.load(Ordering::SeqCst),
+            Status::Complete,
+            "Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
+        );
+        self.force_get()
+    }
+
+    /// Returns a mutable reference to the inner value if the [`Once`] has been initialized.
+    ///
+    /// Because this method requires a mutable reference to the [`Once`], no synchronization
+    /// overhead is required to access the inner value. In effect, it is zero-cost.
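+    ///
+    /// # Example
+    ///
+    /// A minimal usage sketch:
+    ///
+    /// ```
+    /// let mut once = spin::Once::<usize>::new();
+    /// assert!(once.get_mut().is_none());
+    ///
+    /// once.call_once(|| 9);
+    /// *once.get_mut().unwrap() += 1;
+    /// assert_eq!(once.get(), Some(&10));
+    /// ```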
+    pub fn get_mut(&mut self) -> Option<&mut T> {
+        match *self.status.get_mut() {
+            Status::Complete => Some(unsafe { self.force_get_mut() }),
+            _ => None,
+        }
+    }
+
+    /// Returns a mutable reference to the inner value
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
+    /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
+    /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
+    /// checking initialization is unacceptable and the `Once` has already been initialized.
+    pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
+        debug_assert_eq!(
+            self.status.load(Ordering::SeqCst),
+            Status::Complete,
+            "Attempted to access an unintialized Once.  If this was to run without debug checks, this would be undefined behavior.  This is a serious bug and you must fix it.",
+        );
+        self.force_get_mut()
+    }
+
+    /// Returns the inner value if the [`Once`] has been initialized.
+    ///
+    /// Because this method requires ownership of the [`Once`], no synchronization overhead
+    /// is required to access the inner value. In effect, it is zero-cost.
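+    ///
+    /// # Example
+    ///
+    /// A minimal usage sketch:
+    ///
+    /// ```
+    /// let once = spin::Once::<usize>::new();
+    /// once.call_once(|| 5);
+    /// assert_eq!(once.try_into_inner(), Some(5));
+    /// ```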
+    pub fn try_into_inner(mut self) -> Option<T> {
+        match *self.status.get_mut() {
+            Status::Complete => Some(unsafe { self.force_into_inner() }),
+            _ => None,
+        }
+    }
+
+    /// Returns the inner value if the [`Once`] has been initialized.
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if the `Once` has not already been initialized because uninitialized
+    /// memory will be read and returned, immediately triggering undefined behaviour.
+    /// This can be useful if the `Once` has already been initialized and you want to bypass an
+    /// `Option` check.
+    pub unsafe fn into_inner_unchecked(self) -> T {
+        debug_assert_eq!(
+            self.status.load(Ordering::SeqCst),
+            Status::Complete,
+            "Attempted to access an unintialized Once.  If this was to run without debug checks, this would be undefined behavior.  This is a serious bug and you must fix it.",
+        );
+        self.force_into_inner()
+    }
+
+    /// Checks whether the value has been initialized.
+    ///
+    /// This is done using [`Acquire`](core::sync::atomic::Ordering::Acquire) ordering, and
+    /// therefore it is safe to access the value directly via
+    /// [`get_unchecked`](Self::get_unchecked) if this returns true.
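+    ///
+    /// # Example
+    ///
+    /// A minimal usage sketch:
+    ///
+    /// ```
+    /// let once = spin::Once::<usize>::new();
+    /// assert!(!once.is_completed());
+    ///
+    /// once.call_once(|| 7);
+    /// assert!(once.is_completed());
+    /// ```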
+    pub fn is_completed(&self) -> bool {
+        // TODO: Add a similar variant for Relaxed?
+        self.status.load(Ordering::Acquire) == Status::Complete
+    }
+}
+
+impl<T, R> From<T> for Once<T, R> {
+    fn from(data: T) -> Self {
+        Self::initialized(data)
+    }
+}
+
+impl<T, R> Drop for Once<T, R> {
+    fn drop(&mut self) {
+        // No need to do any atomic access here, we have &mut!
+        if *self.status.get_mut() == Status::Complete {
+            unsafe {
+                //TODO: Use MaybeUninit::assume_init_drop once stabilised
+                core::ptr::drop_in_place((*self.data.get()).as_mut_ptr());
+            }
+        }
+    }
+}
+
+struct Finish<'a> {
+    status: &'a AtomicStatus,
+}
+
+impl<'a> Drop for Finish<'a> {
+    fn drop(&mut self) {
+        // While using Relaxed here would most likely not be an issue, we use SeqCst anyway.
+        // This is mainly because panics are not meant to be fast at all, but also because if
+        // there were to be a compiler bug which reorders accesses within the same thread,
+        // where it should not, we want to be sure that the panic really is handled, and does
+        // not cause additional problems. SeqCst will therefore help guarding against such
+        // bugs.
+        self.status.store(Status::Panicked, Ordering::SeqCst);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::prelude::v1::*;
+
+    use std::sync::atomic::AtomicU32;
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
+    use std::thread;
+
+    use super::*;
+
+    #[test]
+    fn smoke_once() {
+        static O: Once = Once::new();
+        let mut a = 0;
+        O.call_once(|| a += 1);
+        assert_eq!(a, 1);
+        O.call_once(|| a += 1);
+        assert_eq!(a, 1);
+    }
+
+    #[test]
+    fn smoke_once_value() {
+        static O: Once<usize> = Once::new();
+        let a = O.call_once(|| 1);
+        assert_eq!(*a, 1);
+        let b = O.call_once(|| 2);
+        assert_eq!(*b, 1);
+    }
+
+    #[test]
+    fn stampede_once() {
+        static O: Once = Once::new();
+        static mut RUN: bool = false;
+
+        let (tx, rx) = channel();
+        let mut ts = Vec::new();
+        for _ in 0..10 {
+            let tx = tx.clone();
+            ts.push(thread::spawn(move || {
+                for _ in 0..4 {
+                    thread::yield_now()
+                }
+                unsafe {
+                    O.call_once(|| {
+                        assert!(!RUN);
+                        RUN = true;
+                    });
+                    assert!(RUN);
+                }
+                tx.send(()).unwrap();
+            }));
+        }
+
+        unsafe {
+            O.call_once(|| {
+                assert!(!RUN);
+                RUN = true;
+            });
+            assert!(RUN);
+        }
+
+        for _ in 0..10 {
+            rx.recv().unwrap();
+        }
+
+        for t in ts {
+            t.join().unwrap();
+        }
+    }
+
+    #[test]
+    fn get() {
+        static INIT: Once<usize> = Once::new();
+
+        assert!(INIT.get().is_none());
+        INIT.call_once(|| 2);
+        assert_eq!(INIT.get().map(|r| *r), Some(2));
+    }
+
+    #[test]
+    fn get_no_wait() {
+        static INIT: Once<usize> = Once::new();
+
+        assert!(INIT.get().is_none());
+        let t = thread::spawn(move || {
+            INIT.call_once(|| {
+                thread::sleep(std::time::Duration::from_secs(3));
+                42
+            });
+        });
+        assert!(INIT.get().is_none());
+
+        t.join().unwrap();
+    }
+
+    #[test]
+    fn poll() {
+        static INIT: Once<usize> = Once::new();
+
+        assert!(INIT.poll().is_none());
+        INIT.call_once(|| 3);
+        assert_eq!(INIT.poll().map(|r| *r), Some(3));
+    }
+
+    #[test]
+    fn wait() {
+        static INIT: Once<usize> = Once::new();
+
+        let t = std::thread::spawn(|| {
+            assert_eq!(*INIT.wait(), 3);
+            assert!(INIT.is_completed());
+        });
+
+        for _ in 0..4 {
+            thread::yield_now()
+        }
+
+        assert!(INIT.poll().is_none());
+        INIT.call_once(|| 3);
+
+        t.join().unwrap();
+    }
+
+    #[test]
+    #[ignore = "Android uses panic_abort"]
+    fn panic() {
+        use std::panic;
+
+        static INIT: Once = Once::new();
+
+        // poison the once
+        let t = panic::catch_unwind(|| {
+            INIT.call_once(|| panic!());
+        });
+        assert!(t.is_err());
+
+        // poisoning propagates
+        let t = panic::catch_unwind(|| {
+            INIT.call_once(|| {});
+        });
+        assert!(t.is_err());
+    }
+
+    #[test]
+    fn init_constant() {
+        static O: Once = Once::INIT;
+        let mut a = 0;
+        O.call_once(|| a += 1);
+        assert_eq!(a, 1);
+        O.call_once(|| a += 1);
+        assert_eq!(a, 1);
+    }
+
+    static mut CALLED: bool = false;
+
+    struct DropTest {}
+
+    impl Drop for DropTest {
+        fn drop(&mut self) {
+            unsafe {
+                CALLED = true;
+            }
+        }
+    }
+
+    #[test]
+    fn try_call_once_err() {
+        let once = Once::<_, Spin>::new();
+        let shared = Arc::new((once, AtomicU32::new(0)));
+
+        let (tx, rx) = channel();
+
+        let t0 = {
+            let shared = shared.clone();
+            thread::spawn(move || {
+                let (once, called) = &*shared;
+
+                once.try_call_once(|| {
+                    called.fetch_add(1, Ordering::AcqRel);
+                    tx.send(()).unwrap();
+                    thread::sleep(std::time::Duration::from_millis(50));
+                    Err(())
+                })
+                .ok();
+            })
+        };
+
+        let t1 = {
+            let shared = shared.clone();
+            thread::spawn(move || {
+                rx.recv().unwrap();
+                let (once, called) = &*shared;
+                assert_eq!(
+                    called.load(Ordering::Acquire),
+                    1,
+                    "leader thread did not run first"
+                );
+
+                once.call_once(|| {
+                    called.fetch_add(1, Ordering::AcqRel);
+                });
+            })
+        };
+
+        t0.join().unwrap();
+        t1.join().unwrap();
+
+        assert_eq!(shared.1.load(Ordering::Acquire), 2);
+    }
+
+    // This is sort of two test cases, but if we write them as separate test methods
+    // they can be executed concurrently and then fail some small fraction of the
+    // time.
+    #[test]
+    fn drop_occurs_and_skip_uninit_drop() {
+        unsafe {
+            CALLED = false;
+        }
+
+        {
+            let once = Once::<_>::new();
+            once.call_once(|| DropTest {});
+        }
+
+        assert!(unsafe { CALLED });
+        // Now test that we skip drops for the uninitialized case.
+        unsafe {
+            CALLED = false;
+        }
+
+        let once = Once::<DropTest>::new();
+        drop(once);
+
+        assert!(unsafe { !CALLED });
+    }
+
+    #[test]
+    fn call_once_test() {
+        for _ in 0..20 {
+            use std::sync::atomic::AtomicUsize;
+            use std::sync::Arc;
+            use std::time::Duration;
+            let share = Arc::new(AtomicUsize::new(0));
+            let once = Arc::new(Once::<_, Spin>::new());
+            let mut hs = Vec::new();
+            for _ in 0..8 {
+                let h = thread::spawn({
+                    let share = share.clone();
+                    let once = once.clone();
+                    move || {
+                        thread::sleep(Duration::from_millis(10));
+                        once.call_once(|| {
+                            share.fetch_add(1, Ordering::SeqCst);
+                        });
+                    }
+                });
+                hs.push(h);
+            }
+            for h in hs {
+                h.join().unwrap();
+            }
+            assert_eq!(1, share.load(Ordering::SeqCst));
+        }
+    }
+}
diff --git a/crates/spin/src/relax.rs b/crates/spin/src/relax.rs
new file mode 100644
index 0000000..8842f80
--- /dev/null
+++ b/crates/spin/src/relax.rs
@@ -0,0 +1,61 @@
+//! Strategies that determine the behaviour of locks when encountering contention.
+
+/// A trait implemented by spinning relax strategies.
+pub trait RelaxStrategy {
+    /// Perform the relaxing operation during a period of contention.
+    fn relax();
+}
+
+/// A strategy that rapidly spins while informing the CPU that it should power down non-essential components via
+/// [`core::hint::spin_loop`].
+///
+/// Note that spinning is a 'dumb' strategy and most schedulers cannot correctly differentiate it from useful work,
+/// thereby misallocating even more CPU time to the spinning process. This is known as
+/// ['priority inversion'](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html).
+///
+/// If you see signs that priority inversion is occurring, consider switching to [`Yield`] or, even better, not using a
+/// spinlock at all and opting for a proper scheduler-aware lock. Remember also that different targets, operating
+/// systems, schedulers, and even the same scheduler with different workloads will exhibit different behaviour. Just
+/// because priority inversion isn't occurring in your tests does not mean that it will not occur. Use a scheduler-
+/// aware lock if at all possible.
+pub struct Spin;
+
+impl RelaxStrategy for Spin {
+    #[inline(always)]
+    fn relax() {
+        // Use the deprecated spin_loop_hint() to ensure that we don't get
+        // a higher MSRV than we need to.
+        #[allow(deprecated)]
+        core::sync::atomic::spin_loop_hint();
+    }
+}
+
+/// A strategy that yields the current time slice to the scheduler in favour of other threads or processes.
+///
+/// This is generally used as a strategy for minimising power consumption and priority inversion on targets that have a
+/// standard library available. Note that such targets have scheduler-integrated concurrency primitives available, and
+/// you should generally use these instead, except in rare circumstances.
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+pub struct Yield;
+
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+impl RelaxStrategy for Yield {
+    #[inline(always)]
+    fn relax() {
+        std::thread::yield_now();
+    }
+}
+
+/// A strategy that rapidly spins, without telling the CPU to do any powering down.
+///
+/// You almost certainly do not want to use this. Use [`Spin`] instead. It exists for completeness and for targets
+/// that, for some reason, miscompile or do not support spin hint intrinsics despite attempting to generate code for
+/// them (i.e., this is a workaround for possible compiler bugs).
+pub struct Loop;
+
+impl RelaxStrategy for Loop {
+    #[inline(always)]
+    fn relax() {}
+}
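The strategies above are consumed through the second type parameter of the crate's lock types, which defaults to `Spin` (see `RwLock<T, R = Spin>` in the next file). As a minimal consumer-side sketch (not part of the diff, assuming the vendored module layout `spin::relax` / `spin::rwlock` added in this change), a custom strategy only needs to implement `RelaxStrategy::relax`:

use spin::relax::RelaxStrategy;
use spin::rwlock::RwLock;

// Hypothetical strategy: spin a bounded number of times with the CPU hint
// before the lock's acquire loop retries its atomic operation.
struct BoundedSpin;

impl RelaxStrategy for BoundedSpin {
    #[inline(always)]
    fn relax() {
        for _ in 0..16 {
            core::hint::spin_loop();
        }
    }
}

// The strategy is selected via the `R` type parameter; `Spin` is the default.
static COUNTER: RwLock<u32, BoundedSpin> = RwLock::new(0);

fn bump() -> u32 {
    let mut guard = COUNTER.write();
    *guard += 1;
    *guard
}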
diff --git a/crates/spin/src/rwlock.rs b/crates/spin/src/rwlock.rs
new file mode 100644
index 0000000..beae5c1
--- /dev/null
+++ b/crates/spin/src/rwlock.rs
@@ -0,0 +1,1166 @@
+//! A lock that provides data access to either one writer or many readers.
+
+use crate::{
+    atomic::{AtomicUsize, Ordering},
+    RelaxStrategy, Spin,
+};
+use core::{
+    cell::UnsafeCell,
+    fmt,
+    marker::PhantomData,
+    mem,
+    mem::ManuallyDrop,
+    ops::{Deref, DerefMut},
+};
+
+/// A lock that provides data access to either one writer or many readers.
+///
+/// This lock behaves in a similar manner to its namesake `std::sync::RwLock` but uses
+/// spinning for synchronisation instead. Unlike its namesake, this lock does not
+/// track lock poisoning.
+///
+/// This type of lock allows a number of readers or at most one writer at any
+/// point in time. The write portion of this lock typically allows modification
+/// of the underlying data (exclusive access) and the read portion of this lock
+/// typically allows for read-only access (shared access).
+///
+/// The type parameter `T` represents the data that this lock protects. It is
+/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
+/// allow concurrent access through readers. The RAII guards returned from the
+/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
+/// to allow access to the contents of the lock.
+///
+/// An [`RwLockUpgradableGuard`](RwLockUpgradableGuard) can be upgraded to a
+/// writable guard through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade)
+/// and [`RwLockUpgradableGuard::try_upgrade`](RwLockUpgradableGuard::try_upgrade) functions.
+/// Writable or upgradeable guards can be downgraded through their respective `downgrade`
+/// functions.
+///
+/// Based on Facebook's
+/// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h).
+/// This implementation is unfair to writers - if the lock always has readers, then no writers will
+/// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue as no
+/// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken
+/// when there are existing readers. However if the lock is that highly contended and writes are
+/// crucial then this implementation may be a poor choice.
+///
+/// # Examples
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::RwLock::new(5);
+///
+/// // many reader locks can be held at once
+/// {
+///     let r1 = lock.read();
+///     let r2 = lock.read();
+///     assert_eq!(*r1, 5);
+///     assert_eq!(*r2, 5);
+/// } // read locks are dropped at this point
+///
+/// // only one write lock may be held, however
+/// {
+///     let mut w = lock.write();
+///     *w += 1;
+///     assert_eq!(*w, 6);
+/// } // write lock is dropped here
+/// ```
+pub struct RwLock<T: ?Sized, R = Spin> {
+    phantom: PhantomData<R>,
+    lock: AtomicUsize,
+    data: UnsafeCell<T>,
+}
+
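+// Lock word layout: bit 0 (WRITER) marks an exclusive writer, bit 1 (UPGRADED)
+// marks an upgradeable reader, and the bits above them count plain readers in
+// units of READER.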
+const READER: usize = 1 << 2;
+const UPGRADED: usize = 1 << 1;
+const WRITER: usize = 1;
+
+/// A guard that provides immutable data access.
+///
+/// When the guard falls out of scope it will decrement the read count,
+/// potentially releasing the lock.
+pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
+    lock: &'a AtomicUsize,
+    data: *const T,
+}
+
+/// A guard that provides mutable data access.
+///
+/// When the guard falls out of scope it will release the lock.
+pub struct RwLockWriteGuard<'a, T: 'a + ?Sized, R = Spin> {
+    phantom: PhantomData<R>,
+    inner: &'a RwLock<T, R>,
+    data: *mut T,
+}
+
+/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`].
+///
+/// No writers or other upgradeable guards can exist while this is in scope. New reader
+/// creation is prevented (to alleviate writer starvation) but there may be existing readers
+/// when the lock is acquired.
+///
+/// When the guard falls out of scope it will release the lock.
+pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized, R = Spin> {
+    phantom: PhantomData<R>,
+    inner: &'a RwLock<T, R>,
+    data: *const T,
+}
+
+// Same unsafe impls as `std::sync::RwLock`
+unsafe impl<T: ?Sized + Send, R> Send for RwLock<T, R> {}
+unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLock<T, R> {}
+
+unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockWriteGuard<'_, T, R> {}
+unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockWriteGuard<'_, T, R> {}
+
+unsafe impl<T: ?Sized + Sync> Send for RwLockReadGuard<'_, T> {}
+unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
+
+unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockUpgradableGuard<'_, T, R> {}
+unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockUpgradableGuard<'_, T, R> {}
+
+impl<T, R> RwLock<T, R> {
+    /// Creates a new spinlock wrapping the supplied data.
+    ///
+    /// May be used statically:
+    ///
+    /// ```
+    /// use spin;
+    ///
+    /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
+    ///
+    /// fn demo() {
+    ///     let lock = RW_LOCK.read();
+    ///     // do something with lock
+    ///     drop(lock);
+    /// }
+    /// ```
+    #[inline]
+    pub const fn new(data: T) -> Self {
+        RwLock {
+            phantom: PhantomData,
+            lock: AtomicUsize::new(0),
+            data: UnsafeCell::new(data),
+        }
+    }
+
+    /// Consumes this `RwLock`, returning the underlying data.
+    #[inline]
+    pub fn into_inner(self) -> T {
+        // We know statically that there are no outstanding references to
+        // `self` so there's no need to lock.
+        let RwLock { data, .. } = self;
+        data.into_inner()
+    }
+    /// Returns a mutable pointer to the underlying data.
+    ///
+    /// This is mostly meant to be used for applications which require manual unlocking, but where
+    /// storing both the lock and the pointer to the inner data gets inefficient.
+    ///
+    /// While this is safe, writing to the data is undefined behavior unless the current thread has
+    /// acquired a write lock, and reading requires either a read or write lock.
+    ///
+    /// # Example
+    /// ```
+    /// let lock = spin::RwLock::new(42);
+    ///
+    /// unsafe {
+    ///     core::mem::forget(lock.write());
+    ///
+    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
+    ///     lock.as_mut_ptr().write(58);
+    ///
+    ///     lock.force_write_unlock();
+    /// }
+    ///
+    /// assert_eq!(*lock.read(), 58);
+    ///
+    /// ```
+    #[inline(always)]
+    pub fn as_mut_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+impl<T: ?Sized, R: RelaxStrategy> RwLock<T, R> {
+    /// Locks this rwlock with shared read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers which
+    /// hold the lock. There may be other readers currently inside the lock when
+    /// this method returns. This method does not provide any guarantees with
+    /// respect to the ordering of whether contentious readers or writers will
+    /// acquire the lock first.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    /// {
+    ///     let mut data = mylock.read();
+    ///     // The lock is now locked and the data can be read
+    ///     println!("{}", *data);
+    ///     // The lock is dropped
+    /// }
+    /// ```
+    #[inline]
+    pub fn read(&self) -> RwLockReadGuard<T> {
+        loop {
+            match self.try_read() {
+                Some(guard) => return guard,
+                None => R::relax(),
+            }
+        }
+    }
+
+    /// Lock this rwlock with exclusive write access, blocking the current
+    /// thread until it can be acquired.
+    ///
+    /// This function will not return while other writers or other readers
+    /// currently have access to the lock.
+    ///
+    /// Returns an RAII guard which will drop the write access of this rwlock
+    /// when dropped.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    /// {
+    ///     let mut data = mylock.write();
+    ///     // The lock is now locked and the data can be written
+    ///     *data += 1;
+    ///     // The lock is dropped
+    /// }
+    /// ```
+    #[inline]
+    pub fn write(&self) -> RwLockWriteGuard<T, R> {
+        loop {
+            match self.try_write_internal(false) {
+                Some(guard) => return guard,
+                None => R::relax(),
+            }
+        }
+    }
+
+    /// Obtain a readable lock guard that can later be upgraded to a writable lock guard.
+    /// Upgrades can be done through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) method.
+    #[inline]
+    pub fn upgradeable_read(&self) -> RwLockUpgradableGuard<T, R> {
+        loop {
+            match self.try_upgradeable_read() {
+                Some(guard) => return guard,
+                None => R::relax(),
+            }
+        }
+    }
+}
+
+impl<T: ?Sized, R> RwLock<T, R> {
+    // Acquire a read lock, returning the new lock value.
+    fn acquire_reader(&self) -> usize {
+        // An arbitrary cap that allows us to catch overflows long before they happen
+        const MAX_READERS: usize = core::usize::MAX / READER / 2;
+
+        let value = self.lock.fetch_add(READER, Ordering::Acquire);
+
+        if value > MAX_READERS * READER {
+            self.lock.fetch_sub(READER, Ordering::Relaxed);
+            panic!("Too many lock readers, cannot safely proceed");
+        } else {
+            value
+        }
+    }
+
+    /// Attempt to acquire this lock with shared read access.
+    ///
+    /// This function will never block and will return immediately if `read`
+    /// would otherwise succeed. Returns `Some` of an RAII guard which will
+    /// release the shared access of this thread when dropped, or `None` if the
+    /// access could not be granted. This method does not provide any
+    /// guarantees with respect to the ordering of whether contentious readers
+    /// or writers will acquire the lock first.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    /// {
+    ///     match mylock.try_read() {
+    ///         Some(data) => {
+    ///             // The lock is now locked and the data can be read
+    ///             println!("{}", *data);
+    ///             // The lock is dropped
+    ///         },
+    ///         None => (), // no cigar
+    ///     };
+    /// }
+    /// ```
+    #[inline]
+    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
+        let value = self.acquire_reader();
+
+        // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
+        // This helps reduce writer starvation.
+        if value & (WRITER | UPGRADED) != 0 {
+            // Lock is taken, undo.
+            self.lock.fetch_sub(READER, Ordering::Release);
+            None
+        } else {
+            Some(RwLockReadGuard {
+                lock: &self.lock,
+                data: unsafe { &*self.data.get() },
+            })
+        }
+    }
+
+    /// Return the number of readers that currently hold the lock (including upgradable readers).
+    ///
+    /// # Safety
+    ///
+    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
+    pub fn reader_count(&self) -> usize {
+        let state = self.lock.load(Ordering::Relaxed);
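+        // Plain readers occupy the bits above UPGRADED, so dividing by READER
+        // counts them; a held upgradeable guard contributes one more reader.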
+        state / READER + (state & UPGRADED) / UPGRADED
+    }
+
+    /// Return the number of writers that currently hold the lock.
+    ///
+    /// Because [`RwLock`] guarantees exclusive mutable access, this function may only return either `0` or `1`.
+    ///
+    /// # Safety
+    ///
+    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
+    pub fn writer_count(&self) -> usize {
+        (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER
+    }
+
+    /// Force decrement the reader count.
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
+    /// live, or if called more times than `read` has been called, but can be
+    /// useful in FFI contexts where the caller doesn't know how to deal with
+    /// RAII. The underlying atomic operation uses `Ordering::Release`.
+    #[inline]
+    pub unsafe fn force_read_decrement(&self) {
+        debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
+        self.lock.fetch_sub(READER, Ordering::Release);
+    }
+
+    /// Force unlock exclusive write access.
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
+    /// live, or if called when there are current readers, but can be useful in
+    /// FFI contexts where the caller doesn't know how to deal with RAII. The
+    /// underlying atomic operation uses `Ordering::Release`.
+    #[inline]
+    pub unsafe fn force_write_unlock(&self) {
+        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
+        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
+    }
+
+    #[inline(always)]
+    fn try_write_internal(&self, strong: bool) -> Option<RwLockWriteGuard<T, R>> {
+        if compare_exchange(
+            &self.lock,
+            0,
+            WRITER,
+            Ordering::Acquire,
+            Ordering::Relaxed,
+            strong,
+        )
+        .is_ok()
+        {
+            Some(RwLockWriteGuard {
+                phantom: PhantomData,
+                inner: self,
+                data: unsafe { &mut *self.data.get() },
+            })
+        } else {
+            None
+        }
+    }
+
+    /// Attempt to lock this rwlock with exclusive write access.
+    ///
+    /// This function does not ever block, and it will return `None` if a call
+    /// to `write` would otherwise block. If successful, an RAII guard is
+    /// returned.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    /// {
+    ///     match mylock.try_write() {
+    ///         Some(mut data) => {
+    ///             // The lock is now locked and the data can be written
+    ///             *data += 1;
+    ///             // The lock is implicitly dropped
+    ///         },
+    ///         None => (), // no cigar
+    ///     };
+    /// }
+    /// ```
+    #[inline]
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<T, R>> {
+        self.try_write_internal(true)
+    }
+
+    /// Tries to obtain an upgradeable lock guard.
+    #[inline]
+    pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradableGuard<T, R>> {
+        if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
+            Some(RwLockUpgradableGuard {
+                phantom: PhantomData,
+                inner: self,
+                data: unsafe { &*self.data.get() },
+            })
+        } else {
+            // We can't unflip the UPGRADED bit back just yet as there is another upgradeable or write lock.
+            // When they unlock, they will clear the bit.
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
+    /// take place -- the mutable borrow statically guarantees no locks exist.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let mut lock = spin::RwLock::new(0);
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.read(), 10);
+    /// ```
+    pub fn get_mut(&mut self) -> &mut T {
+        // We know statically that there are no other references to `self`, so
+        // there's no need to lock the inner lock.
+        unsafe { &mut *self.data.get() }
+    }
+}
+
+impl<T: ?Sized + fmt::Debug, R> fmt::Debug for RwLock<T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_read() {
+            Some(guard) => write!(f, "RwLock {{ data: ")
+                .and_then(|()| (&*guard).fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "RwLock {{ <locked> }}"),
+        }
+    }
+}
+
+impl<T: ?Sized + Default, R> Default for RwLock<T, R> {
+    fn default() -> Self {
+        Self::new(Default::default())
+    }
+}
+
+impl<T, R> From<T> for RwLock<T, R> {
+    fn from(data: T) -> Self {
+        Self::new(data)
+    }
+}
+
+impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
+    /// Leak the lock guard, yielding a reference to the underlying data.
+    ///
+    /// Note that this function will leave the original lock read-locked forever:
+    /// other readers may still acquire the lock, but writers never will.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let data: &i32 = spin::RwLockReadGuard::leak(mylock.read());
+    ///
+    /// assert_eq!(*data, 0);
+    /// ```
+    #[inline]
+    pub fn leak(this: Self) -> &'rwlock T {
+        let this = ManuallyDrop::new(this);
+        // Safety: We know statically that only we are referencing data
+        unsafe { &*this.data }
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'rwlock, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'rwlock, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized, R: RelaxStrategy> RwLockUpgradableGuard<'rwlock, T, R> {
+    /// Upgrades an upgradeable lock guard to a writable lock guard.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
+    /// let writable = upgradeable.upgrade();
+    /// ```
+    #[inline]
+    pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T, R> {
+        loop {
+            self = match self.try_upgrade_internal(false) {
+                Ok(guard) => return guard,
+                Err(e) => e,
+            };
+
+            R::relax();
+        }
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> {
+    #[inline(always)]
+    fn try_upgrade_internal(self, strong: bool) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
+        if compare_exchange(
+            &self.inner.lock,
+            UPGRADED,
+            WRITER,
+            Ordering::Acquire,
+            Ordering::Relaxed,
+            strong,
+        )
+        .is_ok()
+        {
+            let inner = self.inner;
+
+            // Forget the old guard so its destructor doesn't run (before mutably aliasing data below)
+            mem::forget(self);
+
+            // Upgrade successful
+            Ok(RwLockWriteGuard {
+                phantom: PhantomData,
+                inner,
+                data: unsafe { &mut *inner.data.get() },
+            })
+        } else {
+            Err(self)
+        }
+    }
+
+    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
+    ///
+    /// match upgradeable.try_upgrade() {
+    ///     Ok(writable) => /* upgrade successful - use writable lock guard */ (),
+    ///     Err(upgradeable) => /* upgrade unsuccessful */ (),
+    /// };
+    /// ```
+    #[inline]
+    pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
+        self.try_upgrade_internal(true)
+    }
+
+    #[inline]
+    /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(1);
+    ///
+    /// let upgradeable = mylock.upgradeable_read();
+    /// assert!(mylock.try_read().is_none());
+    /// assert_eq!(*upgradeable, 1);
+    ///
+    /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin
+    /// assert!(mylock.try_read().is_some());
+    /// assert_eq!(*readable, 1);
+    /// ```
+    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
+        // Reserve the read guard for ourselves
+        self.inner.acquire_reader();
+
+        let inner = self.inner;
+
+        // Dropping self removes the UPGRADED bit
+        mem::drop(self);
+
+        RwLockReadGuard {
+            lock: &inner.lock,
+            data: unsafe { &*inner.data.get() },
+        }
+    }
+
+    /// Leak the lock guard, yielding a reference to the underlying data.
+    ///
+    /// Note that this function will permanently lock the original lock.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let data: &i32 = spin::RwLockUpgradableGuard::leak(mylock.upgradeable_read());
+    ///
+    /// assert_eq!(*data, 0);
+    /// ```
+    #[inline]
+    pub fn leak(this: Self) -> &'rwlock T {
+        let this = ManuallyDrop::new(this);
+        // Safety: We know statically that only we are referencing data
+        unsafe { &*this.data }
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockUpgradableGuard<'rwlock, T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockUpgradableGuard<'rwlock, T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> {
+    /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let mut writable = mylock.write();
+    /// *writable = 1;
+    ///
+    /// let readable = writable.downgrade(); // This is guaranteed not to spin
+    /// # let readable_2 = mylock.try_read().unwrap();
+    /// assert_eq!(*readable, 1);
+    /// ```
+    #[inline]
+    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
+        // Reserve the read guard for ourselves
+        self.inner.acquire_reader();
+
+        let inner = self.inner;
+
+        // Dropping self clears the WRITER (and any pending UPGRADED) bit
+        mem::drop(self);
+
+        RwLockReadGuard {
+            lock: &inner.lock,
+            data: unsafe { &*inner.data.get() },
+        }
+    }
+
+    /// Downgrades the writable lock guard to an upgradable, shared lock guard. Cannot fail and is guaranteed not to spin.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let mut writable = mylock.write();
+    /// *writable = 1;
+    ///
+    /// let readable = writable.downgrade_to_upgradeable(); // This is guaranteed not to spin
+    /// assert_eq!(*readable, 1);
+    /// ```
+    #[inline]
+    pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T, R> {
+        debug_assert_eq!(
+            self.inner.lock.load(Ordering::Acquire) & (WRITER | UPGRADED),
+            WRITER
+        );
+
+        // Swap our exclusive hold for an upgradeable one by storing UPGRADED (this also clears WRITER)
+        self.inner.lock.store(UPGRADED, Ordering::Release);
+
+        let inner = self.inner;
+
+        // Forget self so its destructor doesn't clear the UPGRADED bit we just set
+        mem::forget(self);
+
+        RwLockUpgradableGuard {
+            phantom: PhantomData,
+            inner,
+            data: unsafe { &*inner.data.get() },
+        }
+    }
+
+    /// Leak the lock guard, yielding a mutable reference to the underlying data.
+    ///
+    /// Note that this function will permanently lock the original lock.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let data: &mut i32 = spin::RwLockWriteGuard::leak(mylock.write());
+    ///
+    /// *data = 1;
+    /// assert_eq!(*data, 1);
+    /// ```
+    #[inline]
+    pub fn leak(this: Self) -> &'rwlock mut T {
+        let mut this = ManuallyDrop::new(this);
+        // Safety: We know statically that only we are referencing data
+        unsafe { &mut *this.data }
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockWriteGuard<'rwlock, T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockWriteGuard<'rwlock, T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // Safety: We know statically that only we are referencing data
+        unsafe { &*self.data }
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> Deref for RwLockUpgradableGuard<'rwlock, T, R> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // Safety: We know statically that only we are referencing data
+        unsafe { &*self.data }
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> Deref for RwLockWriteGuard<'rwlock, T, R> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // Safety: We know statically that only we are referencing data
+        unsafe { &*self.data }
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> DerefMut for RwLockWriteGuard<'rwlock, T, R> {
+    fn deref_mut(&mut self) -> &mut T {
+        // Safety: We know statically that only we are referencing data
+        unsafe { &mut *self.data }
+    }
+}
+
+impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
+    fn drop(&mut self) {
+        debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
+        self.lock.fetch_sub(READER, Ordering::Release);
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> Drop for RwLockUpgradableGuard<'rwlock, T, R> {
+    fn drop(&mut self) {
+        debug_assert_eq!(
+            self.inner.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
+            UPGRADED
+        );
+        self.inner.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> Drop for RwLockWriteGuard<'rwlock, T, R> {
+    fn drop(&mut self) {
+        debug_assert_eq!(self.inner.lock.load(Ordering::Relaxed) & WRITER, WRITER);
+
+        // Writer is responsible for clearing both WRITER and UPGRADED bits.
+        // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held.
+        self.inner
+            .lock
+            .fetch_and(!(WRITER | UPGRADED), Ordering::Release);
+    }
+}
+
+#[inline(always)]
+fn compare_exchange(
+    atomic: &AtomicUsize,
+    current: usize,
+    new: usize,
+    success: Ordering,
+    failure: Ordering,
+    strong: bool,
+) -> Result<usize, usize> {
+    if strong {
+        atomic.compare_exchange(current, new, success, failure)
+    } else {
+        atomic.compare_exchange_weak(current, new, success, failure)
+    }
+}
+
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLock for RwLock<(), R> {
+    type GuardMarker = lock_api_crate::GuardSend;
+
+    const INIT: Self = Self::new(());
+
+    #[inline(always)]
+    fn lock_exclusive(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(self.write());
+    }
+
+    #[inline(always)]
+    fn try_lock_exclusive(&self) -> bool {
+        // Prevent guard destructor running
+        self.try_write().map(|g| core::mem::forget(g)).is_some()
+    }
+
+    #[inline(always)]
+    unsafe fn unlock_exclusive(&self) {
+        drop(RwLockWriteGuard {
+            inner: self,
+            data: &mut (),
+            phantom: PhantomData,
+        });
+    }
+
+    #[inline(always)]
+    fn lock_shared(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(self.read());
+    }
+
+    #[inline(always)]
+    fn try_lock_shared(&self) -> bool {
+        // Prevent guard destructor running
+        self.try_read().map(|g| core::mem::forget(g)).is_some()
+    }
+
+    #[inline(always)]
+    unsafe fn unlock_shared(&self) {
+        drop(RwLockReadGuard {
+            lock: &self.lock,
+            data: &(),
+        });
+    }
+
+    #[inline(always)]
+    fn is_locked(&self) -> bool {
+        self.lock.load(Ordering::Relaxed) != 0
+    }
+}
+
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockUpgrade for RwLock<(), R> {
+    #[inline(always)]
+    fn lock_upgradable(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(self.upgradeable_read());
+    }
+
+    #[inline(always)]
+    fn try_lock_upgradable(&self) -> bool {
+        // Prevent guard destructor running
+        self.try_upgradeable_read()
+            .map(|g| core::mem::forget(g))
+            .is_some()
+    }
+
+    #[inline(always)]
+    unsafe fn unlock_upgradable(&self) {
+        drop(RwLockUpgradableGuard {
+            inner: self,
+            data: &(),
+            phantom: PhantomData,
+        });
+    }
+
+    #[inline(always)]
+    unsafe fn upgrade(&self) {
+        let tmp_guard = RwLockUpgradableGuard {
+            inner: self,
+            data: &(),
+            phantom: PhantomData,
+        };
+        core::mem::forget(tmp_guard.upgrade());
+    }
+
+    #[inline(always)]
+    unsafe fn try_upgrade(&self) -> bool {
+        let tmp_guard = RwLockUpgradableGuard {
+            inner: self,
+            data: &(),
+            phantom: PhantomData,
+        };
+        tmp_guard
+            .try_upgrade()
+            .map(|g| core::mem::forget(g))
+            .is_ok()
+    }
+}
+
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockDowngrade for RwLock<(), R> {
+    unsafe fn downgrade(&self) {
+        let tmp_guard = RwLockWriteGuard {
+            inner: self,
+            data: &mut (),
+            phantom: PhantomData,
+        };
+        core::mem::forget(tmp_guard.downgrade());
+    }
+}
+
+#[cfg(feature = "lock_api1")]
+unsafe impl lock_api::RawRwLockUpgradeDowngrade for RwLock<()> {
+    unsafe fn downgrade_upgradable(&self) {
+        let tmp_guard = RwLockUpgradableGuard {
+            inner: self,
+            data: &(),
+            phantom: PhantomData,
+        };
+        core::mem::forget(tmp_guard.downgrade());
+    }
+
+    unsafe fn downgrade_to_upgradable(&self) {
+        let tmp_guard = RwLockWriteGuard {
+            inner: self,
+            data: &mut (),
+            phantom: PhantomData,
+        };
+        core::mem::forget(tmp_guard.downgrade_to_upgradeable());
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::prelude::v1::*;
+
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
+    use std::thread;
+
+    type RwLock<T> = super::RwLock<T>;
+
+    #[derive(Eq, PartialEq, Debug)]
+    struct NonCopy(i32);
+
+    #[test]
+    fn smoke() {
+        let l = RwLock::new(());
+        drop(l.read());
+        drop(l.write());
+        drop((l.read(), l.read()));
+        drop(l.write());
+    }
+
+    // TODO: needs RNG
+    //#[test]
+    //fn frob() {
+    //    static R: RwLock = RwLock::new();
+    //    const N: usize = 10;
+    //    const M: usize = 1000;
+    //
+    //    let (tx, rx) = channel::<()>();
+    //    for _ in 0..N {
+    //        let tx = tx.clone();
+    //        thread::spawn(move|| {
+    //            let mut rng = rand::thread_rng();
+    //            for _ in 0..M {
+    //                if rng.gen_weighted_bool(N) {
+    //                    drop(R.write());
+    //                } else {
+    //                    drop(R.read());
+    //                }
+    //            }
+    //            drop(tx);
+    //        });
+    //    }
+    //    drop(tx);
+    //    let _ = rx.recv();
+    //    unsafe { R.destroy(); }
+    //}
+
+    #[test]
+    fn test_rw_arc() {
+        let arc = Arc::new(RwLock::new(0));
+        let arc2 = arc.clone();
+        let (tx, rx) = channel();
+
+        let t = thread::spawn(move || {
+            let mut lock = arc2.write();
+            for _ in 0..10 {
+                let tmp = *lock;
+                *lock = -1;
+                thread::yield_now();
+                *lock = tmp + 1;
+            }
+            tx.send(()).unwrap();
+        });
+
+        // Readers try to catch the writer in the act
+        let mut children = Vec::new();
+        for _ in 0..5 {
+            let arc3 = arc.clone();
+            children.push(thread::spawn(move || {
+                let lock = arc3.read();
+                assert!(*lock >= 0);
+            }));
+        }
+
+        // Wait for children to pass their asserts
+        for r in children {
+            assert!(r.join().is_ok());
+        }
+
+        // Wait for writer to finish
+        rx.recv().unwrap();
+        let lock = arc.read();
+        assert_eq!(*lock, 10);
+
+        assert!(t.join().is_ok());
+    }
+
+    #[test]
+    #[ignore = "Android uses panic_abort"]
+    fn test_rw_access_in_unwind() {
+        let arc = Arc::new(RwLock::new(1));
+        let arc2 = arc.clone();
+        let _ = thread::spawn(move || -> () {
+            struct Unwinder {
+                i: Arc<RwLock<isize>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    let mut lock = self.i.write();
+                    *lock += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        })
+        .join();
+        let lock = arc.read();
+        assert_eq!(*lock, 2);
+    }
+
+    #[test]
+    fn test_rwlock_unsized() {
+        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
+        {
+            let b = &mut *rw.write();
+            b[0] = 4;
+            b[2] = 5;
+        }
+        let comp: &[i32] = &[4, 2, 5];
+        assert_eq!(&*rw.read(), comp);
+    }
+
+    #[test]
+    fn test_rwlock_try_write() {
+        use std::mem::drop;
+
+        let lock = RwLock::new(0isize);
+        let read_guard = lock.read();
+
+        let write_result = lock.try_write();
+        match write_result {
+            None => (),
+            Some(_) => assert!(
+                false,
+                "try_write should not succeed while read_guard is in scope"
+            ),
+        }
+
+        drop(read_guard);
+    }
+
+    #[test]
+    fn test_rw_try_read() {
+        let m = RwLock::new(0);
+        ::std::mem::forget(m.write());
+        assert!(m.try_read().is_none());
+    }
+
+    #[test]
+    fn test_into_inner() {
+        let m = RwLock::new(NonCopy(10));
+        assert_eq!(m.into_inner(), NonCopy(10));
+    }
+
+    #[test]
+    fn test_into_inner_drop() {
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = RwLock::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = m.into_inner();
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+
+    #[test]
+    fn test_force_read_decrement() {
+        let m = RwLock::new(());
+        ::std::mem::forget(m.read());
+        ::std::mem::forget(m.read());
+        ::std::mem::forget(m.read());
+        assert!(m.try_write().is_none());
+        unsafe {
+            m.force_read_decrement();
+            m.force_read_decrement();
+        }
+        assert!(m.try_write().is_none());
+        unsafe {
+            m.force_read_decrement();
+        }
+        assert!(m.try_write().is_some());
+    }
+
+    #[test]
+    fn test_force_write_unlock() {
+        let m = RwLock::new(());
+        ::std::mem::forget(m.write());
+        assert!(m.try_read().is_none());
+        unsafe {
+            m.force_write_unlock();
+        }
+        assert!(m.try_read().is_some());
+    }
+
+    #[test]
+    fn test_upgrade_downgrade() {
+        let m = RwLock::new(());
+        {
+            let _r = m.read();
+            let upg = m.try_upgradeable_read().unwrap();
+            assert!(m.try_read().is_none());
+            assert!(m.try_write().is_none());
+            assert!(upg.try_upgrade().is_err());
+        }
+        {
+            let w = m.write();
+            assert!(m.try_upgradeable_read().is_none());
+            let _r = w.downgrade();
+            assert!(m.try_upgradeable_read().is_some());
+            assert!(m.try_read().is_some());
+            assert!(m.try_write().is_none());
+        }
+        {
+            let _u = m.upgradeable_read();
+            assert!(m.try_upgradeable_read().is_none());
+        }
+
+        assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok());
+    }
+}
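When the optional `lock_api` feature is enabled, the `RawRwLock`/`RawRwLockUpgrade`/`RawRwLockDowngrade` impls above let the zero-sized `RwLock<(), R>` serve as the raw lock behind `lock_api`'s type-safe wrapper. A minimal consumer-side sketch (not part of the diff; it assumes the consumer depends on the `lock_api` crate directly, whereas this file refers to it via the `lock_api_crate` alias):

use spin::relax::Spin;

// `lock_api::RwLock` pairs a raw lock with the data it protects; the raw lock
// here is the unit-typed spin RwLock defined in src/rwlock.rs.
type SpinBackedRwLock<T> = lock_api::RwLock<spin::rwlock::RwLock<(), Spin>, T>;

fn demo() {
    let lock = SpinBackedRwLock::new(5i32);
    {
        let r = lock.read();
        assert_eq!(*r, 5);
    } // read guard dropped here
    *lock.write() += 1;
    assert_eq!(*lock.read(), 6);
}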
diff --git a/crates/spki/.cargo-checksum.json b/crates/spki/.cargo-checksum.json
new file mode 100644
index 0000000..c3dc2cd
--- /dev/null
+++ b/crates/spki/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"119ebe6bab78b405f61bfd4068a65090d1b641b1971d54aed6286f0f2b81fffc","Cargo.toml":"c2ce81d32a2e9586591d313c324e83bd3822ba13736fdd5209f06da1ac27bd5e","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"c995204cc6bad2ed67dd41f7d89bb9f1a9d48e0edd745732b30640d7912089a4","README.md":"0738222d834cb56592ce68453f1ce42687497fd9576cd5a1a2cbbe1be5ad7164","src/algorithm.rs":"70a0990e4321052b98904a8b737389cb618813e5172f072022351183169690af","src/error.rs":"ce99da5f369ae830bbac6a958ee0e93c8045df5ca7b7ea306e55bb3209c1f81f","src/fingerprint.rs":"4cb6f1e1d2ca0df933636ffcae30884fe029e2d581a42477081593271a6aa49b","src/lib.rs":"3da6d5ea63f7578b5e97328604e354d6e2bf6a9002c7a24ea1b66c89e4ba8518","src/spki.rs":"cb6ef0e7723da7e9fb22f7fe691a210504f060230bfa544787de55e44fe61c3c","src/traits.rs":"dd32e84c63ea109cb01d7b776bfbf6a67d8c161aff468788cade402307cba0b0","tests/examples/ed25519-pub.der":"55dd4c74b0e48534e2f4e173ceceb50df8f27a7ac2aa8991cc7ae914e030bced","tests/examples/ed25519-pub.pem":"36d717203cbca1812f05f30e0415251c928b659882092e653221a028571c6853","tests/examples/p256-pub.der":"b9968d56ed8d6aa3fb43b15fa01e355d7a3a0203b1408b3fd2733637c4d1642c","tests/examples/p256-pub.pem":"d1ff198dc495da63f5f909db0254d6e49cff519487fcb26d055a762fc3ca47a1","tests/examples/rsa2048-pub.der":"efeda9bfead9fd0594f6a5cf6fdf6c163116a3b1fad6d73cea05295b68fd1794","tests/examples/rsa2048-pub.pem":"078c3983093e86784590a2a454547acad1d50992419334be697e442e954f02f8","tests/spki.rs":"7d7e717d4591126a1e8e8627d4a02f68dec9ad08db885281c90d6000eaa8ebf5","tests/traits.rs":"8ccaa20167d1ada765bcc938b8636b722fd3fa17c5f185e216053306a21faa7e"},"package":"d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d"}
\ No newline at end of file
diff --git a/crates/spki/Android.bp b/crates/spki/Android.bp
new file mode 100644
index 0000000..b8d5bb5
--- /dev/null
+++ b/crates/spki/Android.bp
@@ -0,0 +1,72 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_spki_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_spki_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libspki",
+    host_supported: true,
+    crate_name: "spki",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.7.3",
+    crate_root: "src/lib.rs",
+    edition: "2021",
+    features: ["alloc"],
+    rustlibs: ["libder"],
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.virt",
+    ],
+    product_available: true,
+    vendor_available: true,
+    visibility: [
+        "//external/rust/android-crates-io/crates/pkcs1:__subpackages__",
+        "//external/rust/android-crates-io/crates/pkcs8:__subpackages__",
+        "//external/rust/crates/x509-cert:__subpackages__",
+        "//packages/modules/Virtualization:__subpackages__",
+        "//system/keymint:__subpackages__",
+    ],
+
+}
+
+rust_library_rlib {
+    name: "libspki_nostd",
+    crate_name: "spki",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.7.3",
+    crate_root: "src/lib.rs",
+    edition: "2021",
+    features: ["alloc"],
+    rustlibs: ["libder_nostd"],
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.virt",
+    ],
+    prefer_rlib: true,
+    no_stdlibs: true,
+    stdlibs: [
+        "liballoc.rust_sysroot",
+        "libcompiler_builtins.rust_sysroot",
+        "libcore.rust_sysroot",
+    ],
+    product_available: true,
+    vendor_available: true,
+    visibility: [
+        "//external/rust/android-crates-io/crates/pkcs1:__subpackages__",
+        "//external/rust/android-crates-io/crates/pkcs8:__subpackages__",
+        "//external/rust/crates/x509-cert:__subpackages__",
+        "//packages/modules/Virtualization:__subpackages__",
+        "//system/keymint:__subpackages__",
+    ],
+
+}
diff --git a/crates/spki/CHANGELOG.md b/crates/spki/CHANGELOG.md
new file mode 100644
index 0000000..cf3722d
--- /dev/null
+++ b/crates/spki/CHANGELOG.md
@@ -0,0 +1,152 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## 0.7.3 (2023-11-28)
+### Added
+- public key to `SubjectPublicKeyInfoOwned` helper ([#1269])
+
+[#1269]: https://github.com/RustCrypto/formats/pull/1269
+
+## 0.7.2 (2023-05-04)
+
+### Added
+- `AlgorithmIdentifierWithOid` trait ([#986])
+- `SignatureBitStringEncoding` trait ([#1047])
+
+### Changed
+-  Bump `hex-literal` to v0.4.1 ([#999])
+
+[#986]: https://github.com/RustCrypto/formats/pull/986
+[#999]: https://github.com/RustCrypto/formats/pull/999
+[#1047]: https://github.com/RustCrypto/formats/pull/1047
+
+
+## 0.7.1 (2023-04-04)
+### Added
+- `AssociatedAlgorithmIdentifier` trait ([#962], [#966])
+- `DynAssociatedAlgorithmIdentifier` trait ([#962])
+- `SignatureAlgorithmIdentifier` trait ([#967])
+- `DynSignatureAlgorithmIdentifier` trait ([#967])
+
+### Changed
+- Bump `der` dependency to v0.7.2 ([#979])
+
+[#962]: https://github.com/RustCrypto/formats/pull/962
+[#966]: https://github.com/RustCrypto/formats/pull/966
+[#967]: https://github.com/RustCrypto/formats/pull/967
+[#979]: https://github.com/RustCrypto/formats/pull/979
+
+## 0.7.0 (2023-02-26)
+### Changed
+- Make `AlgorithmIdentifier` generic around `Params` ([#769])
+- Use blanket impls for `Decode*` traits ([#785])
+- Make `SubjectPublicKeyInfo` own the public key ([#790])
+- Rename `to_owned` method ([#835])
+- Bump `der` dependency to v0.7 ([#899])
+
+[#769]: https://github.com/RustCrypto/formats/pull/769
+[#785]: https://github.com/RustCrypto/formats/pull/785
+[#790]: https://github.com/RustCrypto/formats/pull/790
+[#835]: https://github.com/RustCrypto/formats/pull/835
+[#899]: https://github.com/RustCrypto/formats/pull/899
+
+## 0.6.0 (2022-05-08)
+### Added
+- `AlgorithmIdentifier::oids()` helper function ([#443])
+- Impl `PartialOrd` for `AlgorithmIdentifier` ([#476])
+- Impl `DecodeValue` for `AlgorithmIdentifier` ([#449])
+- Impl `ValueOrd` for `SubjectPublicKeyInfo` ([#522])
+
+### Changed
+- Replace `PublicKeyDocument` with `der` crate's `Document` type ([#571])
+- Streaming fingerprint builder ([#616])
+- Bump `der` crate dependency to v0.6 ([#653])
+
+### Removed
+- `PublicKeyDocument` ([#571])
+
+[#443]: https://github.com/RustCrypto/formats/pull/443
+[#449]: https://github.com/RustCrypto/formats/pull/449
+[#476]: https://github.com/RustCrypto/formats/pull/476
+[#522]: https://github.com/RustCrypto/formats/pull/522
+[#571]: https://github.com/RustCrypto/formats/pull/571
+[#616]: https://github.com/RustCrypto/formats/pull/616
+[#653]: https://github.com/RustCrypto/formats/pull/653
+
+## 0.5.4 (2022-01-05)
+### Added
+- `Error::KeyMalformed` variant ([#318])
+
+[#318]: https://github.com/RustCrypto/formats/pull/318
+
+## 0.5.3 (2021-12-19)
+### Added
+- Impl `ValueOrd` for `AlgorithmIdentifier` ([#289])
+
+[#289]: https://github.com/RustCrypto/formats/pull/289
+
+## 0.5.2 (2021-11-17)
+### Changed
+- Relax `base64ct` version requirement to `^1` ([#239])
+
+[#239]: https://github.com/RustCrypto/formats/pull/239
+
+## 0.5.1 (2021-11-17)
+### Changed
+- Replace `from_spki` with `TryFrom` ([#231])
+
+[#231]: https://github.com/RustCrypto/formats/pull/231
+
+## 0.5.0 (2021-11-15) [YANKED]
+### Added
+- SPKI fingerprint support ([#36])
+- `PublicKeyDocument` type originally from `pkcs8` crate ([#118])
+- `Error` type ([#143])
+
+### Changed
+- Rename `From/ToPublicKey` => `DecodePublicKey`/`EncodePublicKey` ([#119])
+- Use `der::Document` to impl `PublicKeyDocument` ([#134])
+- Rust 2021 edition upgrade; MSRV 1.56 ([#136])
+- Bump `der` dependency to v0.5 ([#222])
+
+[#36]: https://github.com/RustCrypto/formats/pull/36
+[#118]: https://github.com/RustCrypto/formats/pull/118
+[#119]: https://github.com/RustCrypto/formats/pull/119
+[#134]: https://github.com/RustCrypto/formats/pull/134
+[#136]: https://github.com/RustCrypto/formats/pull/136
+[#143]: https://github.com/RustCrypto/formats/pull/143
+[#222]: https://github.com/RustCrypto/formats/pull/222
+
+## 0.4.1 (2021-09-14)
+### Changed
+- Moved to `formats` repo ([#2])
+
+[#2]: https://github.com/RustCrypto/formats/pull/2
+
+## 0.4.0 (2021-06-07)
+### Added
+- `AlgorithmIdentifier::assert_oids`
+
+### Changed
+- Bump `der` to v0.4
+
+## 0.3.0 (2021-03-22)
+### Changed
+- Bump `der` to v0.3
+
+### Removed
+- `AlgorithmParameters` enum
+
+## 0.2.1 (2021-02-22)
+### Added
+- Impl `Choice` for `AlgorithmParameters`
+
+## 0.2.0 (2021-02-18)
+### Changed
+- Return `Result` from `AlgorithmIdentifier::params_*`
+
+## 0.1.0 (2021-02-16)
+- Initial release
diff --git a/crates/spki/Cargo.lock b/crates/spki/Cargo.lock
new file mode 100644
index 0000000..ab28ef6
--- /dev/null
+++ b/crates/spki/Cargo.lock
@@ -0,0 +1,343 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "arbitrary"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110"
+dependencies = [
+ "derive_arbitrary",
+]
+
+[[package]]
+name = "base64ct"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
+
+[[package]]
+name = "bitflags"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+
+[[package]]
+name = "block-buffer"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "const-oid"
+version = "0.9.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
+dependencies = [
+ "arbitrary",
+]
+
+[[package]]
+name = "cpufeatures"
+version = "0.2.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "crypto-common"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+dependencies = [
+ "generic-array",
+ "typenum",
+]
+
+[[package]]
+name = "der"
+version = "0.7.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0"
+dependencies = [
+ "arbitrary",
+ "const-oid",
+ "pem-rfc7468",
+ "zeroize",
+]
+
+[[package]]
+name = "derive_arbitrary"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "digest"
+version = "0.10.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
+dependencies = [
+ "block-buffer",
+ "crypto-common",
+]
+
+[[package]]
+name = "errno"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba"
+dependencies = [
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "fastrand"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
+
+[[package]]
+name = "generic-array"
+version = "0.14.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
+dependencies = [
+ "typenum",
+ "version_check",
+]
+
+[[package]]
+name = "hex-literal"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46"
+
+[[package]]
+name = "libc"
+version = "0.2.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
+
+[[package]]
+name = "once_cell"
+version = "1.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+
+[[package]]
+name = "pem-rfc7468"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412"
+dependencies = [
+ "base64ct",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rustix"
+version = "0.38.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a85d50532239da68e9addb745ba38ff4612a242c1c7ceea689c4bc7c2f43c36f"
+dependencies = [
+ "bitflags",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "sha2"
+version = "0.10.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "digest",
+]
+
+[[package]]
+name = "spki"
+version = "0.7.3"
+dependencies = [
+ "arbitrary",
+ "base64ct",
+ "der",
+ "hex-literal",
+ "sha2",
+ "tempfile",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64"
+dependencies = [
+ "cfg-if",
+ "fastrand",
+ "once_cell",
+ "rustix",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "typenum"
+version = "1.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "version_check"
+version = "0.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
+
+[[package]]
+name = "zeroize"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
diff --git a/crates/spki/Cargo.toml b/crates/spki/Cargo.toml
new file mode 100644
index 0000000..1c7f305
--- /dev/null
+++ b/crates/spki/Cargo.toml
@@ -0,0 +1,87 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.65"
+name = "spki"
+version = "0.7.3"
+authors = ["RustCrypto Developers"]
+description = """
+X.509 Subject Public Key Info (RFC5280) describing public keys as well as their
+associated AlgorithmIdentifiers (i.e. OIDs)
+"""
+readme = "README.md"
+keywords = [
+    "crypto",
+    "x509",
+]
+categories = [
+    "cryptography",
+    "data-structures",
+    "encoding",
+    "no-std",
+]
+license = "Apache-2.0 OR MIT"
+repository = "https://github.com/RustCrypto/formats/tree/master/spki"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = [
+    "--cfg",
+    "docsrs",
+]
+
+[dependencies.arbitrary]
+version = "1.2"
+features = ["derive"]
+optional = true
+
+[dependencies.base64ct]
+version = "1"
+optional = true
+default-features = false
+
+[dependencies.der]
+version = "0.7.2"
+features = ["oid"]
+
+[dependencies.sha2]
+version = "0.10"
+optional = true
+default-features = false
+
+[dev-dependencies.hex-literal]
+version = "0.4"
+
+[dev-dependencies.tempfile]
+version = "3"
+
+[features]
+alloc = [
+    "base64ct?/alloc",
+    "der/alloc",
+]
+arbitrary = [
+    "std",
+    "dep:arbitrary",
+    "der/arbitrary",
+]
+base64 = ["dep:base64ct"]
+fingerprint = ["sha2"]
+pem = [
+    "alloc",
+    "der/pem",
+]
+std = [
+    "der/std",
+    "alloc",
+]
diff --git a/crates/spki/LICENSE b/crates/spki/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/spki/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/spki/LICENSE-APACHE b/crates/spki/LICENSE-APACHE
new file mode 100644
index 0000000..78173fa
--- /dev/null
+++ b/crates/spki/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/crates/spki/LICENSE-MIT b/crates/spki/LICENSE-MIT
new file mode 100644
index 0000000..3294d74
--- /dev/null
+++ b/crates/spki/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2021-2023 The RustCrypto Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/spki/METADATA b/crates/spki/METADATA
new file mode 100644
index 0000000..0fa8081
--- /dev/null
+++ b/crates/spki/METADATA
@@ -0,0 +1,20 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/spki
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
+
+name: "spki"
+description: "X.509 Subject Public Key Info types describing public keys as well as their associated AlgorithmIdentifiers (i.e. OIDs)."
+third_party {
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2023
+    month: 12
+    day: 15
+  }
+  homepage: "https://crates.io/crates/spki"
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/spki/spki-0.7.3.crate"
+    version: "0.7.3"
+  }
+}
diff --git a/crates/spki/MODULE_LICENSE_APACHE2 b/crates/spki/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/spki/MODULE_LICENSE_APACHE2
diff --git a/crates/spki/README.md b/crates/spki/README.md
new file mode 100644
index 0000000..4ac8554
--- /dev/null
+++ b/crates/spki/README.md
@@ -0,0 +1,56 @@
+# [RustCrypto]: X.509 Subject Public Key Info (SPKI)
+
+[![crate][crate-image]][crate-link]
+[![Docs][docs-image]][docs-link]
+[![Build Status][build-image]][build-link]
+![Apache2/MIT licensed][license-image]
+![Rust Version][rustc-image]
+[![Project Chat][chat-image]][chat-link]
+
+[X.509] Subject Public Key Info types describing public keys as well as their
+associated AlgorithmIdentifiers (i.e. OIDs).
+
+Specified in [RFC 5280 § 4.1].
+
+[Documentation][docs-link]
+
+## Minimum Supported Rust Version
+
+This crate requires **Rust 1.65** at a minimum.
+
+We may change the MSRV in the future, but it will be accompanied by a minor
+version bump.
+
+## License
+
+Licensed under either of:
+
+ * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+ * [MIT license](http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
+
+[//]: # (badges)
+
+[crate-image]: https://buildstats.info/crate/spki
+[crate-link]: https://crates.io/crates/spki
+[docs-image]: https://docs.rs/spki/badge.svg
+[docs-link]: https://docs.rs/spki/
+[build-image]: https://github.com/RustCrypto/formats/actions/workflows/spki.yml/badge.svg
+[build-link]: https://github.com/RustCrypto/formats/actions/workflows/spki.yml
+[license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg
+[rustc-image]: https://img.shields.io/badge/rustc-1.65+-blue.svg
+[chat-image]: https://img.shields.io/badge/zulip-join_chat-blue.svg
+[chat-link]: https://rustcrypto.zulipchat.com/#narrow/stream/300570-formats
+
+[//]: # (links)
+
+[RustCrypto]: https://github.com/rustcrypto
+[X.509]: https://en.wikipedia.org/wiki/X.509
+[RFC 5280 § 4.1]: https://tools.ietf.org/html/rfc5280#section-4.1
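For reviewers skimming the new crate, a minimal decoding sketch (not part of the upstream README; it assumes DER-encoded `SubjectPublicKeyInfo` input such as the files added under `tests/examples/`):

```rust
use spki::SubjectPublicKeyInfoRef;

// Parse a DER-encoded SubjectPublicKeyInfo and report its algorithm OID.
fn describe_key(der: &[u8]) -> spki::Result<()> {
    let spki = SubjectPublicKeyInfoRef::try_from(der)?;
    println!("algorithm OID: {}", spki.algorithm.oid);
    Ok(())
}
```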
diff --git a/crates/spki/cargo2android_viz.bp b/crates/spki/cargo2android_viz.bp
new file mode 100644
index 0000000..af9e79f
--- /dev/null
+++ b/crates/spki/cargo2android_viz.bp
@@ -0,0 +1,7 @@
+visibility: [
+     "//external/rust/android-crates-io/crates/pkcs1:__subpackages__",
+     "//external/rust/android-crates-io/crates/pkcs8:__subpackages__",
+     "//external/rust/crates/x509-cert:__subpackages__",
+     "//packages/modules/Virtualization:__subpackages__",
+     "//system/keymint:__subpackages__",
+]
diff --git a/crates/spki/cargo_embargo.json b/crates/spki/cargo_embargo.json
new file mode 100644
index 0000000..8987119
--- /dev/null
+++ b/crates/spki/cargo_embargo.json
@@ -0,0 +1,34 @@
+{
+  "apex_available": [
+    "//apex_available:platform",
+    "com.android.virt"
+  ],
+  "features": [
+    "alloc"
+  ],
+  "run_cargo": false,
+  "variants": [
+    {
+      "package": {
+        "spki": {
+          "add_module_block": "cargo2android_viz.bp"
+        }
+      }
+    },
+    {
+      "module_name_overrides": {
+        "libder": "libder_nostd",
+        "libspki": "libspki_nostd"
+      },
+      "package": {
+        "spki": {
+          "add_module_block": "cargo2android_viz.bp",
+          "alloc": true,
+          "force_rlib": true,
+          "host_supported": false,
+          "no_std": true
+        }
+      }
+    }
+  ]
+}
diff --git a/crates/spki/patches/rules.mk.diff b/crates/spki/patches/rules.mk.diff
new file mode 100644
index 0000000..13eac9e
--- /dev/null
+++ b/crates/spki/patches/rules.mk.diff
@@ -0,0 +1,13 @@
+diff --git b/rules.mk a/rules.mk
+index 25bd083..bd3edb9 100644
+--- b/rules.mk
++++ a/rules.mk
+@@ -12,8 +12,6 @@ MODULE_RUSTFLAGS += \
+ 	--cfg 'feature="alloc"' \
+ 
+ MODULE_LIBRARY_DEPS := \
+-	external/rust/crates/base64ct \
+ 	external/rust/crates/der \
+-	external/rust/crates/sha2 \
+ 
+ include make/library.mk
diff --git a/crates/spki/patches/std.diff b/crates/spki/patches/std.diff
new file mode 100644
index 0000000..7ef1d49
--- /dev/null
+++ b/crates/spki/patches/std.diff
@@ -0,0 +1,14 @@
+diff --git a/src/lib.rs b/src/lib.rs
+index f466756..995f3bd 100644
+--- a/src/lib.rs
++++ b/src/lib.rs
+@@ -48,6 +48,10 @@ pub use crate::{
+ };
+ pub use der::{self, asn1::ObjectIdentifier};
+ 
++/// Local Android change: Use std to allow building as a dylib.
++#[cfg(android_dylib)]
++extern crate std;
++
+ #[cfg(feature = "alloc")]
+ pub use {crate::traits::EncodePublicKey, der::Document};
diff --git a/crates/spki/src/algorithm.rs b/crates/spki/src/algorithm.rs
new file mode 100644
index 0000000..5f4b5e8
--- /dev/null
+++ b/crates/spki/src/algorithm.rs
@@ -0,0 +1,194 @@
+//! X.509 `AlgorithmIdentifier`
+
+use crate::{Error, Result};
+use core::cmp::Ordering;
+use der::{
+    asn1::{AnyRef, Choice, ObjectIdentifier},
+    Decode, DecodeValue, DerOrd, Encode, EncodeValue, Header, Length, Reader, Sequence, ValueOrd,
+    Writer,
+};
+
+#[cfg(feature = "alloc")]
+use der::asn1::Any;
+
+/// X.509 `AlgorithmIdentifier` as defined in [RFC 5280 Section 4.1.1.2].
+///
+/// ```text
+/// AlgorithmIdentifier  ::=  SEQUENCE  {
+///      algorithm               OBJECT IDENTIFIER,
+///      parameters              ANY DEFINED BY algorithm OPTIONAL  }
+/// ```
+///
+/// [RFC 5280 Section 4.1.1.2]: https://tools.ietf.org/html/rfc5280#section-4.1.1.2
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
+pub struct AlgorithmIdentifier<Params> {
+    /// Algorithm OID, i.e. the `algorithm` field in the `AlgorithmIdentifier`
+    /// ASN.1 schema.
+    pub oid: ObjectIdentifier,
+
+    /// Algorithm `parameters`.
+    pub parameters: Option<Params>,
+}
+
+impl<'a, Params> DecodeValue<'a> for AlgorithmIdentifier<Params>
+where
+    Params: Choice<'a>,
+{
+    fn decode_value<R: Reader<'a>>(reader: &mut R, header: Header) -> der::Result<Self> {
+        reader.read_nested(header.length, |reader| {
+            Ok(Self {
+                oid: reader.decode()?,
+                parameters: reader.decode()?,
+            })
+        })
+    }
+}
+
+impl<Params> EncodeValue for AlgorithmIdentifier<Params>
+where
+    Params: Encode,
+{
+    fn value_len(&self) -> der::Result<Length> {
+        self.oid.encoded_len()? + self.parameters.encoded_len()?
+    }
+
+    fn encode_value(&self, writer: &mut impl Writer) -> der::Result<()> {
+        self.oid.encode(writer)?;
+        self.parameters.encode(writer)?;
+        Ok(())
+    }
+}
+
+impl<'a, Params> Sequence<'a> for AlgorithmIdentifier<Params> where Params: Choice<'a> + Encode {}
+
+impl<'a, Params> TryFrom<&'a [u8]> for AlgorithmIdentifier<Params>
+where
+    Params: Choice<'a> + Encode,
+{
+    type Error = Error;
+
+    fn try_from(bytes: &'a [u8]) -> Result<Self> {
+        Ok(Self::from_der(bytes)?)
+    }
+}
+
+impl<Params> ValueOrd for AlgorithmIdentifier<Params>
+where
+    Params: DerOrd,
+{
+    fn value_cmp(&self, other: &Self) -> der::Result<Ordering> {
+        match self.oid.der_cmp(&other.oid)? {
+            Ordering::Equal => self.parameters.der_cmp(&other.parameters),
+            other => Ok(other),
+        }
+    }
+}
+
+/// `AlgorithmIdentifier` reference which has `AnyRef` parameters.
+pub type AlgorithmIdentifierRef<'a> = AlgorithmIdentifier<AnyRef<'a>>;
+
+/// `AlgorithmIdentifier` with `ObjectIdentifier` parameters.
+pub type AlgorithmIdentifierWithOid = AlgorithmIdentifier<ObjectIdentifier>;
+
+/// `AlgorithmIdentifier` with owned `Any` parameters.
+#[cfg(feature = "alloc")]
+pub type AlgorithmIdentifierOwned = AlgorithmIdentifier<Any>;
+
+impl<Params> AlgorithmIdentifier<Params> {
+    /// Assert the `algorithm` OID is an expected value.
+    pub fn assert_algorithm_oid(&self, expected_oid: ObjectIdentifier) -> Result<ObjectIdentifier> {
+        if self.oid == expected_oid {
+            Ok(expected_oid)
+        } else {
+            Err(Error::OidUnknown { oid: expected_oid })
+        }
+    }
+}
+
+impl<'a> AlgorithmIdentifierRef<'a> {
+    /// Assert `parameters` is an OID and has the expected value.
+    pub fn assert_parameters_oid(
+        &self,
+        expected_oid: ObjectIdentifier,
+    ) -> Result<ObjectIdentifier> {
+        let actual_oid = self.parameters_oid()?;
+
+        if actual_oid == expected_oid {
+            Ok(actual_oid)
+        } else {
+            Err(Error::OidUnknown { oid: expected_oid })
+        }
+    }
+
+    /// Assert the values of the `algorithm` and `parameters` OIDs.
+    pub fn assert_oids(
+        &self,
+        algorithm: ObjectIdentifier,
+        parameters: ObjectIdentifier,
+    ) -> Result<()> {
+        self.assert_algorithm_oid(algorithm)?;
+        self.assert_parameters_oid(parameters)?;
+        Ok(())
+    }
+
+    /// Get the `parameters` field as an [`AnyRef`].
+    ///
+    /// Returns an error if the `parameters` field is `None`.
+    pub fn parameters_any(&self) -> Result<AnyRef<'a>> {
+        self.parameters.ok_or(Error::AlgorithmParametersMissing)
+    }
+
+    /// Get the `parameters` field as an [`ObjectIdentifier`].
+    ///
+    /// Returns an error if it is absent or not an OID.
+    pub fn parameters_oid(&self) -> Result<ObjectIdentifier> {
+        Ok(ObjectIdentifier::try_from(self.parameters_any()?)?)
+    }
+
+    /// Convert to a pair of [`ObjectIdentifier`]s.
+    ///
+    /// This method is helpful for decomposing in match statements. Note in
+    /// particular that `NULL` parameters are treated the same as missing
+    /// parameters.
+    ///
+    /// Returns an error if parameters are present but not an OID.
+    pub fn oids(&self) -> der::Result<(ObjectIdentifier, Option<ObjectIdentifier>)> {
+        Ok((
+            self.oid,
+            match self.parameters {
+                None => None,
+                Some(p) => match p {
+                    AnyRef::NULL => None,
+                    _ => Some(p.decode_as::<ObjectIdentifier>()?),
+                },
+            },
+        ))
+    }
+}
+
+#[cfg(feature = "alloc")]
+mod allocating {
+    use super::*;
+    use der::referenced::*;
+
+    impl<'a> RefToOwned<'a> for AlgorithmIdentifierRef<'a> {
+        type Owned = AlgorithmIdentifierOwned;
+        fn ref_to_owned(&self) -> Self::Owned {
+            AlgorithmIdentifier {
+                oid: self.oid,
+                parameters: self.parameters.ref_to_owned(),
+            }
+        }
+    }
+
+    impl OwnedToRef for AlgorithmIdentifierOwned {
+        type Borrowed<'a> = AlgorithmIdentifierRef<'a>;
+        fn owned_to_ref(&self) -> Self::Borrowed<'_> {
+            AlgorithmIdentifier {
+                oid: self.oid,
+                parameters: self.parameters.owned_to_ref(),
+            }
+        }
+    }
+}
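The `AlgorithmIdentifier` API above can be exercised without any DER input; a small sketch reusing the OIDs from the crate-level doc example (illustrative only, not part of the vendored sources):

```rust
use spki::{AlgorithmIdentifier, ObjectIdentifier};

fn main() {
    // id-ecPublicKey with the P-256 curve OID as its parameters.
    let alg_oid: ObjectIdentifier = "1.2.840.10045.2.1".parse().unwrap();
    let curve_oid: ObjectIdentifier = "1.2.840.10045.3.1.7".parse().unwrap();

    let alg_id = AlgorithmIdentifier {
        oid: alg_oid,
        parameters: Some(curve_oid),
    };

    // `assert_algorithm_oid` echoes the OID back on a match and returns
    // `Error::OidUnknown` otherwise.
    assert_eq!(alg_id.assert_algorithm_oid(alg_oid).unwrap(), alg_oid);
}
```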
diff --git a/crates/spki/src/error.rs b/crates/spki/src/error.rs
new file mode 100644
index 0000000..9d05990
--- /dev/null
+++ b/crates/spki/src/error.rs
@@ -0,0 +1,68 @@
+//! Error types
+
+use core::fmt;
+use der::asn1::ObjectIdentifier;
+
+/// Result type with `spki` crate's [`Error`] type.
+pub type Result<T> = core::result::Result<T, Error>;
+
+#[cfg(feature = "pem")]
+use der::pem;
+
+/// Error type
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+#[non_exhaustive]
+pub enum Error {
+    /// Algorithm parameters are missing.
+    AlgorithmParametersMissing,
+
+    /// ASN.1 DER-related errors.
+    Asn1(der::Error),
+
+    /// Malformed cryptographic key contained in a SPKI document.
+    ///
+    /// This is intended for relaying errors related to the raw data contained
+    /// in [`SubjectPublicKeyInfo::subject_public_key`][`crate::SubjectPublicKeyInfo::subject_public_key`].
+    KeyMalformed,
+
+    /// Unknown algorithm OID.
+    OidUnknown {
+        /// Unrecognized OID value found in e.g. a SPKI `AlgorithmIdentifier`.
+        oid: ObjectIdentifier,
+    },
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Error::AlgorithmParametersMissing => {
+                f.write_str("AlgorithmIdentifier parameters missing")
+            }
+            Error::Asn1(err) => write!(f, "ASN.1 error: {}", err),
+            Error::KeyMalformed => f.write_str("SPKI cryptographic key data malformed"),
+            Error::OidUnknown { oid } => {
+                write!(f, "unknown/unsupported algorithm OID: {}", oid)
+            }
+        }
+    }
+}
+
+impl From<der::Error> for Error {
+    fn from(err: der::Error) -> Error {
+        if let der::ErrorKind::OidUnknown { oid } = err.kind() {
+            Error::OidUnknown { oid }
+        } else {
+            Error::Asn1(err)
+        }
+    }
+}
+
+#[cfg(feature = "pem")]
+impl From<pem::Error> for Error {
+    fn from(err: pem::Error) -> Error {
+        der::Error::from(err).into()
+    }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for Error {}
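Since `Error` is `#[non_exhaustive]`, downstream code needs a catch-all arm when matching on it; a hypothetical helper as a sketch:

```rust
use spki::Error;

// Map spki errors to short labels (helper name is illustrative).
fn describe(err: &Error) -> &'static str {
    match err {
        Error::AlgorithmParametersMissing => "missing AlgorithmIdentifier parameters",
        Error::Asn1(_) => "ASN.1 DER error",
        Error::KeyMalformed => "malformed key data",
        Error::OidUnknown { .. } => "unknown algorithm OID",
        // Required: the enum is non_exhaustive outside the defining crate.
        _ => "other spki error",
    }
}
```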
diff --git a/crates/spki/src/fingerprint.rs b/crates/spki/src/fingerprint.rs
new file mode 100644
index 0000000..ba06e62
--- /dev/null
+++ b/crates/spki/src/fingerprint.rs
@@ -0,0 +1,42 @@
+//! SPKI fingerprint support.
+
+use der::Writer;
+use sha2::{Digest, Sha256};
+
+/// Size of a SHA-256 SPKI fingerprint in bytes.
+pub(crate) const SIZE: usize = 32;
+
+/// Raw bytes of a SPKI fingerprint, i.e. the SHA-256 digest of
+/// `SubjectPublicKeyInfo`'s DER encoding.
+///
+/// See [RFC7469 § 2.1.1] for more information.
+///
+/// [RFC7469 § 2.1.1]: https://datatracker.ietf.org/doc/html/rfc7469#section-2.1.1
+pub type FingerprintBytes = [u8; SIZE];
+
+/// Writer newtype which accepts DER being serialized on-the-fly and computes a
+/// hash of the contents.
+#[derive(Clone, Default)]
+pub(crate) struct Builder {
+    /// In-progress digest being computed from streaming DER.
+    digest: Sha256,
+}
+
+impl Builder {
+    /// Create a new fingerprint builder.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Finish computing a fingerprint, returning the computed digest.
+    pub fn finish(self) -> FingerprintBytes {
+        self.digest.finalize().into()
+    }
+}
+
+impl Writer for Builder {
+    fn write(&mut self, der_bytes: &[u8]) -> der::Result<()> {
+        self.digest.update(der_bytes);
+        Ok(())
+    }
+}
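The `Builder` above is crate-internal; callers reach fingerprints through `SubjectPublicKeyInfo::fingerprint_bytes` (defined in `src/spki.rs`). A sketch, assuming the `fingerprint` feature is enabled:

```rust
use spki::{FingerprintBytes, SubjectPublicKeyInfoRef};

// Compute the RFC 7469 SHA-256 fingerprint of a DER-encoded SPKI document.
fn spki_fingerprint(der: &[u8]) -> spki::Result<FingerprintBytes> {
    let spki = SubjectPublicKeyInfoRef::try_from(der)?;
    spki.fingerprint_bytes()
}
```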
diff --git a/crates/spki/src/lib.rs b/crates/spki/src/lib.rs
new file mode 100644
index 0000000..a8b7653
--- /dev/null
+++ b/crates/spki/src/lib.rs
@@ -0,0 +1,75 @@
+#![no_std]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
+    html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg"
+)]
+#![forbid(unsafe_code)]
+#![warn(
+    clippy::mod_module_files,
+    clippy::unwrap_used,
+    missing_docs,
+    rust_2018_idioms,
+    unused_lifetimes,
+    unused_qualifications
+)]
+//! # Usage
+//! The following example demonstrates how to use an OID as the `parameters`
+//! of an [`AlgorithmIdentifier`].
+//!
+//! Borrow the [`ObjectIdentifier`] first then use [`der::AnyRef::from`] or `.into()`:
+//!
+//! ```
+//! use spki::{AlgorithmIdentifier, ObjectIdentifier};
+//!
+//! let alg_oid = "1.2.840.10045.2.1".parse::<ObjectIdentifier>().unwrap();
+//! let params_oid = "1.2.840.10045.3.1.7".parse::<ObjectIdentifier>().unwrap();
+//!
+//! let alg_id = AlgorithmIdentifier {
+//!     oid: alg_oid,
+//!     parameters: Some(params_oid)
+//! };
+//! ```
+
+#[cfg(feature = "alloc")]
+#[allow(unused_extern_crates)]
+extern crate alloc;
+#[cfg(feature = "std")]
+extern crate std;
+
+mod algorithm;
+mod error;
+mod spki;
+mod traits;
+
+#[cfg(feature = "fingerprint")]
+mod fingerprint;
+
+pub use crate::{
+    algorithm::{AlgorithmIdentifier, AlgorithmIdentifierRef, AlgorithmIdentifierWithOid},
+    error::{Error, Result},
+    spki::{SubjectPublicKeyInfo, SubjectPublicKeyInfoRef},
+    traits::{AssociatedAlgorithmIdentifier, DecodePublicKey, SignatureAlgorithmIdentifier},
+};
+pub use der::{self, asn1::ObjectIdentifier};
+
+/// Local Android change: Use std to allow building as a dylib.
+#[cfg(android_dylib)]
+extern crate std;
+
+#[cfg(feature = "alloc")]
+pub use {
+    crate::{
+        algorithm::AlgorithmIdentifierOwned,
+        spki::SubjectPublicKeyInfoOwned,
+        traits::{
+            DynAssociatedAlgorithmIdentifier, DynSignatureAlgorithmIdentifier, EncodePublicKey,
+            SignatureBitStringEncoding,
+        },
+    },
+    der::Document,
+};
+
+#[cfg(feature = "fingerprint")]
+pub use crate::fingerprint::FingerprintBytes;
diff --git a/crates/spki/src/spki.rs b/crates/spki/src/spki.rs
new file mode 100644
index 0000000..b7e4c92
--- /dev/null
+++ b/crates/spki/src/spki.rs
@@ -0,0 +1,217 @@
+//! X.509 `SubjectPublicKeyInfo`
+
+use crate::{AlgorithmIdentifier, Error, Result};
+use core::cmp::Ordering;
+use der::{
+    asn1::{AnyRef, BitStringRef},
+    Choice, Decode, DecodeValue, DerOrd, Encode, EncodeValue, FixedTag, Header, Length, Reader,
+    Sequence, ValueOrd, Writer,
+};
+
+#[cfg(feature = "alloc")]
+use der::{
+    asn1::{Any, BitString},
+    Document,
+};
+
+#[cfg(feature = "fingerprint")]
+use crate::{fingerprint, FingerprintBytes};
+
+#[cfg(feature = "pem")]
+use der::pem::PemLabel;
+
+/// [`SubjectPublicKeyInfo`] with [`AnyRef`] algorithm parameters and a [`BitStringRef`] public key.
+pub type SubjectPublicKeyInfoRef<'a> = SubjectPublicKeyInfo<AnyRef<'a>, BitStringRef<'a>>;
+
+/// [`SubjectPublicKeyInfo`] with [`Any`] algorithm parameters and a [`BitString`] public key.
+#[cfg(feature = "alloc")]
+pub type SubjectPublicKeyInfoOwned = SubjectPublicKeyInfo<Any, BitString>;
+
+/// X.509 `SubjectPublicKeyInfo` (SPKI) as defined in [RFC 5280 § 4.1.2.7].
+///
+/// ASN.1 structure containing an [`AlgorithmIdentifier`] and public key
+/// data in an algorithm specific format.
+///
+/// ```text
+///    SubjectPublicKeyInfo  ::=  SEQUENCE  {
+///         algorithm            AlgorithmIdentifier,
+///         subjectPublicKey     BIT STRING  }
+/// ```
+///
+/// [RFC 5280 § 4.1.2.7]: https://tools.ietf.org/html/rfc5280#section-4.1.2.7
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct SubjectPublicKeyInfo<Params, Key> {
+    /// X.509 [`AlgorithmIdentifier`] for the public key type
+    pub algorithm: AlgorithmIdentifier<Params>,
+
+    /// Public key data
+    pub subject_public_key: Key,
+}
+
+impl<'a, Params, Key> SubjectPublicKeyInfo<Params, Key>
+where
+    Params: Choice<'a> + Encode,
+    // TODO: replace FixedTag with FixedTag<TAG = { Tag::BitString }> once
+    // https://github.com/rust-lang/rust/issues/92827 is fixed
+    Key: Decode<'a> + Encode + FixedTag,
+{
+    /// Calculate the SHA-256 fingerprint of this [`SubjectPublicKeyInfo`] and
+    /// encode it as a Base64 string.
+    ///
+    /// See [RFC7469 § 2.1.1] for more information.
+    ///
+    /// [RFC7469 § 2.1.1]: https://datatracker.ietf.org/doc/html/rfc7469#section-2.1.1
+    #[cfg(all(feature = "fingerprint", feature = "alloc", feature = "base64"))]
+    pub fn fingerprint_base64(&self) -> Result<alloc::string::String> {
+        use base64ct::{Base64, Encoding};
+        Ok(Base64::encode_string(&self.fingerprint_bytes()?))
+    }
+
+    /// Calculate the SHA-256 fingerprint of this [`SubjectPublicKeyInfo`] as
+    /// a raw byte array.
+    ///
+    /// See [RFC7469 § 2.1.1] for more information.
+    ///
+    /// [RFC7469 § 2.1.1]: https://datatracker.ietf.org/doc/html/rfc7469#section-2.1.1
+    #[cfg(feature = "fingerprint")]
+    pub fn fingerprint_bytes(&self) -> Result<FingerprintBytes> {
+        let mut builder = fingerprint::Builder::new();
+        self.encode(&mut builder)?;
+        Ok(builder.finish())
+    }
+}
+
+impl<'a: 'k, 'k, Params, Key: 'k> DecodeValue<'a> for SubjectPublicKeyInfo<Params, Key>
+where
+    Params: Choice<'a> + Encode,
+    Key: Decode<'a>,
+{
+    fn decode_value<R: Reader<'a>>(reader: &mut R, header: Header) -> der::Result<Self> {
+        reader.read_nested(header.length, |reader| {
+            Ok(Self {
+                algorithm: reader.decode()?,
+                subject_public_key: Key::decode(reader)?,
+            })
+        })
+    }
+}
+
+impl<'a, Params, Key> EncodeValue for SubjectPublicKeyInfo<Params, Key>
+where
+    Params: Choice<'a> + Encode,
+    Key: Encode,
+{
+    fn value_len(&self) -> der::Result<Length> {
+        self.algorithm.encoded_len()? + self.subject_public_key.encoded_len()?
+    }
+
+    fn encode_value(&self, writer: &mut impl Writer) -> der::Result<()> {
+        self.algorithm.encode(writer)?;
+        self.subject_public_key.encode(writer)?;
+        Ok(())
+    }
+}
+
+impl<'a, Params, Key> Sequence<'a> for SubjectPublicKeyInfo<Params, Key>
+where
+    Params: Choice<'a> + Encode,
+    Key: Decode<'a> + Encode + FixedTag,
+{
+}
+
+impl<'a, Params, Key> TryFrom<&'a [u8]> for SubjectPublicKeyInfo<Params, Key>
+where
+    Params: Choice<'a> + Encode,
+    Key: Decode<'a> + Encode + FixedTag,
+{
+    type Error = Error;
+
+    fn try_from(bytes: &'a [u8]) -> Result<Self> {
+        Ok(Self::from_der(bytes)?)
+    }
+}
+
+impl<'a, Params, Key> ValueOrd for SubjectPublicKeyInfo<Params, Key>
+where
+    Params: Choice<'a> + DerOrd + Encode,
+    Key: ValueOrd,
+{
+    fn value_cmp(&self, other: &Self) -> der::Result<Ordering> {
+        match self.algorithm.der_cmp(&other.algorithm)? {
+            Ordering::Equal => self.subject_public_key.value_cmp(&other.subject_public_key),
+            other => Ok(other),
+        }
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl<'a: 'k, 'k, Params, Key: 'k> TryFrom<SubjectPublicKeyInfo<Params, Key>> for Document
+where
+    Params: Choice<'a> + Encode,
+    Key: Decode<'a> + Encode + FixedTag,
+    BitStringRef<'a>: From<&'k Key>,
+{
+    type Error = Error;
+
+    fn try_from(spki: SubjectPublicKeyInfo<Params, Key>) -> Result<Document> {
+        Self::try_from(&spki)
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl<'a: 'k, 'k, Params, Key: 'k> TryFrom<&SubjectPublicKeyInfo<Params, Key>> for Document
+where
+    Params: Choice<'a> + Encode,
+    Key: Decode<'a> + Encode + FixedTag,
+    BitStringRef<'a>: From<&'k Key>,
+{
+    type Error = Error;
+
+    fn try_from(spki: &SubjectPublicKeyInfo<Params, Key>) -> Result<Document> {
+        Ok(Self::encode_msg(spki)?)
+    }
+}
+
+#[cfg(feature = "pem")]
+impl<Params, Key> PemLabel for SubjectPublicKeyInfo<Params, Key> {
+    const PEM_LABEL: &'static str = "PUBLIC KEY";
+}
+
+#[cfg(feature = "alloc")]
+mod allocating {
+    use super::*;
+    use crate::EncodePublicKey;
+    use der::referenced::*;
+
+    impl<'a> RefToOwned<'a> for SubjectPublicKeyInfoRef<'a> {
+        type Owned = SubjectPublicKeyInfoOwned;
+        fn ref_to_owned(&self) -> Self::Owned {
+            SubjectPublicKeyInfo {
+                algorithm: self.algorithm.ref_to_owned(),
+                subject_public_key: self.subject_public_key.ref_to_owned(),
+            }
+        }
+    }
+
+    impl OwnedToRef for SubjectPublicKeyInfoOwned {
+        type Borrowed<'a> = SubjectPublicKeyInfoRef<'a>;
+        fn owned_to_ref(&self) -> Self::Borrowed<'_> {
+            SubjectPublicKeyInfo {
+                algorithm: self.algorithm.owned_to_ref(),
+                subject_public_key: self.subject_public_key.owned_to_ref(),
+            }
+        }
+    }
+
+    impl SubjectPublicKeyInfoOwned {
+        /// Create a [`SubjectPublicKeyInfoOwned`] from any object that implements
+        /// [`EncodePublicKey`].
+        pub fn from_key<T>(source: T) -> Result<Self>
+        where
+            T: EncodePublicKey,
+        {
+            Ok(source.to_public_key_der()?.decode_msg::<Self>()?)
+        }
+    }
+}
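The `allocating` module above wires `SubjectPublicKeyInfo` into `der::referenced`; a sketch of detaching a borrowed SPKI from its input buffer, assuming the `alloc` feature:

```rust
use spki::der::referenced::RefToOwned;
use spki::{SubjectPublicKeyInfoOwned, SubjectPublicKeyInfoRef};

// Decode a borrowed SPKI, then convert it to the owned form so it can
// outlive the DER input buffer.
fn decode_owned(der: &[u8]) -> spki::Result<SubjectPublicKeyInfoOwned> {
    let spki_ref = SubjectPublicKeyInfoRef::try_from(der)?;
    Ok(spki_ref.ref_to_owned())
}
```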
diff --git a/crates/spki/src/traits.rs b/crates/spki/src/traits.rs
new file mode 100644
index 0000000..764b02a
--- /dev/null
+++ b/crates/spki/src/traits.rs
@@ -0,0 +1,184 @@
+//! Traits for encoding/decoding SPKI public keys.
+
+use crate::{AlgorithmIdentifier, Error, Result, SubjectPublicKeyInfoRef};
+use der::{EncodeValue, Tagged};
+
+#[cfg(feature = "alloc")]
+use {
+    crate::AlgorithmIdentifierOwned,
+    der::{asn1::BitString, Any, Document},
+};
+
+#[cfg(feature = "pem")]
+use {
+    alloc::string::String,
+    der::pem::{LineEnding, PemLabel},
+};
+
+#[cfg(feature = "std")]
+use std::path::Path;
+
+#[cfg(doc)]
+use crate::SubjectPublicKeyInfo;
+
+/// Parse a public key object from an encoded SPKI document.
+pub trait DecodePublicKey: Sized {
+    /// Deserialize object from ASN.1 DER-encoded [`SubjectPublicKeyInfo`]
+    /// (binary format).
+    fn from_public_key_der(bytes: &[u8]) -> Result<Self>;
+
+    /// Deserialize PEM-encoded [`SubjectPublicKeyInfo`].
+    ///
+    /// Keys in this format begin with the following delimiter:
+    ///
+    /// ```text
+    /// -----BEGIN PUBLIC KEY-----
+    /// ```
+    #[cfg(feature = "pem")]
+    fn from_public_key_pem(s: &str) -> Result<Self> {
+        let (label, doc) = Document::from_pem(s)?;
+        SubjectPublicKeyInfoRef::validate_pem_label(label)?;
+        Self::from_public_key_der(doc.as_bytes())
+    }
+
+    /// Load public key object from an ASN.1 DER-encoded file on the local
+    /// filesystem (binary format).
+    #[cfg(feature = "std")]
+    fn read_public_key_der_file(path: impl AsRef<Path>) -> Result<Self> {
+        let doc = Document::read_der_file(path)?;
+        Self::from_public_key_der(doc.as_bytes())
+    }
+
+    /// Load public key object from a PEM-encoded file on the local filesystem.
+    #[cfg(all(feature = "pem", feature = "std"))]
+    fn read_public_key_pem_file(path: impl AsRef<Path>) -> Result<Self> {
+        let (label, doc) = Document::read_pem_file(path)?;
+        SubjectPublicKeyInfoRef::validate_pem_label(&label)?;
+        Self::from_public_key_der(doc.as_bytes())
+    }
+}
+
+impl<T> DecodePublicKey for T
+where
+    T: for<'a> TryFrom<SubjectPublicKeyInfoRef<'a>, Error = Error>,
+{
+    fn from_public_key_der(bytes: &[u8]) -> Result<Self> {
+        Self::try_from(SubjectPublicKeyInfoRef::try_from(bytes)?)
+    }
+}
+
+/// Serialize a public key object to a SPKI-encoded document.
+#[cfg(feature = "alloc")]
+pub trait EncodePublicKey {
+    /// Serialize a [`Document`] containing a SPKI-encoded public key.
+    fn to_public_key_der(&self) -> Result<Document>;
+
+    /// Serialize this public key as PEM-encoded SPKI with the given [`LineEnding`].
+    #[cfg(feature = "pem")]
+    fn to_public_key_pem(&self, line_ending: LineEnding) -> Result<String> {
+        let doc = self.to_public_key_der()?;
+        Ok(doc.to_pem(SubjectPublicKeyInfoRef::PEM_LABEL, line_ending)?)
+    }
+
+    /// Write ASN.1 DER-encoded public key to the given path
+    #[cfg(feature = "std")]
+    fn write_public_key_der_file(&self, path: impl AsRef<Path>) -> Result<()> {
+        Ok(self.to_public_key_der()?.write_der_file(path)?)
+    }
+
+    /// Write PEM-encoded public key to the given path
+    #[cfg(all(feature = "pem", feature = "std"))]
+    fn write_public_key_pem_file(
+        &self,
+        path: impl AsRef<Path>,
+        line_ending: LineEnding,
+    ) -> Result<()> {
+        let doc = self.to_public_key_der()?;
+        Ok(doc.write_pem_file(path, SubjectPublicKeyInfoRef::PEM_LABEL, line_ending)?)
+    }
+}
+
+/// Returns `AlgorithmIdentifier` associated with the structure.
+///
+/// This is useful e.g. for keys used in digital signature algorithms.
+pub trait AssociatedAlgorithmIdentifier {
+    /// Algorithm parameters.
+    type Params: Tagged + EncodeValue;
+
+    /// `AlgorithmIdentifier` for this structure.
+    const ALGORITHM_IDENTIFIER: AlgorithmIdentifier<Self::Params>;
+}
+
+/// Returns `AlgorithmIdentifier` associated with the structure.
+///
+/// This is useful e.g. for keys used in digital signature algorithms.
+#[cfg(feature = "alloc")]
+pub trait DynAssociatedAlgorithmIdentifier {
+    /// `AlgorithmIdentifier` for this structure.
+    fn algorithm_identifier(&self) -> Result<AlgorithmIdentifierOwned>;
+}
+
+#[cfg(feature = "alloc")]
+impl<T> DynAssociatedAlgorithmIdentifier for T
+where
+    T: AssociatedAlgorithmIdentifier,
+{
+    fn algorithm_identifier(&self) -> Result<AlgorithmIdentifierOwned> {
+        Ok(AlgorithmIdentifierOwned {
+            oid: T::ALGORITHM_IDENTIFIER.oid,
+            parameters: T::ALGORITHM_IDENTIFIER
+                .parameters
+                .as_ref()
+                .map(Any::encode_from)
+                .transpose()?,
+        })
+    }
+}
+
+/// Returns `AlgorithmIdentifier` associated with the signature system.
+///
+/// Unlike `AssociatedAlgorithmIdentifier`, this is intended to be implemented for public and/or
+/// private keys.
+pub trait SignatureAlgorithmIdentifier {
+    /// Algorithm parameters.
+    type Params: Tagged + EncodeValue;
+
+    /// `AlgorithmIdentifier` for the corresponding signature system.
+    const SIGNATURE_ALGORITHM_IDENTIFIER: AlgorithmIdentifier<Self::Params>;
+}
+
+/// Returns `AlgorithmIdentifier` associated with the signature system.
+///
+/// Unlike `AssociatedAlgorithmIdentifier`, this is intended to be implemented for public and/or
+/// private keys.
+#[cfg(feature = "alloc")]
+pub trait DynSignatureAlgorithmIdentifier {
+    /// `AlgorithmIdentifier` for the corresponding signature system.
+    fn signature_algorithm_identifier(&self) -> Result<AlgorithmIdentifierOwned>;
+}
+
+#[cfg(feature = "alloc")]
+impl<T> DynSignatureAlgorithmIdentifier for T
+where
+    T: SignatureAlgorithmIdentifier,
+{
+    fn signature_algorithm_identifier(&self) -> Result<AlgorithmIdentifierOwned> {
+        Ok(AlgorithmIdentifierOwned {
+            oid: T::SIGNATURE_ALGORITHM_IDENTIFIER.oid,
+            parameters: T::SIGNATURE_ALGORITHM_IDENTIFIER
+                .parameters
+                .as_ref()
+                .map(Any::encode_from)
+                .transpose()?,
+        })
+    }
+}
+
+/// Returns the `BitString` encoding of the signature.
+///
+/// X.509 and CSR structures require signatures to be BitString encoded.
+#[cfg(feature = "alloc")]
+pub trait SignatureBitStringEncoding {
+    /// `BitString` encoding for this signature.
+    fn to_bitstring(&self) -> der::Result<BitString>;
+}
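A sketch of how a downstream key type plugs into `AssociatedAlgorithmIdentifier` (the key struct and OID choice are illustrative, not taken from this CL):

```rust
use spki::der::asn1::AnyRef;
use spki::{AlgorithmIdentifier, AssociatedAlgorithmIdentifier, ObjectIdentifier};

/// Hypothetical Ed25519 verifying key, for illustration only.
pub struct VerifyingKey(pub [u8; 32]);

impl AssociatedAlgorithmIdentifier for VerifyingKey {
    type Params = AnyRef<'static>;

    // id-Ed25519 (RFC 8410); the algorithm takes no parameters.
    const ALGORITHM_IDENTIFIER: AlgorithmIdentifier<Self::Params> = AlgorithmIdentifier {
        oid: ObjectIdentifier::new_unwrap("1.3.101.112"),
        parameters: None,
    };
}
```

With the `alloc` feature, the blanket `DynAssociatedAlgorithmIdentifier` impl above then provides `algorithm_identifier()` for such a type without further code.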
diff --git a/crates/spki/tests/examples/ed25519-pub.der b/crates/spki/tests/examples/ed25519-pub.der
new file mode 100644
index 0000000..1b602ee
--- /dev/null
+++ b/crates/spki/tests/examples/ed25519-pub.der
Binary files differ
diff --git a/crates/spki/tests/examples/ed25519-pub.pem b/crates/spki/tests/examples/ed25519-pub.pem
new file mode 100644
index 0000000..6891701
--- /dev/null
+++ b/crates/spki/tests/examples/ed25519-pub.pem
@@ -0,0 +1,3 @@
+-----BEGIN PUBLIC KEY-----
+MCowBQYDK2VwAyEATSkWfz8ZEqb3rfopOgUaFcBexnuPFyZ7HFVQ3OhTvQ0=
+-----END PUBLIC KEY-----
diff --git a/crates/spki/tests/examples/p256-pub.der b/crates/spki/tests/examples/p256-pub.der
new file mode 100644
index 0000000..67c719c
--- /dev/null
+++ b/crates/spki/tests/examples/p256-pub.der
Binary files differ
diff --git a/crates/spki/tests/examples/p256-pub.pem b/crates/spki/tests/examples/p256-pub.pem
new file mode 100644
index 0000000..ee7e5b6
--- /dev/null
+++ b/crates/spki/tests/examples/p256-pub.pem
@@ -0,0 +1,4 @@
+-----BEGIN PUBLIC KEY-----
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEHKz/tV8vLO/YnYnrN0smgRUkUoAt
+7qCZFgaBN9g5z3/EgaREkjBNfvZqwRe+/oOo0I8VXytS+fYY3URwKQSODw==
+-----END PUBLIC KEY-----
diff --git a/crates/spki/tests/examples/rsa2048-pub.der b/crates/spki/tests/examples/rsa2048-pub.der
new file mode 100644
index 0000000..4148aaa
--- /dev/null
+++ b/crates/spki/tests/examples/rsa2048-pub.der
Binary files differ
diff --git a/crates/spki/tests/examples/rsa2048-pub.pem b/crates/spki/tests/examples/rsa2048-pub.pem
new file mode 100644
index 0000000..5ecd892
--- /dev/null
+++ b/crates/spki/tests/examples/rsa2048-pub.pem
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtsQsUV8QpqrygsY+2+JC
+Q6Fw8/omM71IM2N/R8pPbzbgOl0p78MZGsgPOQ2HSznjD0FPzsH8oO2B5Uftws04
+LHb2HJAYlz25+lN5cqfHAfa3fgmC38FfwBkn7l582UtPWZ/wcBOnyCgb3yLcvJrX
+yrt8QxHJgvWO23ITrUVYszImbXQ67YGS0YhMrbixRzmo2tpm3JcIBtnHrEUMsT0N
+fFdfsZhTT8YbxBvA8FdODgEwx7u/vf3J9qbi4+Kv8cvqyJuleIRSjVXPsIMnoejI
+n04APPKIjpMyQdnWlby7rNyQtE4+CV+jcFjqJbE/Xilcvqxt6DirjFCvYeKYl1uH
+LwIDAQAB
+-----END PUBLIC KEY-----
diff --git a/crates/spki/tests/spki.rs b/crates/spki/tests/spki.rs
new file mode 100644
index 0000000..f912d48
--- /dev/null
+++ b/crates/spki/tests/spki.rs
@@ -0,0 +1,161 @@
+//! `SubjectPublicKeyInfo` tests.
+
+use der::asn1::ObjectIdentifier;
+use hex_literal::hex;
+use spki::SubjectPublicKeyInfoRef;
+
+#[cfg(feature = "alloc")]
+use der::Encode;
+
+#[cfg(feature = "pem")]
+use der::{pem::LineEnding, EncodePem};
+
+/// Elliptic Curve (P-256) `SubjectPublicKeyInfo` encoded as ASN.1 DER
+const EC_P256_DER_EXAMPLE: &[u8] = include_bytes!("examples/p256-pub.der");
+
+/// Ed25519 `SubjectPublicKeyInfo` encoded as ASN.1 DER
+#[cfg(any(feature = "alloc", feature = "fingerprint"))]
+const ED25519_DER_EXAMPLE: &[u8] = include_bytes!("examples/ed25519-pub.der");
+
+/// RSA-2048 `SubjectPublicKeyInfo` encoded as ASN.1 DER
+const RSA_2048_DER_EXAMPLE: &[u8] = include_bytes!("examples/rsa2048-pub.der");
+
+/// Elliptic Curve (P-256) public key encoded as PEM
+#[cfg(feature = "pem")]
+const EC_P256_PEM_EXAMPLE: &str = include_str!("examples/p256-pub.pem");
+
+/// Ed25519 public key encoded as PEM
+#[cfg(feature = "pem")]
+const ED25519_PEM_EXAMPLE: &str = include_str!("examples/ed25519-pub.pem");
+
+/// RSA-2048 PKCS#8 public key encoded as PEM
+#[cfg(feature = "pem")]
+const RSA_2048_PEM_EXAMPLE: &str = include_str!("examples/rsa2048-pub.pem");
+
+/// The SPKI fingerprint of `ED25519_DER_EXAMPLE` as a Base64 string
+///
+/// Generated using `cat ed25519-pub.der | openssl dgst -binary -sha256 | base64`
+#[cfg(all(feature = "alloc", feature = "base64", feature = "fingerprint"))]
+const ED25519_SPKI_FINGERPRINT_BASE64: &str = "Vd1MdLDkhTTi9OFzzs61DfjyenrCqomRzHrpFOAwvO0=";
+
+/// The SPKI fingerprint of `ED25519_DER_EXAMPLE` as raw hash bytes
+///
+/// Generated using `cat ed25519-pub.der | openssl dgst -sha256`
+#[cfg(feature = "fingerprint")]
+const ED25519_SPKI_FINGERPRINT: &[u8] =
+    &hex!("55dd4c74b0e48534e2f4e173ceceb50df8f27a7ac2aa8991cc7ae914e030bced");
+
+#[test]
+fn decode_ec_p256_der() {
+    let spki = SubjectPublicKeyInfoRef::try_from(EC_P256_DER_EXAMPLE).unwrap();
+
+    assert_eq!(spki.algorithm.oid, "1.2.840.10045.2.1".parse().unwrap());
+
+    assert_eq!(
+        spki.algorithm
+            .parameters
+            .unwrap()
+            .decode_as::<ObjectIdentifier>()
+            .unwrap(),
+        "1.2.840.10045.3.1.7".parse().unwrap()
+    );
+
+    assert_eq!(spki.subject_public_key.raw_bytes(), &hex!("041CACFFB55F2F2CEFD89D89EB374B2681152452802DEEA09916068137D839CF7FC481A44492304D7EF66AC117BEFE83A8D08F155F2B52F9F618DD447029048E0F")[..]);
+}
+
+#[test]
+#[cfg(feature = "fingerprint")]
+fn decode_ed25519_and_fingerprint_spki() {
+    // Repeat the decode test from the pkcs8 crate
+    let spki = SubjectPublicKeyInfoRef::try_from(ED25519_DER_EXAMPLE).unwrap();
+
+    assert_eq!(spki.algorithm.oid, "1.3.101.112".parse().unwrap());
+    assert_eq!(spki.algorithm.parameters, None);
+    assert_eq!(
+        spki.subject_public_key.raw_bytes(),
+        &hex!("4D29167F3F1912A6F7ADFA293A051A15C05EC67B8F17267B1C5550DCE853BD0D")[..]
+    );
+
+    // Check the fingerprint
+    assert_eq!(
+        spki.fingerprint_bytes().unwrap().as_slice(),
+        ED25519_SPKI_FINGERPRINT
+    );
+}
+
+#[test]
+#[cfg(all(feature = "alloc", feature = "base64", feature = "fingerprint"))]
+fn decode_ed25519_and_fingerprint_base64() {
+    // Repeat the decode test from the pkcs8 crate
+    let spki = SubjectPublicKeyInfoRef::try_from(ED25519_DER_EXAMPLE).unwrap();
+
+    assert_eq!(spki.algorithm.oid, "1.3.101.112".parse().unwrap());
+    assert_eq!(spki.algorithm.parameters, None);
+    assert_eq!(
+        spki.subject_public_key.raw_bytes(),
+        &hex!("4D29167F3F1912A6F7ADFA293A051A15C05EC67B8F17267B1C5550DCE853BD0D")[..]
+    );
+
+    // Check the fingerprint
+    assert_eq!(
+        spki.fingerprint_base64().unwrap(),
+        ED25519_SPKI_FINGERPRINT_BASE64
+    );
+}
+
+#[test]
+fn decode_rsa_2048_der() {
+    let spki = SubjectPublicKeyInfoRef::try_from(RSA_2048_DER_EXAMPLE).unwrap();
+
+    assert_eq!(spki.algorithm.oid, "1.2.840.113549.1.1.1".parse().unwrap());
+    assert!(spki.algorithm.parameters.unwrap().is_null());
+    assert_eq!(spki.subject_public_key.raw_bytes(), &hex!("3082010A0282010100B6C42C515F10A6AAF282C63EDBE24243A170F3FA2633BD4833637F47CA4F6F36E03A5D29EFC3191AC80F390D874B39E30F414FCEC1FCA0ED81E547EDC2CD382C76F61C9018973DB9FA537972A7C701F6B77E0982DFC15FC01927EE5E7CD94B4F599FF07013A7C8281BDF22DCBC9AD7CABB7C4311C982F58EDB7213AD4558B332266D743AED8192D1884CADB8B14739A8DADA66DC970806D9C7AC450CB13D0D7C575FB198534FC61BC41BC0F0574E0E0130C7BBBFBDFDC9F6A6E2E3E2AFF1CBEAC89BA57884528D55CFB08327A1E8C89F4E003CF2888E933241D9D695BCBBACDC90B44E3E095FA37058EA25B13F5E295CBEAC6DE838AB8C50AF61E298975B872F0203010001")[..]);
+}
+
+#[test]
+#[cfg(feature = "alloc")]
+fn encode_ec_p256_der() {
+    let pk = SubjectPublicKeyInfoRef::try_from(EC_P256_DER_EXAMPLE).unwrap();
+    let pk_encoded = pk.to_der().unwrap();
+    assert_eq!(EC_P256_DER_EXAMPLE, pk_encoded.as_slice());
+}
+
+#[test]
+#[cfg(feature = "alloc")]
+fn encode_ed25519_der() {
+    let pk = SubjectPublicKeyInfoRef::try_from(ED25519_DER_EXAMPLE).unwrap();
+    let pk_encoded = pk.to_der().unwrap();
+    assert_eq!(ED25519_DER_EXAMPLE, pk_encoded.as_slice());
+}
+
+#[test]
+#[cfg(feature = "alloc")]
+fn encode_rsa_2048_der() {
+    let pk = SubjectPublicKeyInfoRef::try_from(RSA_2048_DER_EXAMPLE).unwrap();
+    let pk_encoded = pk.to_der().unwrap();
+    assert_eq!(RSA_2048_DER_EXAMPLE, pk_encoded.as_slice());
+}
+
+#[test]
+#[cfg(feature = "pem")]
+fn encode_ec_p256_pem() {
+    let pk = SubjectPublicKeyInfoRef::try_from(EC_P256_DER_EXAMPLE).unwrap();
+    let pk_encoded = pk.to_pem(LineEnding::LF).unwrap();
+    assert_eq!(EC_P256_PEM_EXAMPLE, pk_encoded);
+}
+
+#[test]
+#[cfg(feature = "pem")]
+fn encode_ed25519_pem() {
+    let pk = SubjectPublicKeyInfoRef::try_from(ED25519_DER_EXAMPLE).unwrap();
+    let pk_encoded = pk.to_pem(LineEnding::LF).unwrap();
+    assert_eq!(ED25519_PEM_EXAMPLE, pk_encoded);
+}
+
+#[test]
+#[cfg(feature = "pem")]
+fn encode_rsa_2048_pem() {
+    let pk = SubjectPublicKeyInfoRef::try_from(RSA_2048_DER_EXAMPLE).unwrap();
+    let pk_encoded = pk.to_pem(LineEnding::LF).unwrap();
+    assert_eq!(RSA_2048_PEM_EXAMPLE, pk_encoded);
+}
diff --git a/crates/spki/tests/traits.rs b/crates/spki/tests/traits.rs
new file mode 100644
index 0000000..1114333
--- /dev/null
+++ b/crates/spki/tests/traits.rs
@@ -0,0 +1,102 @@
+//! Tests for SPKI encoding/decoding traits.
+
+#![cfg(any(feature = "pem", feature = "std"))]
+
+use der::{Decode, Encode};
+use spki::{DecodePublicKey, Document, EncodePublicKey, Error, Result, SubjectPublicKeyInfoRef};
+
+#[cfg(feature = "pem")]
+use spki::der::pem::LineEnding;
+
+#[cfg(feature = "std")]
+use tempfile::tempdir;
+
+#[cfg(all(feature = "pem", feature = "std"))]
+use std::fs;
+
+/// Ed25519 `SubjectPublicKeyInfo` encoded as ASN.1 DER
+const ED25519_DER_EXAMPLE: &[u8] = include_bytes!("examples/ed25519-pub.der");
+
+/// Ed25519 public key encoded as PEM
+#[cfg(feature = "pem")]
+const ED25519_PEM_EXAMPLE: &str = include_str!("examples/ed25519-pub.pem");
+
+/// Mock key type for testing trait impls against.
+pub struct MockKey(Vec<u8>);
+
+impl AsRef<[u8]> for MockKey {
+    fn as_ref(&self) -> &[u8] {
+        self.0.as_ref()
+    }
+}
+
+impl EncodePublicKey for MockKey {
+    fn to_public_key_der(&self) -> Result<Document> {
+        Ok(Document::from_der(self.as_ref())?)
+    }
+}
+
+impl TryFrom<SubjectPublicKeyInfoRef<'_>> for MockKey {
+    type Error = Error;
+
+    fn try_from(spki: SubjectPublicKeyInfoRef<'_>) -> Result<MockKey> {
+        Ok(MockKey(spki.to_der()?))
+    }
+}
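+
+// Note: `MockKey` only implements `EncodePublicKey` and
+// `TryFrom<SubjectPublicKeyInfoRef<'_>>` directly; the `from_public_key_pem`,
+// `read_public_key_*_file`, and `write_public_key_*_file` calls exercised in the
+// tests below come from the provided/blanket methods of spki's `DecodePublicKey`
+// and `EncodePublicKey` traits.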
+
+#[cfg(feature = "pem")]
+#[test]
+fn from_public_key_pem() {
+    let key = MockKey::from_public_key_pem(ED25519_PEM_EXAMPLE).unwrap();
+    assert_eq!(key.as_ref(), ED25519_DER_EXAMPLE);
+}
+
+#[cfg(feature = "std")]
+#[test]
+fn read_public_key_der_file() {
+    let key = MockKey::read_public_key_der_file("tests/examples/ed25519-pub.der").unwrap();
+    assert_eq!(key.as_ref(), ED25519_DER_EXAMPLE);
+}
+
+#[cfg(all(feature = "pem", feature = "std"))]
+#[test]
+fn read_public_key_pem_file() {
+    let key = MockKey::read_public_key_pem_file("tests/examples/ed25519-pub.pem").unwrap();
+    assert_eq!(key.as_ref(), ED25519_DER_EXAMPLE);
+}
+
+#[cfg(feature = "pem")]
+#[test]
+fn to_public_key_pem() {
+    let pem = MockKey(ED25519_DER_EXAMPLE.to_vec())
+        .to_public_key_pem(LineEnding::LF)
+        .unwrap();
+
+    assert_eq!(pem, ED25519_PEM_EXAMPLE);
+}
+
+#[cfg(feature = "std")]
+#[test]
+fn write_public_key_der_file() {
+    let dir = tempdir().unwrap();
+    let path = dir.path().join("example.der");
+    MockKey(ED25519_DER_EXAMPLE.to_vec())
+        .write_public_key_der_file(&path)
+        .unwrap();
+
+    let key = MockKey::read_public_key_der_file(&path).unwrap();
+    assert_eq!(key.as_ref(), ED25519_DER_EXAMPLE);
+}
+
+#[cfg(all(feature = "pem", feature = "std"))]
+#[test]
+fn write_public_key_pem_file() {
+    let dir = tempdir().unwrap();
+    let path = dir.path().join("example.pem");
+    MockKey(ED25519_DER_EXAMPLE.to_vec())
+        .write_public_key_pem_file(&path, LineEnding::LF)
+        .unwrap();
+
+    let pem = fs::read_to_string(path).unwrap();
+    assert_eq!(&pem, ED25519_PEM_EXAMPLE);
+}
diff --git a/crates/strsim/.cargo-checksum.json b/crates/strsim/.cargo-checksum.json
new file mode 100644
index 0000000..dc4b8e7
--- /dev/null
+++ b/crates/strsim/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"37aed92999cbdf75969bd1d8e8c85bfda5ddf95a4eef2a6fb1c1371cf8304c3e","Cargo.toml":"8c41e90e312a6e7e1be6e3c10873ea7eb740a577f3d6d54f66798995b4555ce1","LICENSE":"1e697ce8d21401fbf1bddd9b5c3fd4c4c79ae1e3bdf51f81761c85e11d5a89cd","README.md":"599d424147dfbf88943bb6d78cebca346488fd246611917b586d73502a684c3a","benches/benches.rs":"2f7fae162a517378b42af04b4b077ffd563171f7341cba55b4efca3b4c30426a","src/lib.rs":"2e340450050784ea8e6e19ea36cbedcf9084ecb7829fc206cacdca4f6f069784","tests/lib.rs":"4c8207a5728b82836795e2f87d7d7834db7276082f5ded640f34822feb750cb4"},"package":"5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01"}
\ No newline at end of file
diff --git a/crates/strsim/Android.bp b/crates/strsim/Android.bp
new file mode 100644
index 0000000..63ba4e6
--- /dev/null
+++ b/crates/strsim/Android.bp
@@ -0,0 +1,61 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_strsim_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_strsim_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-MIT"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libstrsim",
+    host_supported: true,
+    crate_name: "strsim",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.0",
+    crate_root: "src/lib.rs",
+    edition: "2015",
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
+
+rust_test {
+    name: "strsim_test_src_lib",
+    host_supported: true,
+    crate_name: "strsim",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.0",
+    crate_root: "src/lib.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2015",
+}
+
+rust_test {
+    name: "strsim_test_tests_lib",
+    host_supported: true,
+    crate_name: "lib",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.11.0",
+    crate_root: "tests/lib.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2015",
+    rustlibs: ["libstrsim"],
+}
diff --git a/crates/strsim/CHANGELOG.md b/crates/strsim/CHANGELOG.md
new file mode 100644
index 0000000..9c188a6
--- /dev/null
+++ b/crates/strsim/CHANGELOG.md
@@ -0,0 +1,226 @@
+# Change Log
+
+This project attempts to adhere to [Semantic Versioning](http://semver.org).
+
+## [Unreleased]
+
+## [0.11.0] - (2024-01-07)
+
+### Changed
+
+- improve OSA implementation
+  - reduce runtime
+  - reduce binary size by more than `25%`
+
+- reduce binary size of Levenshtein distance
+
+- improve Damerau-Levenshtein implementation
+  - reduce memory usage from `O(N*M)` to `O(N+M)`
+  - reduce runtime in our own benchmark by more than `70%`
+  - reduce binary size by more than `25%`
+
+- only boost similarity in Jaro-Winkler once the Jaro similarity exceeds 0.7
+
+### Fixed
+
+- Fix transposition counting in Jaro and Jaro-Winkler.
+- Limit common prefix in Jaro-Winkler to 4 characters
+
+## [0.10.0] - (2020-01-31)
+
+### Added
+
+- Sørensen-Dice implementation (thanks [@robjtede](https://github.com/robjtede))
+
+## [0.9.3] - (2019-12-12)
+
+### Fixed
+
+- Fix Jaro and Jaro-Winkler when the arguments have lengths of 1 and are equal.
+  Previously, the functions would erroneously return 0 instead of 1. Thanks to
+  [@vvrably](https://github.com/vvrably) for pointing out the issue.
+
+## [0.9.2] - (2019-05-09)
+
+### Changed
+
+- Revert back to the standard library hashmap because it will use hashbrown very
+  soon
+- Remove ndarray in favor of using a single vector to represent the 2d grid in
+  Damerau-Levenshtein
+
+## [0.9.1] - (2019-04-08)
+
+### Changed
+
+- Faster Damerau-Levenshtein implementation (thanks [@lovasoa](https://github.com/lovasoa))
+
+## [0.9.0] - (2019-04-06)
+
+### Added
+
+- Generic distance functions (thanks [@lovasoa](https://github.com/lovasoa))
+
+## [0.8.0] - (2018-08-19)
+
+### Added
+
+- Normalized versions of Levenshtein and Damerau-Levenshtein (thanks [@gentoid](https://github.com/gentoid))
+
+## [0.7.0] - (2018-01-17)
+
+### Changed
+
+- Faster Levenshtein implementation (thanks [@wdv4758h](https://github.com/wdv4758h))
+
+### Removed
+
+- Remove the "against_vec" functions. They are one-liners now, so they don't
+  seem to add enough value to justify making the API larger. I didn't find
+  anybody using them when I skimmed through a GitHub search. If you do use them,
+  you can change the calls to something like:
+```rust
+let distances = strings.iter().map(|a| jaro(target, a)).collect();
+```
+
+## [0.6.0] - (2016-12-26)
+
+### Added
+
+- Add optimal string alignment distance
+
+### Fixed
+
+- Fix Damerau-Levenshtein implementation (previous implementation was actually
+  optimal string alignment; see this [Damerau-Levenshtein explanation])
+
+## [0.5.2] - (2016-11-21)
+
+### Changed
+
+- Remove Cargo generated documentation in favor of a [docs.rs] link
+
+## [0.5.1] - (2016-08-23)
+
+### Added
+
+- Add Cargo generated documentation
+
+### Fixed
+
+- Fix panic when Jaro or Jaro-Winkler are given strings both with a length of
+  one
+
+## [0.5.0] - (2016-08-11)
+
+### Changed
+
+- Make Hamming faster (thanks @IBUzPE9) when the two strings have the same
+  length but slower when they have different lengths
+
+## [0.4.1] - (2016-04-18)
+
+### Added
+
+- Add Vagrant setup for development
+- Add AppVeyor configuration for Windows CI
+
+### Fixed
+
+- Fix metrics when given strings with multibyte characters (thanks @WanzenBug)
+
+## [0.4.0] - (2015-06-10)
+
+### Added
+
+- For each metric, add a function that takes a vector of strings and returns a
+  vector of results (thanks @ovarene)
+
+## [0.3.0] - (2015-04-30)
+
+### Changed
+
+- Remove usage of unstable Rust features
+
+## [0.2.5] - (2015-04-24)
+
+### Fixed
+
+- Remove unnecessary `Float` import from doc tests
+
+## [0.2.4] - (2015-04-15)
+
+### Fixed
+
+- Remove unused `core` feature flag
+
+## [0.2.3] - (2015-04-01)
+
+### Fixed
+
+- Remove now unnecessary `Float` import
+
+## [0.2.2] - (2015-03-29)
+
+### Fixed
+
+- Remove usage of `char_at` (marked as unstable)
+
+## [0.2.1] - (2015-02-20)
+
+### Fixed
+
+- Update bit vector import to match Rust update
+
+## [0.2.0] - (2015-02-19)
+
+### Added
+
+- Implement Damerau-Levenshtein
+- Add tests in docs
+
+## [0.1.1] - (2015-02-10)
+
+### Added
+
+- Configure Travis for CI
+- Add rustdoc comments
+
+### Fixed
+
+- Limit Jaro-Winkler return value to a maximum of 1.0
+- Fix float comparisons in tests
+
+## [0.1.0] - (2015-02-09)
+
+### Added
+
+- Implement Hamming, Jaro, Jaro-Winkler, and Levenshtein
+
+[Unreleased]: https://github.com/rapidfuzz/strsim-rs/compare/0.11.0...HEAD
+[0.11.0]: https://github.com/rapidfuzz/strsim-rs/compare/0.10.0...0.11.0
+[0.10.0]: https://github.com/rapidfuzz/strsim-rs/compare/0.9.3...0.10.0
+[0.9.3]: https://github.com/rapidfuzz/strsim-rs/compare/0.9.2...0.9.3
+[0.9.2]: https://github.com/rapidfuzz/strsim-rs/compare/0.9.1...0.9.2
+[0.9.1]: https://github.com/rapidfuzz/strsim-rs/compare/0.9.0...0.9.1
+[0.9.0]: https://github.com/rapidfuzz/strsim-rs/compare/0.8.0...0.9.0
+[0.8.0]: https://github.com/rapidfuzz/strsim-rs/compare/0.7.0...0.8.0
+[0.7.0]: https://github.com/rapidfuzz/strsim-rs/compare/0.6.0...0.7.0
+[0.6.0]: https://github.com/rapidfuzz/strsim-rs/compare/0.5.2...0.6.0
+[0.5.2]: https://github.com/rapidfuzz/strsim-rs/compare/0.5.1...0.5.2
+[0.5.1]: https://github.com/rapidfuzz/strsim-rs/compare/0.5.0...0.5.1
+[0.5.0]: https://github.com/rapidfuzz/strsim-rs/compare/0.4.1...0.5.0
+[0.4.1]: https://github.com/rapidfuzz/strsim-rs/compare/0.4.0...0.4.1
+[0.4.0]: https://github.com/rapidfuzz/strsim-rs/compare/0.3.0...0.4.0
+[0.3.0]: https://github.com/rapidfuzz/strsim-rs/compare/0.2.5...0.3.0
+[0.2.5]: https://github.com/rapidfuzz/strsim-rs/compare/0.2.4...0.2.5
+[0.2.4]: https://github.com/rapidfuzz/strsim-rs/compare/0.2.3...0.2.4
+[0.2.3]: https://github.com/rapidfuzz/strsim-rs/compare/0.2.2...0.2.3
+[0.2.2]: https://github.com/rapidfuzz/strsim-rs/compare/0.2.1...0.2.2
+[0.2.1]: https://github.com/rapidfuzz/strsim-rs/compare/0.2.0...0.2.1
+[0.2.0]: https://github.com/rapidfuzz/strsim-rs/compare/0.1.1...0.2.0
+[0.1.1]: https://github.com/rapidfuzz/strsim-rs/compare/0.1.0...0.1.1
+[0.1.0]: https://github.com/rapidfuzz/strsim-rs/compare/fabad4...0.1.0
+[docs.rs]: https://docs.rs/strsim/
+[Damerau-Levenshtein explanation]:
+http://scarcitycomputing.blogspot.com/2013/04/damerau-levenshtein-edit-distance.html
diff --git a/crates/strsim/Cargo.lock b/crates/strsim/Cargo.lock
new file mode 100644
index 0000000..55257f4
--- /dev/null
+++ b/crates/strsim/Cargo.lock
@@ -0,0 +1,7 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "strsim"
+version = "0.11.0"
diff --git a/crates/strsim/Cargo.toml b/crates/strsim/Cargo.toml
new file mode 100644
index 0000000..ba871a0
--- /dev/null
+++ b/crates/strsim/Cargo.toml
@@ -0,0 +1,39 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+name = "strsim"
+version = "0.11.0"
+authors = [
+    "Danny Guo <danny@dannyguo.com>",
+    "maxbachmann <oss@maxbachmann.de>",
+]
+exclude = [
+    "/.github",
+    "/dev",
+]
+description = """
+Implementations of string similarity metrics. Includes Hamming, Levenshtein,
+OSA, Damerau-Levenshtein, Jaro, Jaro-Winkler, and Sørensen-Dice.
+"""
+homepage = "https://github.com/rapidfuzz/strsim-rs"
+documentation = "https://docs.rs/strsim/"
+readme = "README.md"
+keywords = [
+    "string",
+    "similarity",
+    "Hamming",
+    "Levenshtein",
+    "Jaro",
+]
+categories = ["text-processing"]
+license = "MIT"
+repository = "https://github.com/rapidfuzz/strsim-rs"
diff --git a/crates/strsim/LICENSE b/crates/strsim/LICENSE
new file mode 100644
index 0000000..8d1fbe1
--- /dev/null
+++ b/crates/strsim/LICENSE
@@ -0,0 +1,23 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Danny Guo
+Copyright (c) 2016 Titus Wormer <tituswormer@gmail.com>
+Copyright (c) 2018 Akash Kurdekar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/crates/strsim/METADATA b/crates/strsim/METADATA
new file mode 100644
index 0000000..008980c
--- /dev/null
+++ b/crates/strsim/METADATA
@@ -0,0 +1,20 @@
+name: "strsim"
+description: "()"
+third_party {
+  identifier {
+    type: "crates.io"
+    value: "strsim"
+  }
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/strsim/strsim-0.11.0.crate"
+    primary_source: true
+  }
+  version: "0.11.0"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2024
+    month: 2
+    day: 22
+  }
+}
diff --git a/crates/strsim/MODULE_LICENSE_MIT b/crates/strsim/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/strsim/MODULE_LICENSE_MIT
diff --git a/crates/strsim/README.md b/crates/strsim/README.md
new file mode 100644
index 0000000..45d0851
--- /dev/null
+++ b/crates/strsim/README.md
@@ -0,0 +1,102 @@
+# strsim-rs
+
+[![Crates.io](https://img.shields.io/crates/v/strsim.svg)](https://crates.io/crates/strsim)
+[![Crates.io](https://img.shields.io/crates/l/strsim.svg?maxAge=2592000)](https://github.com/rapidfuzz/strsim-rs/blob/main/LICENSE)
+[![CI status](https://github.com/rapidfuzz/strsim-rs/workflows/CI/badge.svg)](https://github.com/rapidfuzz/strsim-rs/actions?query=branch%3Amain)
+[![unsafe forbidden](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/)
+
+[Rust](https://www.rust-lang.org) implementations of [string similarity metrics]:
+  - [Hamming]
+  - [Levenshtein] - distance & normalized
+  - [Optimal string alignment]
+  - [Damerau-Levenshtein] - distance & normalized
+  - [Jaro and Jaro-Winkler]
+  - [Sørensen-Dice]
+
+The normalized versions return values between `0.0` and `1.0`, where `1.0` means
+an exact match.
+
+There are also generic versions of the functions for non-string inputs.
+
+## Installation
+
+`strsim` is available on [crates.io](https://crates.io/crates/strsim). Add it to
+your project:
+
+```sh
+cargo add strsim
+```
+
+## Usage
+
+Go to [Docs.rs](https://docs.rs/strsim/) for the full documentation. You can
+also clone the repo, and run `$ cargo doc --open`.
+
+### Examples
+
+```rust
+extern crate strsim;
+
+use strsim::{hamming, levenshtein, normalized_levenshtein, osa_distance,
+             damerau_levenshtein, normalized_damerau_levenshtein, jaro,
+             jaro_winkler, sorensen_dice};
+
+fn main() {
+    match hamming("hamming", "hammers") {
+        Ok(distance) => assert_eq!(3, distance),
+        Err(why) => panic!("{:?}", why)
+    }
+
+    assert_eq!(levenshtein("kitten", "sitting"), 3);
+
+    assert!((normalized_levenshtein("kitten", "sitting") - 0.571).abs() < 0.001);
+
+    assert_eq!(osa_distance("ac", "cba"), 3);
+
+    assert_eq!(damerau_levenshtein("ac", "cba"), 2);
+
+    assert!((normalized_damerau_levenshtein("levenshtein", "löwenbräu") - 0.272).abs() <
+            0.001);
+
+    assert!((jaro("Friedrich Nietzsche", "Jean-Paul Sartre") - 0.392).abs() <
+            0.001);
+
+    assert!((jaro_winkler("cheeseburger", "cheese fries") - 0.866).abs() <
+            0.001);
+
+    assert_eq!(sorensen_dice("web applications", "applications of the web"),
+        0.7878787878787878);
+}
+```
+
+Using the generic versions of the functions:
+
+```rust
+extern crate strsim;
+
+use strsim::generic_levenshtein;
+
+fn main() {
+    assert_eq!(2, generic_levenshtein(&[1, 2, 3], &[0, 2, 5]));
+}
+```
+
+## Contributing
+
+If you don't want to install Rust itself, you can run `$ ./dev` for a
+development CLI if you have [Docker] installed.
+
+Benchmarks require a Nightly toolchain. Run `$ cargo +nightly bench`.
+
+## License
+
+[MIT](https://github.com/rapidfuzz/strsim-rs/blob/main/LICENSE)
+
+[string similarity metrics]:http://en.wikipedia.org/wiki/String_metric
+[Damerau-Levenshtein]:http://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance
+[Jaro and Jaro-Winkler]:http://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance
+[Levenshtein]:http://en.wikipedia.org/wiki/Levenshtein_distance
+[Hamming]:http://en.wikipedia.org/wiki/Hamming_distance
+[Optimal string alignment]:https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance#Optimal_string_alignment_distance
+[Sørensen-Dice]:http://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
+[Docker]:https://docs.docker.com/engine/installation/
diff --git a/crates/strsim/benches/benches.rs b/crates/strsim/benches/benches.rs
new file mode 100644
index 0000000..15c7041
--- /dev/null
+++ b/crates/strsim/benches/benches.rs
@@ -0,0 +1,95 @@
+//! Benchmarks for strsim.
+
+#![feature(test)]
+
+extern crate strsim;
+extern crate test;
+use self::test::Bencher;
+
+#[bench]
+fn bench_hamming(bencher: &mut Bencher) {
+    let a = "ACAAGATGCCATTGTCCCCCGGCCTCCTGCTGCTGCTGCTCTCCGGGG";
+    let b = "CCTGGAGGGTGGCCCCACCGGCCGAGACAGCGAGCATATGCAGGAAGC";
+    bencher.iter(|| {
+        strsim::hamming(a, b).unwrap();
+    })
+}
+
+#[bench]
+fn bench_jaro(bencher: &mut Bencher) {
+    let a = "Philosopher Friedrich Nietzsche";
+    let b = "Philosopher Jean-Paul Sartre";
+    bencher.iter(|| {
+        strsim::jaro(a, b);
+    })
+}
+
+#[bench]
+fn bench_jaro_winkler(bencher: &mut Bencher) {
+    let a = "Philosopher Friedrich Nietzsche";
+    let b = "Philosopher Jean-Paul Sartre";
+    bencher.iter(|| {
+        strsim::jaro_winkler(a, b);
+    })
+}
+
+#[bench]
+fn bench_levenshtein(bencher: &mut Bencher) {
+    let a = "Philosopher Friedrich Nietzsche";
+    let b = "Philosopher Jean-Paul Sartre";
+    bencher.iter(|| {
+        strsim::levenshtein(a, b);
+    })
+}
+
+#[bench]
+fn bench_levenshtein_on_u8(bencher: &mut Bencher) {
+    bencher.iter(|| {
+        strsim::generic_levenshtein(&vec![0u8; 30], &vec![7u8; 31]);
+    })
+}
+
+#[bench]
+fn bench_normalized_levenshtein(bencher: &mut Bencher) {
+    let a = "Philosopher Friedrich Nietzsche";
+    let b = "Philosopher Jean-Paul Sartre";
+    bencher.iter(|| {
+        strsim::normalized_levenshtein(a, b);
+    })
+}
+
+#[bench]
+fn bench_osa_distance(bencher: &mut Bencher) {
+    let a = "Philosopher Friedrich Nietzsche";
+    let b = "Philosopher Jean-Paul Sartre";
+    bencher.iter(|| {
+        strsim::osa_distance(a, b);
+    })
+}
+
+#[bench]
+fn bench_damerau_levenshtein(bencher: &mut Bencher) {
+    let a = "Philosopher Friedrich Nietzsche";
+    let b = "Philosopher Jean-Paul Sartre";
+    bencher.iter(|| {
+        strsim::damerau_levenshtein(a, b);
+    })
+}
+
+#[bench]
+fn bench_normalized_damerau_levenshtein(bencher: &mut Bencher) {
+    let a = "Philosopher Friedrich Nietzsche";
+    let b = "Philosopher Jean-Paul Sartre";
+    bencher.iter(|| {
+        strsim::normalized_damerau_levenshtein(a, b);
+    })
+}
+
+#[bench]
+fn bench_sorensen_dice(bencher: &mut Bencher) {
+    let a = "Philosopher Friedrich Nietzsche";
+    let b = "Philosopher Jean-Paul Sartre";
+    bencher.iter(|| {
+        strsim::sorensen_dice(a, b);
+    })
+}
diff --git a/crates/strsim/cargo_embargo.json b/crates/strsim/cargo_embargo.json
new file mode 100644
index 0000000..9a0a579
--- /dev/null
+++ b/crates/strsim/cargo_embargo.json
@@ -0,0 +1,3 @@
+{
+  "tests": true
+}
diff --git a/crates/strsim/src/lib.rs b/crates/strsim/src/lib.rs
new file mode 100644
index 0000000..8118277
--- /dev/null
+++ b/crates/strsim/src/lib.rs
@@ -0,0 +1,1307 @@
+//! This library implements string similarity metrics.
+
+#![forbid(unsafe_code)]
+#![allow(
+    // these casts are sometimes needed. They restrict the length of input iterators
+    // but there isn't really any way around this except for always working with
+    // 128 bit types
+    clippy::cast_possible_wrap,
+    clippy::cast_sign_loss,
+    clippy::cast_precision_loss,
+    // not practical
+    clippy::needless_pass_by_value,
+    clippy::similar_names,
+    // noisy
+    clippy::missing_errors_doc,
+    clippy::missing_panics_doc,
+    clippy::must_use_candidate,
+    // todo https://github.com/rapidfuzz/strsim-rs/issues/59
+    clippy::range_plus_one
+)]
+
+use std::char;
+use std::cmp::{max, min};
+use std::collections::HashMap;
+use std::convert::TryFrom;
+use std::error::Error;
+use std::fmt::{self, Display, Formatter};
+use std::hash::Hash;
+use std::mem;
+use std::str::Chars;
+
+#[derive(Debug, PartialEq)]
+pub enum StrSimError {
+    DifferentLengthArgs,
+}
+
+impl Display for StrSimError {
+    fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
+        let text = match self {
+            StrSimError::DifferentLengthArgs => "Differing length arguments provided",
+        };
+
+        write!(fmt, "{text}")
+    }
+}
+
+impl Error for StrSimError {}
+
+pub type HammingResult = Result<usize, StrSimError>;
+
+/// Calculates the number of positions in the two sequences where the elements
+/// differ. Returns an error if the sequences have different lengths.
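+///
+/// Example (mirroring the `hamming_numbers` unit test in this file):
+///
+/// ```
+/// use strsim::generic_hamming;
+///
+/// assert_eq!(Ok(1), generic_hamming(&[1, 2, 4], &[1, 2, 3]));
+/// ```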
+pub fn generic_hamming<Iter1, Iter2, Elem1, Elem2>(a: Iter1, b: Iter2) -> HammingResult
+where
+    Iter1: IntoIterator<Item = Elem1>,
+    Iter2: IntoIterator<Item = Elem2>,
+    Elem1: PartialEq<Elem2>,
+{
+    let (mut ita, mut itb) = (a.into_iter(), b.into_iter());
+    let mut count = 0;
+    loop {
+        match (ita.next(), itb.next()) {
+            (Some(x), Some(y)) => {
+                if x != y {
+                    count += 1;
+                }
+            }
+            (None, None) => return Ok(count),
+            _ => return Err(StrSimError::DifferentLengthArgs),
+        }
+    }
+}
+
+/// Calculates the number of positions in the two strings where the characters
+/// differ. Returns an error if the strings have different lengths.
+///
+/// ```
+/// use strsim::{hamming, StrSimError::DifferentLengthArgs};
+///
+/// assert_eq!(Ok(3), hamming("hamming", "hammers"));
+///
+/// assert_eq!(Err(DifferentLengthArgs), hamming("hamming", "ham"));
+/// ```
+pub fn hamming(a: &str, b: &str) -> HammingResult {
+    generic_hamming(a.chars(), b.chars())
+}
+
+/// Calculates the Jaro similarity between two sequences. The returned value
+/// is between 0.0 and 1.0 (higher value means more similar).
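+///
+/// Example (mirroring the `generic_jaro_diff` unit test in this file):
+///
+/// ```
+/// use strsim::generic_jaro;
+///
+/// // sequences with no matching elements have a similarity of 0.0
+/// assert_eq!(0.0, generic_jaro(&[1, 2], &[3, 4]));
+/// ```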
+pub fn generic_jaro<'a, 'b, Iter1, Iter2, Elem1, Elem2>(a: &'a Iter1, b: &'b Iter2) -> f64
+where
+    &'a Iter1: IntoIterator<Item = Elem1>,
+    &'b Iter2: IntoIterator<Item = Elem2>,
+    Elem1: PartialEq<Elem2>,
+{
+    let a_len = a.into_iter().count();
+    let b_len = b.into_iter().count();
+
+    if a_len == 0 && b_len == 0 {
+        return 1.0;
+    } else if a_len == 0 || b_len == 0 {
+        return 0.0;
+    }
+
+    let mut search_range = max(a_len, b_len) / 2;
+    search_range = search_range.saturating_sub(1);
+
+    // combine memory allocations to reduce runtime
+    let mut flags_memory = vec![false; a_len + b_len];
+    let (a_flags, b_flags) = flags_memory.split_at_mut(a_len);
+
+    let mut matches = 0_usize;
+
+    for (i, a_elem) in a.into_iter().enumerate() {
+        // prevent integer wrapping
+        let min_bound = if i > search_range {
+            i - search_range
+        } else {
+            0
+        };
+
+        let max_bound = min(b_len, i + search_range + 1);
+
+        for (j, b_elem) in b.into_iter().enumerate().take(max_bound) {
+            if min_bound <= j && a_elem == b_elem && !b_flags[j] {
+                a_flags[i] = true;
+                b_flags[j] = true;
+                matches += 1;
+                break;
+            }
+        }
+    }
+
+    let mut transpositions = 0_usize;
+    if matches != 0 {
+        let mut b_iter = b_flags.iter().zip(b);
+        for (a_flag, ch1) in a_flags.iter().zip(a) {
+            if *a_flag {
+                loop {
+                    if let Some((b_flag, ch2)) = b_iter.next() {
+                        if !*b_flag {
+                            continue;
+                        }
+
+                        if ch1 != ch2 {
+                            transpositions += 1;
+                        }
+                        break;
+                    }
+                }
+            }
+        }
+    }
+    transpositions /= 2;
+
+    if matches == 0 {
+        0.0
+    } else {
+        ((matches as f64 / a_len as f64)
+            + (matches as f64 / b_len as f64)
+            + ((matches - transpositions) as f64 / matches as f64))
+            / 3.0
+    }
+}
+
+struct StringWrapper<'a>(&'a str);
+
+impl<'a, 'b> IntoIterator for &'a StringWrapper<'b> {
+    type Item = char;
+    type IntoIter = Chars<'b>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.0.chars()
+    }
+}
+
+/// Calculates the Jaro similarity between two strings. The returned value
+/// is between 0.0 and 1.0 (higher value means more similar).
+///
+/// ```
+/// use strsim::jaro;
+///
+/// assert!((0.392 - jaro("Friedrich Nietzsche", "Jean-Paul Sartre")).abs() <
+///         0.001);
+/// ```
+pub fn jaro(a: &str, b: &str) -> f64 {
+    generic_jaro(&StringWrapper(a), &StringWrapper(b))
+}
+
+/// Like Jaro but gives a boost to sequences that have a common prefix.
+pub fn generic_jaro_winkler<'a, 'b, Iter1, Iter2, Elem1, Elem2>(a: &'a Iter1, b: &'b Iter2) -> f64
+where
+    &'a Iter1: IntoIterator<Item = Elem1>,
+    &'b Iter2: IntoIterator<Item = Elem2>,
+    Elem1: PartialEq<Elem2>,
+{
+    let sim = generic_jaro(a, b);
+
+    if sim > 0.7 {
+        let prefix_length = a
+            .into_iter()
+            .take(4)
+            .zip(b)
+            .take_while(|(a_elem, b_elem)| a_elem == b_elem)
+            .count();
+
+        sim + 0.1 * prefix_length as f64 * (1.0 - sim)
+    } else {
+        sim
+    }
+}
+
+/// Like Jaro but gives a boost to strings that have a common prefix.
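+///
+/// As a worked example using values from this crate's own unit tests: the Jaro
+/// similarity of "martha" and "marhta" is roughly 0.944 and the strings share a
+/// 3-character prefix, so the Winkler boost gives
+/// 0.944 + 0.1 * 3 * (1.0 - 0.944) ≈ 0.961.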
+///
+/// ```
+/// use strsim::jaro_winkler;
+///
+/// assert!((0.866 - jaro_winkler("cheeseburger", "cheese fries")).abs() <
+///         0.001);
+/// ```
+pub fn jaro_winkler(a: &str, b: &str) -> f64 {
+    generic_jaro_winkler(&StringWrapper(a), &StringWrapper(b))
+}
+
+/// Calculates the minimum number of insertions, deletions, and substitutions
+/// required to change one sequence into the other.
+///
+/// ```
+/// use strsim::generic_levenshtein;
+///
+/// assert_eq!(3, generic_levenshtein(&[1,2,3], &[1,2,3,4,5,6]));
+/// ```
+pub fn generic_levenshtein<'a, 'b, Iter1, Iter2, Elem1, Elem2>(a: &'a Iter1, b: &'b Iter2) -> usize
+where
+    &'a Iter1: IntoIterator<Item = Elem1>,
+    &'b Iter2: IntoIterator<Item = Elem2>,
+    Elem1: PartialEq<Elem2>,
+{
+    let b_len = b.into_iter().count();
+
+    let mut cache: Vec<usize> = (1..b_len + 1).collect();
+
+    let mut result = b_len;
+
+    for (i, a_elem) in a.into_iter().enumerate() {
+        result = i + 1;
+        let mut distance_b = i;
+
+        for (j, b_elem) in b.into_iter().enumerate() {
+            let cost = usize::from(a_elem != b_elem);
+            let distance_a = distance_b + cost;
+            distance_b = cache[j];
+            result = min(result + 1, min(distance_a, distance_b + 1));
+            cache[j] = result;
+        }
+    }
+
+    result
+}
+
+/// Calculates the minimum number of insertions, deletions, and substitutions
+/// required to change one string into the other.
+///
+/// ```
+/// use strsim::levenshtein;
+///
+/// assert_eq!(3, levenshtein("kitten", "sitting"));
+/// ```
+pub fn levenshtein(a: &str, b: &str) -> usize {
+    generic_levenshtein(&StringWrapper(a), &StringWrapper(b))
+}
+
+/// Calculates a normalized score of the Levenshtein algorithm between 0.0 and
+/// 1.0 (inclusive), where 1.0 means the strings are the same.
+///
+/// ```
+/// use strsim::normalized_levenshtein;
+///
+/// assert!((normalized_levenshtein("kitten", "sitting") - 0.57142).abs() < 0.00001);
+/// assert!((normalized_levenshtein("", "") - 1.0).abs() < 0.00001);
+/// assert!(normalized_levenshtein("", "second").abs() < 0.00001);
+/// assert!(normalized_levenshtein("first", "").abs() < 0.00001);
+/// assert!((normalized_levenshtein("string", "string") - 1.0).abs() < 0.00001);
+/// ```
+pub fn normalized_levenshtein(a: &str, b: &str) -> f64 {
+    if a.is_empty() && b.is_empty() {
+        return 1.0;
+    }
+    1.0 - (levenshtein(a, b) as f64) / (a.chars().count().max(b.chars().count()) as f64)
+}
+
+/// Like Levenshtein but allows for adjacent transpositions. Each substring can
+/// only be edited once.
+///
+/// ```
+/// use strsim::osa_distance;
+///
+/// assert_eq!(3, osa_distance("ab", "bca"));
+/// ```
+pub fn osa_distance(a: &str, b: &str) -> usize {
+    let b_len = b.chars().count();
+    // `0..=b_len` behaves like `0..b_len.saturating_add(1)`, which could have a different
+    // size; this leads to significantly worse code generation when swapping the vectors below.
+    let mut prev_two_distances: Vec<usize> = (0..b_len + 1).collect();
+    let mut prev_distances: Vec<usize> = (0..b_len + 1).collect();
+    let mut curr_distances: Vec<usize> = vec![0; b_len + 1];
+
+    let mut prev_a_char = char::MAX;
+    let mut prev_b_char = char::MAX;
+
+    for (i, a_char) in a.chars().enumerate() {
+        curr_distances[0] = i + 1;
+
+        for (j, b_char) in b.chars().enumerate() {
+            let cost = usize::from(a_char != b_char);
+            curr_distances[j + 1] = min(
+                curr_distances[j] + 1,
+                min(prev_distances[j + 1] + 1, prev_distances[j] + cost),
+            );
+            if i > 0 && j > 0 && a_char != b_char && a_char == prev_b_char && b_char == prev_a_char
+            {
+                curr_distances[j + 1] = min(curr_distances[j + 1], prev_two_distances[j - 1] + 1);
+            }
+
+            prev_b_char = b_char;
+        }
+
+        mem::swap(&mut prev_two_distances, &mut prev_distances);
+        mem::swap(&mut prev_distances, &mut curr_distances);
+        prev_a_char = a_char;
+    }
+
+    // access prev_distances instead of curr_distances since we swapped
+    // them above. In case a is empty this would still contain the correct value
+    // from initializing the last element to b_len
+    prev_distances[b_len]
+}
+
+/* Returns the final index for a value in a single vector that represents a fixed
+2d grid */
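+// e.g. with width = 5, the cell (i = 2, j = 3) maps to flat index 3 * 5 + 2 = 17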
+fn flat_index(i: usize, j: usize, width: usize) -> usize {
+    j * width + i
+}
+
+/// Like optimal string alignment, but substrings can be edited an unlimited
+/// number of times, and the triangle inequality holds.
+///
+/// ```
+/// use strsim::generic_damerau_levenshtein;
+///
+/// assert_eq!(2, generic_damerau_levenshtein(&[1,2], &[2,3,1]));
+/// ```
+pub fn generic_damerau_levenshtein<Elem>(a_elems: &[Elem], b_elems: &[Elem]) -> usize
+where
+    Elem: Eq + Hash + Clone,
+{
+    let a_len = a_elems.len();
+    let b_len = b_elems.len();
+
+    if a_len == 0 {
+        return b_len;
+    }
+    if b_len == 0 {
+        return a_len;
+    }
+
+    let width = a_len + 2;
+    let mut distances = vec![0; (a_len + 2) * (b_len + 2)];
+    let max_distance = a_len + b_len;
+    distances[0] = max_distance;
+
+    for i in 0..(a_len + 1) {
+        distances[flat_index(i + 1, 0, width)] = max_distance;
+        distances[flat_index(i + 1, 1, width)] = i;
+    }
+
+    for j in 0..(b_len + 1) {
+        distances[flat_index(0, j + 1, width)] = max_distance;
+        distances[flat_index(1, j + 1, width)] = j;
+    }
+
+    let mut elems: HashMap<Elem, usize> = HashMap::with_capacity(64);
+
+    for i in 1..(a_len + 1) {
+        let mut db = 0;
+
+        for j in 1..(b_len + 1) {
+            let k = match elems.get(&b_elems[j - 1]) {
+                Some(&value) => value,
+                None => 0,
+            };
+
+            let insertion_cost = distances[flat_index(i, j + 1, width)] + 1;
+            let deletion_cost = distances[flat_index(i + 1, j, width)] + 1;
+            let transposition_cost =
+                distances[flat_index(k, db, width)] + (i - k - 1) + 1 + (j - db - 1);
+
+            let mut substitution_cost = distances[flat_index(i, j, width)] + 1;
+            if a_elems[i - 1] == b_elems[j - 1] {
+                db = j;
+                substitution_cost -= 1;
+            }
+
+            distances[flat_index(i + 1, j + 1, width)] = min(
+                substitution_cost,
+                min(insertion_cost, min(deletion_cost, transposition_cost)),
+            );
+        }
+
+        elems.insert(a_elems[i - 1].clone(), i);
+    }
+
+    distances[flat_index(a_len + 1, b_len + 1, width)]
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+struct RowId {
+    val: isize,
+}
+
+impl Default for RowId {
+    fn default() -> Self {
+        Self { val: -1 }
+    }
+}
+
+#[derive(Default, Clone)]
+struct GrowingHashmapMapElemChar<ValueType> {
+    key: u32,
+    value: ValueType,
+}
+
+/// Specialized hashmap to store user-provided types.
+/// This implementation relies on a couple of base assumptions to simplify the implementation:
+/// - the hashmap does not have an upper limit on the number of included items
+/// - the default value of `ValueType` can be used as a dummy value to indicate an empty cell
+/// - elements can't be removed
+/// - memory is only allocated on the first write access,
+///   which improves performance for hashmaps that are never written to
+struct GrowingHashmapChar<ValueType> {
+    used: i32,
+    fill: i32,
+    mask: i32,
+    map: Option<Vec<GrowingHashmapMapElemChar<ValueType>>>,
+}
+
+impl<ValueType> Default for GrowingHashmapChar<ValueType>
+where
+    ValueType: Default + Clone + Eq,
+{
+    fn default() -> Self {
+        Self {
+            used: 0,
+            fill: 0,
+            mask: -1,
+            map: None,
+        }
+    }
+}
+
+impl<ValueType> GrowingHashmapChar<ValueType>
+where
+    ValueType: Default + Clone + Eq + Copy,
+{
+    fn get(&self, key: u32) -> ValueType {
+        self.map
+            .as_ref()
+            .map_or_else(|| Default::default(), |map| map[self.lookup(key)].value)
+    }
+
+    fn get_mut(&mut self, key: u32) -> &mut ValueType {
+        if self.map.is_none() {
+            self.allocate();
+        }
+
+        let mut i = self.lookup(key);
+        if self
+            .map
+            .as_ref()
+            .expect("map should have been created above")[i]
+            .value
+            == Default::default()
+        {
+            self.fill += 1;
+            // resize when 2/3 full
+            if self.fill * 3 >= (self.mask + 1) * 2 {
+                self.grow((self.used + 1) * 2);
+                i = self.lookup(key);
+            }
+
+            self.used += 1;
+        }
+
+        let elem = &mut self
+            .map
+            .as_mut()
+            .expect("map should have been created above")[i];
+        elem.key = key;
+        &mut elem.value
+    }
+
+    fn allocate(&mut self) {
+        self.mask = 8 - 1;
+        self.map = Some(vec![GrowingHashmapMapElemChar::default(); 8]);
+    }
+
+    /// lookup key inside the hashmap using a similar collision resolution
+    /// strategy to `CPython` and `Ruby`
+    fn lookup(&self, key: u32) -> usize {
+        let hash = key;
+        let mut i = hash as usize & self.mask as usize;
+
+        let map = self
+            .map
+            .as_ref()
+            .expect("callers have to ensure map is allocated");
+
+        if map[i].value == Default::default() || map[i].key == key {
+            return i;
+        }
+
+        let mut perturb = key;
+        loop {
+            i = (i * 5 + perturb as usize + 1) & self.mask as usize;
+
+            if map[i].value == Default::default() || map[i].key == key {
+                return i;
+            }
+
+            perturb >>= 5;
+        }
+    }
+
+    fn grow(&mut self, min_used: i32) {
+        let mut new_size = self.mask + 1;
+        while new_size <= min_used {
+            new_size <<= 1;
+        }
+
+        self.fill = self.used;
+        self.mask = new_size - 1;
+
+        let old_map = std::mem::replace(
+            self.map
+                .as_mut()
+                .expect("callers have to ensure map is allocated"),
+            vec![GrowingHashmapMapElemChar::<ValueType>::default(); new_size as usize],
+        );
+
+        for elem in old_map {
+            if elem.value != Default::default() {
+                let j = self.lookup(elem.key);
+                let new_elem = &mut self.map.as_mut().expect("map created above")[j];
+                new_elem.key = elem.key;
+                new_elem.value = elem.value;
+                self.used -= 1;
+                if self.used == 0 {
+                    break;
+                }
+            }
+        }
+
+        self.used = self.fill;
+    }
+}
+
+struct HybridGrowingHashmapChar<ValueType> {
+    map: GrowingHashmapChar<ValueType>,
+    extended_ascii: [ValueType; 256],
+}
+
+impl<ValueType> HybridGrowingHashmapChar<ValueType>
+where
+    ValueType: Default + Clone + Copy + Eq,
+{
+    fn get(&self, key: char) -> ValueType {
+        let value = key as u32;
+        if value <= 255 {
+            let val_u8 = u8::try_from(value).expect("we check the bounds above");
+            self.extended_ascii[usize::from(val_u8)]
+        } else {
+            self.map.get(value)
+        }
+    }
+
+    fn get_mut(&mut self, key: char) -> &mut ValueType {
+        let value = key as u32;
+        if value <= 255 {
+            let val_u8 = u8::try_from(value).expect("we check the bounds above");
+            &mut self.extended_ascii[usize::from(val_u8)]
+        } else {
+            self.map.get_mut(value)
+        }
+    }
+}
+
+impl<ValueType> Default for HybridGrowingHashmapChar<ValueType>
+where
+    ValueType: Default + Clone + Copy + Eq,
+{
+    fn default() -> Self {
+        HybridGrowingHashmapChar {
+            map: GrowingHashmapChar::default(),
+            extended_ascii: [Default::default(); 256],
+        }
+    }
+}
+
+fn damerau_levenshtein_impl<Iter1, Iter2>(s1: Iter1, len1: usize, s2: Iter2, len2: usize) -> usize
+where
+    Iter1: Iterator<Item = char> + Clone,
+    Iter2: Iterator<Item = char> + Clone,
+{
+    // The implementation is based on the paper
+    // `Linear space string correction algorithm using the Damerau-Levenshtein distance`
+    // from Chunchun Zhao and Sartaj Sahni
+    //
+    // It has a runtime complexity of `O(N*M)` and a memory usage of `O(N+M)`.
+    let max_val = max(len1, len2) as isize + 1;
+
+    let mut last_row_id = HybridGrowingHashmapChar::<RowId>::default();
+
+    let size = len2 + 2;
+    let mut fr = vec![max_val; size];
+    let mut r1 = vec![max_val; size];
+    let mut r: Vec<isize> = (max_val..max_val + 1)
+        .chain(0..(size - 1) as isize)
+        .collect();
+
+    for (i, ch1) in s1.enumerate().map(|(i, ch1)| (i + 1, ch1)) {
+        mem::swap(&mut r, &mut r1);
+        let mut last_col_id: isize = -1;
+        let mut last_i2l1 = r[1];
+        r[1] = i as isize;
+        let mut t = max_val;
+
+        for (j, ch2) in s2.clone().enumerate().map(|(j, ch2)| (j + 1, ch2)) {
+            let diag = r1[j] + isize::from(ch1 != ch2);
+            let left = r[j] + 1;
+            let up = r1[j + 1] + 1;
+            let mut temp = min(diag, min(left, up));
+
+            if ch1 == ch2 {
+                last_col_id = j as isize; // last occurrence of s1_i
+                fr[j + 1] = r1[j - 1]; // save H_k-1,j-2
+                t = last_i2l1; // save H_i-2,l-1
+            } else {
+                let k = last_row_id.get(ch2).val;
+                let l = last_col_id;
+
+                if j as isize - l == 1 {
+                    let transpose = fr[j + 1] + (i as isize - k);
+                    temp = min(temp, transpose);
+                } else if i as isize - k == 1 {
+                    let transpose = t + (j as isize - l);
+                    temp = min(temp, transpose);
+                }
+            }
+
+            last_i2l1 = r[j + 1];
+            r[j + 1] = temp;
+        }
+        last_row_id.get_mut(ch1).val = i as isize;
+    }
+
+    r[len2 + 1] as usize
+}
+
+/// Like optimal string alignment, but substrings can be edited an unlimited
+/// number of times, and the triangle inequality holds.
+///
+/// ```
+/// use strsim::damerau_levenshtein;
+///
+/// assert_eq!(2, damerau_levenshtein("ab", "bca"));
+/// ```
+pub fn damerau_levenshtein(a: &str, b: &str) -> usize {
+    damerau_levenshtein_impl(a.chars(), a.chars().count(), b.chars(), b.chars().count())
+}
+
+/// Calculates a normalized score of the Damerau–Levenshtein algorithm between
+/// 0.0 and 1.0 (inclusive), where 1.0 means the strings are the same.
+///
+/// ```
+/// use strsim::normalized_damerau_levenshtein;
+///
+/// assert!((normalized_damerau_levenshtein("levenshtein", "löwenbräu") - 0.27272).abs() < 0.00001);
+/// assert!((normalized_damerau_levenshtein("", "") - 1.0).abs() < 0.00001);
+/// assert!(normalized_damerau_levenshtein("", "flower").abs() < 0.00001);
+/// assert!(normalized_damerau_levenshtein("tree", "").abs() < 0.00001);
+/// assert!((normalized_damerau_levenshtein("sunglasses", "sunglasses") - 1.0).abs() < 0.00001);
+/// ```
+pub fn normalized_damerau_levenshtein(a: &str, b: &str) -> f64 {
+    if a.is_empty() && b.is_empty() {
+        return 1.0;
+    }
+
+    let len1 = a.chars().count();
+    let len2 = b.chars().count();
+    let dist = damerau_levenshtein_impl(a.chars(), len1, b.chars(), len2);
+    1.0 - (dist as f64) / (max(len1, len2) as f64)
+}
+
+/// Returns an Iterator of char tuples.
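+///
+/// For example, `bigrams("abcde")` yields `('a', 'b')`, `('b', 'c')`, `('c', 'd')`,
+/// and `('d', 'e')` (see the `bigrams_iterator` unit test below).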
+fn bigrams(s: &str) -> impl Iterator<Item = (char, char)> + '_ {
+    s.chars().zip(s.chars().skip(1))
+}
+
+/// Calculates a Sørensen-Dice similarity distance using bigrams.
+/// See <https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient>.
+///
+/// ```
+/// use strsim::sorensen_dice;
+///
+/// assert_eq!(1.0, sorensen_dice("", ""));
+/// assert_eq!(0.0, sorensen_dice("", "a"));
+/// assert_eq!(0.0, sorensen_dice("french", "quebec"));
+/// assert_eq!(1.0, sorensen_dice("ferris", "ferris"));
+/// assert_eq!(0.8888888888888888, sorensen_dice("feris", "ferris"));
+/// ```
+pub fn sorensen_dice(a: &str, b: &str) -> f64 {
+    // implementation guided by
+    // https://github.com/aceakash/string-similarity/blob/f83ba3cd7bae874c20c429774e911ae8cff8bced/src/index.js#L6
+
+    let a: String = a.chars().filter(|&x| !char::is_whitespace(x)).collect();
+    let b: String = b.chars().filter(|&x| !char::is_whitespace(x)).collect();
+
+    if a == b {
+        return 1.0;
+    }
+
+    if a.len() < 2 || b.len() < 2 {
+        return 0.0;
+    }
+
+    let mut a_bigrams: HashMap<(char, char), usize> = HashMap::new();
+
+    for bigram in bigrams(&a) {
+        *a_bigrams.entry(bigram).or_insert(0) += 1;
+    }
+
+    let mut intersection_size = 0_usize;
+
+    for bigram in bigrams(&b) {
+        a_bigrams.entry(bigram).and_modify(|bi| {
+            if *bi > 0 {
+                *bi -= 1;
+                intersection_size += 1;
+            }
+        });
+    }
+
+    (2 * intersection_size) as f64 / (a.len() + b.len() - 2) as f64
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    macro_rules! assert_delta {
+        ($x:expr, $y:expr) => {
+            assert_delta!($x, $y, 1e-5);
+        };
+        ($x:expr, $y:expr, $d:expr) => {
+            if ($x - $y).abs() > $d {
+                panic!(
+                    "assertion failed: actual: `{}`, expected: `{}`: \
+                    actual not within < {} of expected",
+                    $x, $y, $d
+                );
+            }
+        };
+    }
+
+    #[test]
+    fn bigrams_iterator() {
+        let mut bi = bigrams("abcde");
+
+        assert_eq!(Some(('a', 'b')), bi.next());
+        assert_eq!(Some(('b', 'c')), bi.next());
+        assert_eq!(Some(('c', 'd')), bi.next());
+        assert_eq!(Some(('d', 'e')), bi.next());
+        assert_eq!(None, bi.next());
+    }
+
+    fn assert_hamming_dist(dist: usize, str1: &str, str2: &str) {
+        assert_eq!(Ok(dist), hamming(str1, str2));
+    }
+
+    #[test]
+    fn hamming_empty() {
+        assert_hamming_dist(0, "", "")
+    }
+
+    #[test]
+    fn hamming_same() {
+        assert_hamming_dist(0, "hamming", "hamming")
+    }
+
+    #[test]
+    fn hamming_numbers() {
+        assert_eq!(Ok(1), generic_hamming(&[1, 2, 4], &[1, 2, 3]));
+    }
+
+    #[test]
+    fn hamming_diff() {
+        assert_hamming_dist(3, "hamming", "hammers")
+    }
+
+    #[test]
+    fn hamming_diff_multibyte() {
+        assert_hamming_dist(2, "hamming", "h香mmüng");
+    }
+
+    #[test]
+    fn hamming_unequal_length() {
+        assert_eq!(
+            Err(StrSimError::DifferentLengthArgs),
+            generic_hamming("ham".chars(), "hamming".chars())
+        );
+    }
+
+    #[test]
+    fn hamming_names() {
+        assert_hamming_dist(14, "Friedrich Nietzs", "Jean-Paul Sartre")
+    }
+
+    #[test]
+    fn jaro_both_empty() {
+        assert_eq!(1.0, jaro("", ""));
+    }
+
+    #[test]
+    fn jaro_first_empty() {
+        assert_eq!(0.0, jaro("", "jaro"));
+    }
+
+    #[test]
+    fn jaro_second_empty() {
+        assert_eq!(0.0, jaro("distance", ""));
+    }
+
+    #[test]
+    fn jaro_same() {
+        assert_eq!(1.0, jaro("jaro", "jaro"));
+    }
+
+    #[test]
+    fn jaro_multibyte() {
+        assert_delta!(0.818, jaro("testabctest", "testöঙ香test"), 0.001);
+        assert_delta!(0.818, jaro("testöঙ香test", "testabctest"), 0.001);
+    }
+
+    #[test]
+    fn jaro_diff_short() {
+        assert_delta!(0.767, jaro("dixon", "dicksonx"), 0.001);
+    }
+
+    #[test]
+    fn jaro_diff_one_character() {
+        assert_eq!(0.0, jaro("a", "b"));
+    }
+
+    #[test]
+    fn jaro_same_one_character() {
+        assert_eq!(1.0, jaro("a", "a"));
+    }
+
+    #[test]
+    fn generic_jaro_diff() {
+        assert_eq!(0.0, generic_jaro(&[1, 2], &[3, 4]));
+    }
+
+    #[test]
+    fn jaro_diff_one_and_two() {
+        assert_delta!(0.83, jaro("a", "ab"), 0.01);
+    }
+
+    #[test]
+    fn jaro_diff_two_and_one() {
+        assert_delta!(0.83, jaro("ab", "a"), 0.01);
+    }
+
+    #[test]
+    fn jaro_diff_no_transposition() {
+        assert_delta!(0.822, jaro("dwayne", "duane"), 0.001);
+    }
+
+    #[test]
+    fn jaro_diff_with_transposition() {
+        assert_delta!(0.944, jaro("martha", "marhta"), 0.001);
+        assert_delta!(0.6, jaro("a jke", "jane a k"), 0.001);
+    }
+
+    #[test]
+    fn jaro_names() {
+        assert_delta!(
+            0.392,
+            jaro("Friedrich Nietzsche", "Jean-Paul Sartre"),
+            0.001
+        );
+    }
+
+    #[test]
+    fn jaro_winkler_both_empty() {
+        assert_eq!(1.0, jaro_winkler("", ""));
+    }
+
+    #[test]
+    fn jaro_winkler_first_empty() {
+        assert_eq!(0.0, jaro_winkler("", "jaro-winkler"));
+    }
+
+    #[test]
+    fn jaro_winkler_second_empty() {
+        assert_eq!(0.0, jaro_winkler("distance", ""));
+    }
+
+    #[test]
+    fn jaro_winkler_same() {
+        assert_eq!(1.0, jaro_winkler("Jaro-Winkler", "Jaro-Winkler"));
+    }
+
+    #[test]
+    fn jaro_winkler_multibyte() {
+        assert_delta!(0.89, jaro_winkler("testabctest", "testöঙ香test"), 0.001);
+        assert_delta!(0.89, jaro_winkler("testöঙ香test", "testabctest"), 0.001);
+    }
+
+    #[test]
+    fn jaro_winkler_diff_short() {
+        assert_delta!(0.813, jaro_winkler("dixon", "dicksonx"), 0.001);
+        assert_delta!(0.813, jaro_winkler("dicksonx", "dixon"), 0.001);
+    }
+
+    #[test]
+    fn jaro_winkler_diff_one_character() {
+        assert_eq!(0.0, jaro_winkler("a", "b"));
+    }
+
+    #[test]
+    fn jaro_winkler_same_one_character() {
+        assert_eq!(1.0, jaro_winkler("a", "a"));
+    }
+
+    #[test]
+    fn jaro_winkler_diff_no_transposition() {
+        assert_delta!(0.84, jaro_winkler("dwayne", "duane"), 0.001);
+    }
+
+    #[test]
+    fn jaro_winkler_diff_with_transposition() {
+        assert_delta!(0.961, jaro_winkler("martha", "marhta"), 0.001);
+        assert_delta!(0.6, jaro_winkler("a jke", "jane a k"), 0.001);
+    }
+
+    #[test]
+    fn jaro_winkler_names() {
+        assert_delta!(
+            0.452,
+            jaro_winkler("Friedrich Nietzsche", "Fran-Paul Sartre"),
+            0.001
+        );
+    }
+
+    #[test]
+    fn jaro_winkler_long_prefix() {
+        assert_delta!(0.866, jaro_winkler("cheeseburger", "cheese fries"), 0.001);
+    }
+
+    #[test]
+    fn jaro_winkler_more_names() {
+        assert_delta!(0.868, jaro_winkler("Thorkel", "Thorgier"), 0.001);
+    }
+
+    #[test]
+    fn jaro_winkler_length_of_one() {
+        assert_delta!(0.738, jaro_winkler("Dinsdale", "D"), 0.001);
+    }
+
+    #[test]
+    fn jaro_winkler_very_long_prefix() {
+        assert_delta!(
+            0.98519,
+            jaro_winkler("thequickbrownfoxjumpedoverx", "thequickbrownfoxjumpedovery")
+        );
+    }
+
+    #[test]
+    fn levenshtein_empty() {
+        assert_eq!(0, levenshtein("", ""));
+    }
+
+    #[test]
+    fn levenshtein_same() {
+        assert_eq!(0, levenshtein("levenshtein", "levenshtein"));
+    }
+
+    #[test]
+    fn levenshtein_diff_short() {
+        assert_eq!(3, levenshtein("kitten", "sitting"));
+    }
+
+    #[test]
+    fn levenshtein_diff_with_space() {
+        assert_eq!(5, levenshtein("hello, world", "bye, world"));
+    }
+
+    #[test]
+    fn levenshtein_diff_multibyte() {
+        assert_eq!(3, levenshtein("öঙ香", "abc"));
+        assert_eq!(3, levenshtein("abc", "öঙ香"));
+    }
+
+    #[test]
+    fn levenshtein_diff_longer() {
+        let a = "The quick brown fox jumped over the angry dog.";
+        let b = "Lorem ipsum dolor sit amet, dicta latine an eam.";
+        assert_eq!(37, levenshtein(a, b));
+    }
+
+    #[test]
+    fn levenshtein_first_empty() {
+        assert_eq!(7, levenshtein("", "sitting"));
+    }
+
+    #[test]
+    fn levenshtein_second_empty() {
+        assert_eq!(6, levenshtein("kitten", ""));
+    }
+
+    #[test]
+    fn normalized_levenshtein_diff_short() {
+        assert_delta!(0.57142, normalized_levenshtein("kitten", "sitting"));
+    }
+
+    #[test]
+    fn normalized_levenshtein_for_empty_strings() {
+        assert_delta!(1.0, normalized_levenshtein("", ""));
+    }
+
+    #[test]
+    fn normalized_levenshtein_first_empty() {
+        assert_delta!(0.0, normalized_levenshtein("", "second"));
+    }
+
+    #[test]
+    fn normalized_levenshtein_second_empty() {
+        assert_delta!(0.0, normalized_levenshtein("first", ""));
+    }
+
+    #[test]
+    fn normalized_levenshtein_identical_strings() {
+        assert_delta!(1.0, normalized_levenshtein("identical", "identical"));
+    }
+
+    #[test]
+    fn osa_distance_empty() {
+        assert_eq!(0, osa_distance("", ""));
+    }
+
+    #[test]
+    fn osa_distance_same() {
+        assert_eq!(0, osa_distance("damerau", "damerau"));
+    }
+
+    #[test]
+    fn osa_distance_first_empty() {
+        assert_eq!(7, osa_distance("", "damerau"));
+    }
+
+    #[test]
+    fn osa_distance_second_empty() {
+        assert_eq!(7, osa_distance("damerau", ""));
+    }
+
+    #[test]
+    fn osa_distance_diff() {
+        assert_eq!(3, osa_distance("ca", "abc"));
+    }
+
+    #[test]
+    fn osa_distance_diff_short() {
+        assert_eq!(3, osa_distance("damerau", "aderua"));
+    }
+
+    #[test]
+    fn osa_distance_diff_reversed() {
+        assert_eq!(3, osa_distance("aderua", "damerau"));
+    }
+
+    #[test]
+    fn osa_distance_diff_multibyte() {
+        assert_eq!(3, osa_distance("öঙ香", "abc"));
+        assert_eq!(3, osa_distance("abc", "öঙ香"));
+    }
+
+    #[test]
+    fn osa_distance_diff_unequal_length() {
+        assert_eq!(6, osa_distance("damerau", "aderuaxyz"));
+    }
+
+    #[test]
+    fn osa_distance_diff_unequal_length_reversed() {
+        assert_eq!(6, osa_distance("aderuaxyz", "damerau"));
+    }
+
+    #[test]
+    fn osa_distance_diff_comedians() {
+        assert_eq!(5, osa_distance("Stewart", "Colbert"));
+    }
+
+    #[test]
+    fn osa_distance_many_transpositions() {
+        assert_eq!(4, osa_distance("abcdefghijkl", "bacedfgihjlk"));
+    }
+
+    #[test]
+    fn osa_distance_diff_longer() {
+        let a = "The quick brown fox jumped over the angry dog.";
+        let b = "Lehem ipsum dolor sit amet, dicta latine an eam.";
+        assert_eq!(36, osa_distance(a, b));
+    }
+
+    #[test]
+    fn osa_distance_beginning_transposition() {
+        assert_eq!(1, osa_distance("foobar", "ofobar"));
+    }
+
+    #[test]
+    fn osa_distance_end_transposition() {
+        assert_eq!(1, osa_distance("specter", "spectre"));
+    }
+
+    #[test]
+    fn osa_distance_restricted_edit() {
+        assert_eq!(4, osa_distance("a cat", "an abct"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_empty() {
+        assert_eq!(0, damerau_levenshtein("", ""));
+    }
+
+    #[test]
+    fn damerau_levenshtein_same() {
+        assert_eq!(0, damerau_levenshtein("damerau", "damerau"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_first_empty() {
+        assert_eq!(7, damerau_levenshtein("", "damerau"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_second_empty() {
+        assert_eq!(7, damerau_levenshtein("damerau", ""));
+    }
+
+    #[test]
+    fn damerau_levenshtein_diff() {
+        assert_eq!(2, damerau_levenshtein("ca", "abc"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_diff_short() {
+        assert_eq!(3, damerau_levenshtein("damerau", "aderua"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_diff_reversed() {
+        assert_eq!(3, damerau_levenshtein("aderua", "damerau"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_diff_multibyte() {
+        assert_eq!(3, damerau_levenshtein("öঙ香", "abc"));
+        assert_eq!(3, damerau_levenshtein("abc", "öঙ香"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_diff_unequal_length() {
+        assert_eq!(6, damerau_levenshtein("damerau", "aderuaxyz"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_diff_unequal_length_reversed() {
+        assert_eq!(6, damerau_levenshtein("aderuaxyz", "damerau"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_diff_comedians() {
+        assert_eq!(5, damerau_levenshtein("Stewart", "Colbert"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_many_transpositions() {
+        assert_eq!(4, damerau_levenshtein("abcdefghijkl", "bacedfgihjlk"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_diff_longer() {
+        let a = "The quick brown fox jumped over the angry dog.";
+        let b = "Lehem ipsum dolor sit amet, dicta latine an eam.";
+        assert_eq!(36, damerau_levenshtein(a, b));
+    }
+
+    #[test]
+    fn damerau_levenshtein_beginning_transposition() {
+        assert_eq!(1, damerau_levenshtein("foobar", "ofobar"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_end_transposition() {
+        assert_eq!(1, damerau_levenshtein("specter", "spectre"));
+    }
+
+    #[test]
+    fn damerau_levenshtein_unrestricted_edit() {
+        assert_eq!(3, damerau_levenshtein("a cat", "an abct"));
+    }
+
+    #[test]
+    fn normalized_damerau_levenshtein_diff_short() {
+        assert_delta!(
+            0.27272,
+            normalized_damerau_levenshtein("levenshtein", "löwenbräu")
+        );
+    }
+
+    #[test]
+    fn normalized_damerau_levenshtein_for_empty_strings() {
+        assert_delta!(1.0, normalized_damerau_levenshtein("", ""));
+    }
+
+    #[test]
+    fn normalized_damerau_levenshtein_first_empty() {
+        assert_delta!(0.0, normalized_damerau_levenshtein("", "flower"));
+    }
+
+    #[test]
+    fn normalized_damerau_levenshtein_second_empty() {
+        assert_delta!(0.0, normalized_damerau_levenshtein("tree", ""));
+    }
+
+    #[test]
+    fn normalized_damerau_levenshtein_identical_strings() {
+        assert_delta!(
+            1.0,
+            normalized_damerau_levenshtein("sunglasses", "sunglasses")
+        );
+    }
+
+    #[test]
+    fn sorensen_dice_all() {
+        // test cases taken from
+        // https://github.com/aceakash/string-similarity/blob/f83ba3cd7bae874c20c429774e911ae8cff8bced/src/spec/index.spec.js#L11
+
+        assert_delta!(1.0, sorensen_dice("a", "a"));
+        assert_delta!(0.0, sorensen_dice("a", "b"));
+        assert_delta!(1.0, sorensen_dice("", ""));
+        assert_delta!(0.0, sorensen_dice("a", ""));
+        assert_delta!(0.0, sorensen_dice("", "a"));
+        assert_delta!(1.0, sorensen_dice("apple event", "apple    event"));
+        assert_delta!(0.90909, sorensen_dice("iphone", "iphone x"));
+        assert_delta!(0.0, sorensen_dice("french", "quebec"));
+        assert_delta!(1.0, sorensen_dice("france", "france"));
+        assert_delta!(0.2, sorensen_dice("fRaNce", "france"));
+        assert_delta!(0.8, sorensen_dice("healed", "sealed"));
+        assert_delta!(
+            0.78788,
+            sorensen_dice("web applications", "applications of the web")
+        );
+        assert_delta!(
+            0.92,
+            sorensen_dice(
+                "this will have a typo somewhere",
+                "this will huve a typo somewhere"
+            )
+        );
+        assert_delta!(
+            0.60606,
+            sorensen_dice(
+                "Olive-green table for sale, in extremely good condition.",
+                "For sale: table in very good  condition, olive green in colour."
+            )
+        );
+        assert_delta!(
+            0.25581,
+            sorensen_dice(
+                "Olive-green table for sale, in extremely good condition.",
+                "For sale: green Subaru Impreza, 210,000 miles"
+            )
+        );
+        assert_delta!(
+            0.14118,
+            sorensen_dice(
+                "Olive-green table for sale, in extremely good condition.",
+                "Wanted: mountain bike with at least 21 gears."
+            )
+        );
+        assert_delta!(
+            0.77419,
+            sorensen_dice("this has one extra word", "this has one word")
+        );
+    }
+}
diff --git a/crates/strsim/tests/lib.rs b/crates/strsim/tests/lib.rs
new file mode 100644
index 0000000..991fc6f
--- /dev/null
+++ b/crates/strsim/tests/lib.rs
@@ -0,0 +1,71 @@
+extern crate strsim;
+
+use strsim::{
+    damerau_levenshtein, hamming, jaro, jaro_winkler, levenshtein, normalized_damerau_levenshtein,
+    normalized_levenshtein, osa_distance,
+};
+
+macro_rules! assert_delta {
+    ($x:expr, $y:expr) => {
+        assert_delta!($x, $y, 1e-5);
+    };
+    ($x:expr, $y:expr, $d:expr) => {
+        if ($x - $y).abs() > $d {
+            panic!(
+                "assertion failed: actual: `{}`, expected: `{}`: \
+                actual not within < {} of expected",
+                $x, $y, $d
+            );
+        }
+    };
+}
+
+#[test]
+fn hamming_works() {
+    match hamming("hamming", "hammers") {
+        Ok(distance) => assert_eq!(3, distance),
+        Err(why) => panic!("{:?}", why),
+    }
+}
+
+#[test]
+fn levenshtein_works() {
+    assert_eq!(3, levenshtein("kitten", "sitting"));
+}
+
+#[test]
+fn normalized_levenshtein_works() {
+    assert_delta!(0.57142, normalized_levenshtein("kitten", "sitting"));
+}
+
+#[test]
+fn osa_distance_works() {
+    assert_eq!(3, osa_distance("ac", "cba"));
+}
+
+#[test]
+fn damerau_levenshtein_works() {
+    assert_eq!(2, damerau_levenshtein("ac", "cba"));
+}
+
+#[test]
+fn normalized_damerau_levenshtein_works() {
+    assert_delta!(
+        0.27272,
+        normalized_damerau_levenshtein("levenshtein", "löwenbräu")
+    );
+}
+
+#[test]
+fn jaro_works() {
+    assert_delta!(
+        0.392,
+        jaro("Friedrich Nietzsche", "Jean-Paul Sartre"),
+        0.001
+    );
+}
+
+#[test]
+fn jaro_winkler_works() {
+    assert_delta!(0.866, jaro_winkler("cheeseburger", "cheese fries"), 0.001);
+}
diff --git a/crates/strum/.cargo-checksum.json b/crates/strum/.cargo-checksum.json
new file mode 100644
index 0000000..2446a0e
--- /dev/null
+++ b/crates/strum/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"f615f23ee012c313d795d487c1c9b89a09f8aa8c025bb6b2f872dd27f35ab11b","LICENSE":"8bce3b45e49ecd1461f223b46de133d8f62cd39f745cfdaf81bee554b908bd42","src/additional_attributes.rs":"1d7a611079a0d094a4a0aebc760323bc9eb2550e0d367123e4439a364c824039","src/lib.rs":"f07e798a8cd3c4869437f6c6e954c230db44481d3fb215c5a0906ba9fab5a011"},"package":"290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"}
\ No newline at end of file
diff --git a/crates/strum/Android.bp b/crates/strum/Android.bp
new file mode 100644
index 0000000..9de75bb
--- /dev/null
+++ b/crates/strum/Android.bp
@@ -0,0 +1,34 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_strum_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_strum_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-MIT"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libstrum",
+    host_supported: true,
+    crate_name: "strum",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.25.0",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    features: [
+        "default",
+        "std",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
diff --git a/crates/strum/Cargo.lock b/crates/strum/Cargo.lock
new file mode 100644
index 0000000..a13d6ec
--- /dev/null
+++ b/crates/strum/Cargo.lock
@@ -0,0 +1,233 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "byteorder"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "getrandom"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "heck"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+
+[[package]]
+name = "libc"
+version = "0.2.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+
+[[package]]
+name = "phf"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fabbf1ead8a5bcbc20f5f8b939ee3f5b0f6f281b6ad3468b84656b658b455259"
+dependencies = [
+ "phf_macros",
+ "phf_shared",
+ "proc-macro-hack",
+]
+
+[[package]]
+name = "phf_generator"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6"
+dependencies = [
+ "phf_shared",
+ "rand",
+]
+
+[[package]]
+name = "phf_macros"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "58fdf3184dd560f160dd73922bea2d5cd6e8f064bf4b13110abd81b03697b4e0"
+dependencies = [
+ "phf_generator",
+ "phf_shared",
+ "proc-macro-hack",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "phf_shared"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096"
+dependencies = [
+ "siphasher",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04"
+dependencies = [
+ "zerocopy",
+]
+
+[[package]]
+name = "proc-macro-hack"
+version = "0.5.20+deprecated"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rustversion"
+version = "1.0.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
+
+[[package]]
+name = "siphasher"
+version = "0.3.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d"
+
+[[package]]
+name = "strum"
+version = "0.25.0"
+dependencies = [
+ "phf",
+ "strum_macros",
+]
+
+[[package]]
+name = "strum_macros"
+version = "0.25.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "rustversion",
+ "syn 2.0.76",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "zerocopy"
+version = "0.7.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0"
+dependencies = [
+ "byteorder",
+ "zerocopy-derive",
+]
+
+[[package]]
+name = "zerocopy-derive"
+version = "0.7.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.76",
+]
diff --git a/crates/strum/Cargo.toml b/crates/strum/Cargo.toml
new file mode 100644
index 0000000..ccafbb1
--- /dev/null
+++ b/crates/strum/Cargo.toml
@@ -0,0 +1,44 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "strum"
+version = "0.25.0"
+authors = ["Peter Glotfelty <peter.glotfelty@microsoft.com>"]
+description = "Helpful macros for working with enums and strings"
+homepage = "https://github.com/Peternator7/strum"
+documentation = "https://docs.rs/strum"
+readme = "../README.md"
+keywords = ["enum", "string", "macros", "proc-macros"]
+categories = ["development-tools::procedural-macro-helpers", "parsing"]
+license = "MIT"
+repository = "https://github.com/Peternator7/strum"
+[package.metadata.docs.rs]
+features = ["derive"]
+rustdoc-args = ["--cfg", "docsrs"]
+[dependencies.phf]
+version = "0.10"
+features = ["macros"]
+optional = true
+
+[dependencies.strum_macros]
+version = "0.25"
+optional = true
+[dev-dependencies.strum_macros]
+version = "0.25"
+
+[features]
+default = ["std"]
+derive = ["strum_macros"]
+std = []
+[badges.travis-ci]
+repository = "Peternator7/strum"
diff --git a/crates/strum/LICENSE b/crates/strum/LICENSE
new file mode 100644
index 0000000..588b4a7
--- /dev/null
+++ b/crates/strum/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Peter Glotfelty
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/crates/strum/METADATA b/crates/strum/METADATA
new file mode 100644
index 0000000..16697ae
--- /dev/null
+++ b/crates/strum/METADATA
@@ -0,0 +1,19 @@
+name: "strum"
+description: "Helpful macros for working with enums and strings"
+third_party {
+  identifier {
+    type: "crates.io"
+    value: "https://crates.io/crates/strum"
+  }
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/strum/strum-0.25.0.crate"
+  }
+  version: "0.25.0"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2024
+    month: 1
+    day: 17
+  }
+}
diff --git a/crates/strum/MODULE_LICENSE_MIT b/crates/strum/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/strum/MODULE_LICENSE_MIT
diff --git a/crates/strum/cargo_embargo.json b/crates/strum/cargo_embargo.json
new file mode 100644
index 0000000..cb908d7
--- /dev/null
+++ b/crates/strum/cargo_embargo.json
@@ -0,0 +1,3 @@
+{
+  "run_cargo": false
+}
diff --git a/crates/strum/src/additional_attributes.rs b/crates/strum/src/additional_attributes.rs
new file mode 100644
index 0000000..a29f1d6
--- /dev/null
+++ b/crates/strum/src/additional_attributes.rs
@@ -0,0 +1,92 @@
+//! # Documentation for Additional Attributes
+//!
+//! ## Attributes on Enums
+//!
+//! Strum supports several custom attributes to modify the generated code. At the enum level, the following attributes are supported:
+//!
+//! - `#[strum(serialize_all = "case_style")]` attribute can be used to change the case used when serializing to and deserializing
+//!   from strings. This feature is enabled by [withoutboats/heck](https://github.com/withoutboats/heck) and supported case styles are:
+//!
+//!   - `camelCase`
+//!   - `PascalCase`
+//!   - `kebab-case`
+//!   - `snake_case`
+//!   - `SCREAMING_SNAKE_CASE`
+//!   - `SCREAMING-KEBAB-CASE`
+//!   - `lowercase`
+//!   - `UPPERCASE`
+//!   - `title_case`
+//!   - `mixed_case`
+//!   - `Train-Case`
+//!
+//!   ```rust
+//!   use strum_macros;
+//!   
+//!   #[derive(Debug, Eq, PartialEq, strum_macros::Display)]
+//!   #[strum(serialize_all = "snake_case")]
+//!   enum Brightness {
+//!       DarkBlack,
+//!       Dim {
+//!           glow: usize,
+//!       },
+//!       #[strum(serialize = "bright")]
+//!       BrightWhite,
+//!   }
+//!   
+//!   assert_eq!(
+//!       String::from("dark_black"),
+//!       Brightness::DarkBlack.to_string().as_ref()
+//!   );
+//!   assert_eq!(
+//!       String::from("dim"),
+//!       Brightness::Dim { glow: 0 }.to_string().as_ref()
+//!   );
+//!   assert_eq!(
+//!       String::from("bright"),
+//!       Brightness::BrightWhite.to_string().as_ref()
+//!   );
+//!   ```
+//!
+//! - You can also apply the `#[strum(ascii_case_insensitive)]` attribute to the enum,
+//!   and this has the same effect as applying it to every variant.
+//!
+//! ## Attributes on Variants
+//!
+//! Custom attributes are applied to a variant by adding `#[strum(parameter="value")]` to the variant.
+//!
+//! - `serialize="..."`: Changes the text that `FromStr()` looks for when parsing a string. This attribute can
+//!    be applied multiple times to an element and the enum variant will be parsed if any of them match.
+//!
+//! - `to_string="..."`: Similar to `serialize`. This value will be included when using `FromStr()`. More importantly,
+//!    this specifies what text to use when calling `variant.to_string()` with the `Display` derivation, or when calling `variant.as_ref()` with `AsRefStr`.
+//!
+//! - `default`: Applied to a single variant of an enum. The variant must be a Tuple-like
+//!    variant with a single piece of data that can be created from a `&str`, i.e. `T: From<&str>`.
+//!    The generated code will now return the variant with the input string captured as shown below
+//!    instead of failing.
+//!
+//!     ```text
+//!     // Replaces this:
+//!     _ => Err(strum::ParseError::VariantNotFound)
+//!     // With this in generated code:
+//!     default => Ok(Variant(default.into()))
+//!     ```
+//!     The plugin will fail if the data doesn't implement From<&str>. You can only have one `default`
+//!     on your enum.
+//!
+//! - `disabled`: removes variant from generated code.
+//!
+//! - `ascii_case_insensitive`: makes the comparison to this variant case insensitive (ASCII only).
+//!   If the whole enum is marked `ascii_case_insensitive`, you can specify `ascii_case_insensitive = false`
+//!   to disable case insensitivity on this variant.
+//!
+//! - `message=".."`: Adds a message to an enum variant. This is used in conjunction with the `EnumMessage`
+//!    trait to associate a message with a variant. If `detailed_message` is not provided,
+//!    then `message` will also be returned when `get_detailed_message` is called.
+//!
+//! - `detailed_message=".."`: Adds a more detailed message to a variant. If this value is omitted, then
+//!    `message` will be used in its place.
+//!
+//! - Structured documentation, as in `/// ...`: if using `EnumMessage`, this is accessible via `get_documentation()`.
+//!
+//! - `props(key="value")`: Enables associating additional information with a given variant.
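+//!
+//! For example, a minimal sketch combining a few of the variant attributes above:
+//!
+//! ```rust
+//! use strum_macros::EnumString;
+//!
+//! #[derive(Debug, PartialEq, EnumString)]
+//! enum Token {
+//!     // Parses from either "plus" or "+".
+//!     #[strum(serialize = "plus", serialize = "+")]
+//!     Plus,
+//!     // Excluded from the generated code entirely.
+//!     #[strum(disabled)]
+//!     Hidden,
+//!     // Fallback variant that captures any unmatched input string.
+//!     #[strum(default)]
+//!     Other(String),
+//! }
+//!
+//! assert_eq!(Ok(Token::Plus), "+".parse());
+//! assert_eq!(Ok(Token::Other("xyz".into())), "xyz".parse());
+//! ```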
diff --git a/crates/strum/src/lib.rs b/crates/strum/src/lib.rs
new file mode 100644
index 0000000..7fbe5e2
--- /dev/null
+++ b/crates/strum/src/lib.rs
@@ -0,0 +1,245 @@
+//! # Strum
+//!
+//! [![Build Status](https://travis-ci.org/Peternator7/strum.svg?branch=master)](https://travis-ci.org/Peternator7/strum)
+//! [![Latest Version](https://img.shields.io/crates/v/strum.svg)](https://crates.io/crates/strum)
+//! [![Rust Documentation](https://docs.rs/strum/badge.svg)](https://docs.rs/strum)
+//!
+//! Strum is a set of macros and traits that make working with
+//! enums and strings easier in Rust.
+//!
+//! The full version of the README can be found on [GitHub](https://github.com/Peternator7/strum).
+//!
+//! # Including Strum in Your Project
+//!
+//! Import strum and `strum_macros` into your project by adding the following lines to your
+//! Cargo.toml. `strum_macros` contains the macros needed to derive all the traits in Strum.
+//!
+//! ```toml
+//! [dependencies]
+//! strum = "0.25"
+//! strum_macros = "0.25"
+//!
+//! # You can also access strum_macros exports directly through strum using the "derive" feature
+//! strum = { version = "0.25", features = ["derive"] }
+//! ```
+//!
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+// only for documentation purposes
+pub mod additional_attributes;
+
+#[cfg(feature = "phf")]
+#[doc(hidden)]
+pub use phf as _private_phf_reexport_for_macro_if_phf_feature;
+
+/// The `ParseError` enum is a collection of all the possible reasons
+/// an enum can fail to parse from a string.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
+pub enum ParseError {
+    VariantNotFound,
+}
+
+#[cfg(feature = "std")]
+impl std::fmt::Display for ParseError {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
+        // We could use our macro here, but this way we don't take a dependency on the
+        // macros crate.
+        match self {
+            ParseError::VariantNotFound => write!(f, "Matching variant not found"),
+        }
+    }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for ParseError {
+    fn description(&self) -> &str {
+        match self {
+            ParseError::VariantNotFound => {
+                "Unable to find a variant of the given enum matching the string given. Matching \
+                 can be extended with the Serialize attribute and is case sensitive."
+            }
+        }
+    }
+}
+
+/// This trait designates that an `Enum` can be iterated over. It can
+/// be auto-generated using `strum_macros` on your behalf.
+///
+/// # Example
+///
+/// ```rust
+/// # use std::fmt::Debug;
+/// // You need to bring the type into scope to use it!!!
+/// use strum::{EnumIter, IntoEnumIterator};
+///
+/// #[derive(EnumIter, Debug)]
+/// enum Color {
+///     Red,
+///     Green { range: usize },
+///     Blue(usize),
+///     Yellow,
+/// }
+///
+/// // Iterate over the items in an enum and perform some function on them.
+/// fn generic_iterator<E, F>(pred: F)
+/// where
+///     E: IntoEnumIterator,
+///     F: Fn(E),
+/// {
+///     for e in E::iter() {
+///         pred(e)
+///     }
+/// }
+///
+/// generic_iterator::<Color, _>(|color| println!("{:?}", color));
+/// ```
+pub trait IntoEnumIterator: Sized {
+    type Iterator: Iterator<Item = Self>;
+
+    fn iter() -> Self::Iterator;
+}
+
+pub trait VariantIterator: Sized {
+    type Iterator: Iterator<Item = Self>;
+
+    fn iter() -> Self::Iterator;
+}
+
+pub trait VariantMetadata {
+    const VARIANT_COUNT: usize;
+    const VARIANT_NAMES: &'static [&'static str];
+
+    fn variant_name(&self) -> &'static str;
+}
+
+/// Associates additional pieces of information with an Enum. This can be
+/// automatically implemented by deriving `EnumMessage` and annotating your variants with
+/// `#[strum(message="...")]`.
+///
+/// # Example
+///
+/// ```rust
+/// # use std::fmt::Debug;
+/// // You need to bring the type into scope to use it!!!
+/// use strum::EnumMessage;
+///
+/// #[derive(PartialEq, Eq, Debug, EnumMessage)]
+/// enum Pet {
+///     #[strum(message="I have a dog")]
+///     #[strum(detailed_message="My dog's name is Spots")]
+///     Dog,
+///     /// I am documented.
+///     #[strum(message="I don't have a cat")]
+///     Cat,
+/// }
+///
+/// let my_pet = Pet::Dog;
+/// assert_eq!("I have a dog", my_pet.get_message().unwrap());
+/// ```
+pub trait EnumMessage {
+    fn get_message(&self) -> Option<&'static str>;
+    fn get_detailed_message(&self) -> Option<&'static str>;
+
+    /// Get the doc comment associated with a variant if it exists.
+    fn get_documentation(&self) -> Option<&'static str>;
+    fn get_serializations(&self) -> &'static [&'static str];
+}
+
+/// `EnumProperty` is a trait that makes it possible to store additional information
+/// with enum variants. This trait is designed to be used with the macro of the same
+/// name in the `strum_macros` crate. Currently, only string literals are supported
+/// in attributes; the other methods will be implemented as additional attribute types
+/// become stabilized.
+///
+/// # Example
+///
+/// ```rust
+/// # use std::fmt::Debug;
+/// // You need to bring the type into scope to use it!!!
+/// use strum::EnumProperty;
+///
+/// #[derive(PartialEq, Eq, Debug, EnumProperty)]
+/// enum Class {
+///     #[strum(props(Teacher="Ms.Frizzle", Room="201"))]
+///     History,
+///     #[strum(props(Teacher="Mr.Smith"))]
+///     #[strum(props(Room="103"))]
+///     Mathematics,
+///     #[strum(props(Time="2:30"))]
+///     Science,
+/// }
+///
+/// let history = Class::History;
+/// assert_eq!("Ms.Frizzle", history.get_str("Teacher").unwrap());
+/// ```
+pub trait EnumProperty {
+    fn get_str(&self, prop: &str) -> Option<&'static str>;
+    fn get_int(&self, _prop: &str) -> Option<usize> {
+        Option::None
+    }
+
+    fn get_bool(&self, _prop: &str) -> Option<bool> {
+        Option::None
+    }
+}
+
+/// A cheap reference-to-reference conversion. Used to convert a value to a
+/// reference value with `'static` lifetime within generic code.
+#[deprecated(
+    since = "0.22.0",
+    note = "please use `#[derive(IntoStaticStr)]` instead"
+)]
+pub trait AsStaticRef<T>
+where
+    T: ?Sized,
+{
+    fn as_static(&self) -> &'static T;
+}
+
+/// A trait for capturing the number of variants in an enum. This trait can be auto-derived by
+/// `strum_macros`.
+pub trait EnumCount {
+    const COUNT: usize;
+}
+
+/// A trait for retrieving the names of each variant in an enum. This trait can
+/// be auto-derived by `strum_macros`.
+pub trait VariantNames {
+    /// Names of the variants of this enum
+    const VARIANTS: &'static [&'static str];
+}
+
+#[cfg(feature = "derive")]
+pub use strum_macros::*;
+
+macro_rules! DocumentMacroRexports {
+    ($($export:ident),+) => {
+        $(
+            #[cfg(all(docsrs, feature = "derive"))]
+            #[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
+            pub use strum_macros::$export;
+        )+
+    };
+}
+
+// We actually only re-export these items individually if we're building
+// for docsrs. You can do a weird thing where you rename the macro
+// and then reference it through strum. The renaming feature should be deprecated now that
+// 2018 edition is almost 2 years old, but we'll need to give people some time to do that.
+DocumentMacroRexports! {
+    AsRefStr,
+    AsStaticStr,
+    Display,
+    EnumCount,
+    EnumDiscriminants,
+    EnumIter,
+    EnumMessage,
+    EnumProperty,
+    EnumString,
+    EnumVariantNames,
+    FromRepr,
+    IntoStaticStr,
+    ToString
+}
diff --git a/crates/strum_macros/.cargo-checksum.json b/crates/strum_macros/.cargo-checksum.json
new file mode 100644
index 0000000..a9f9601
--- /dev/null
+++ b/crates/strum_macros/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"23072aa33960a8ac715daca71a1aef3236d09e65c8b3dc1c69b1fd0ab5f83a90","LICENSE":"8bce3b45e49ecd1461f223b46de133d8f62cd39f745cfdaf81bee554b908bd42","README.md":"78ad65413ae9989028bfef9ebf198dee8dc74af3ec7fb60d26e0d984b979c126","src/helpers/case_style.rs":"6094ea43fb51d249f0685957060bd7170e5bcea3356a93569a01ef011dc41cdb","src/helpers/metadata.rs":"87320d82b70220a75ccf411dc66a4e0b9eb959e7ad1602d82006a7cd07d6e310","src/helpers/mod.rs":"e3e8977fcf94b7cf3b769d976cc998707cc4641e5857f88859a10d2dc1bd8d5a","src/helpers/type_props.rs":"f9c5f1ae29173f5dd51025f83e723e176d47b01de0abc45105cedaa7abccbb47","src/helpers/variant_props.rs":"d528ce14015850ac1b34cbea46a8836bdaf50a290a2fe92ca560af6b192b5507","src/lib.rs":"eb2cc12a6136780ba93ab069d01fdfdaa41b3ca959ccb6f5350ec9006f778d0b","src/macros/enum_count.rs":"44084ab800ca8fca3b17f76c6025e8e59855598c0119807ce7bc80d5dc345d99","src/macros/enum_discriminants.rs":"7513c5eb515da55ff6507c8fd80fd1c626de78e68ea737d3edea48e0ee56337c","src/macros/enum_is.rs":"c56e67aa93b173b1fc0301106864ed567bc708e55bc28982ad05f5c8efa343ce","src/macros/enum_iter.rs":"81f62d61cfe50320ee4cacb09d19f817ec04c0b7c77c6a7aad24dafe093eedab","src/macros/enum_messages.rs":"d23b427e1b0f8ab9709e93837aba16bff12ccd92fb55bf013ca80a292ed7ffc5","src/macros/enum_properties.rs":"a9e1bf27504d46df689aad8910c7ad8ae7b3780ee97b6e659b2b19e481474030","src/macros/enum_try_as.rs":"b3bbc5020351251f005cfadf86a8759bb28172c9d176684b90074d32cc2ae3dd","src/macros/enum_variant_names.rs":"7d6d381ab10c603d1e87a089057138ef21c9b3de1246ba1fdcec894ef239a461","src/macros/from_repr.rs":"69b904604cdb0287f9dccafa15592627e32d3edd2310a2ddaa53a5547e903f71","src/macros/mod.rs":"13b0e3b5ddc63e4750aa29fbdb4a338307367cf148099b17813e340fe5dfdf32","src/macros/strings/as_ref_str.rs":"73ce53ae25c8a148075e858ab03eb13fe7cba350cfb649048b5b6582703fc5da","src/macros/strings/display.rs":"c53989fcb45a9d22f0990cf620e0ffff9e2d9e46a93edc780901ab43650dfb29","src/macros/strings/from_string.rs":"99d8b8c1a7aa99ab12f63e5f74e96c980e17740738e800a854d688d6fed59ecb","src/macros/strings/mod.rs":"b7937f72a46c807fd87c787f3d23029509565a0388a30106e7026363782f2b56","src/macros/strings/to_string.rs":"3d43865603be815cc52a8364bc3e969af7a1bd9532d9461fcce8252098488b0a"},"package":"23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0"}
\ No newline at end of file
diff --git a/crates/strum_macros/Android.bp b/crates/strum_macros/Android.bp
new file mode 100644
index 0000000..344b2b7
--- /dev/null
+++ b/crates/strum_macros/Android.bp
@@ -0,0 +1,53 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_strum_macros_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_strum_macros_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-MIT"],
+    license_text: ["LICENSE"],
+}
+
+rust_proc_macro {
+    name: "libstrum_macros",
+    crate_name: "strum_macros",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.25.3",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    rustlibs: [
+        "libheck",
+        "libproc_macro2",
+        "libquote",
+        "libsyn",
+    ],
+    proc_macros: ["librustversion"],
+}
+
+rust_test_host {
+    name: "strum_macros_test_src_lib",
+    host_cross_supported: false,
+    crate_name: "strum_macros",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.25.3",
+    crate_root: "src/lib.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+    rustlibs: [
+        "libheck",
+        "libproc_macro2",
+        "libquote",
+        "libstrum",
+        "libsyn",
+    ],
+    proc_macros: ["librustversion"],
+}
diff --git a/crates/strum_macros/Cargo.lock b/crates/strum_macros/Cargo.lock
new file mode 100644
index 0000000..bdfb95e
--- /dev/null
+++ b/crates/strum_macros/Cargo.lock
@@ -0,0 +1,68 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "heck"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rustversion"
+version = "1.0.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
+
+[[package]]
+name = "strum"
+version = "0.25.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
+
+[[package]]
+name = "strum_macros"
+version = "0.25.3"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "rustversion",
+ "strum",
+ "syn",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
diff --git a/crates/strum_macros/Cargo.toml b/crates/strum_macros/Cargo.toml
new file mode 100644
index 0000000..45078e7
--- /dev/null
+++ b/crates/strum_macros/Cargo.toml
@@ -0,0 +1,58 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "strum_macros"
+version = "0.25.3"
+authors = ["Peter Glotfelty <peter.glotfelty@microsoft.com>"]
+description = "Helpful macros for working with enums and strings"
+homepage = "https://github.com/Peternator7/strum"
+documentation = "https://docs.rs/strum"
+readme = "README.md"
+keywords = [
+    "enum",
+    "string",
+    "macros",
+    "proc-macros",
+]
+categories = [
+    "development-tools::procedural-macro-helpers",
+    "parsing",
+]
+license = "MIT"
+repository = "https://github.com/Peternator7/strum"
+
+[lib]
+name = "strum_macros"
+proc-macro = true
+
+[dependencies.heck]
+version = "0.4.1"
+
+[dependencies.proc-macro2]
+version = "1.0"
+
+[dependencies.quote]
+version = "1.0"
+
+[dependencies.rustversion]
+version = "1.0"
+
+[dependencies.syn]
+version = "2.0"
+features = [
+    "parsing",
+    "extra-traits",
+]
+
+[dev-dependencies.strum]
+version = "0.25"
diff --git a/crates/strum_macros/LICENSE b/crates/strum_macros/LICENSE
new file mode 100644
index 0000000..588b4a7
--- /dev/null
+++ b/crates/strum_macros/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Peter Glotfelty
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/crates/strum_macros/METADATA b/crates/strum_macros/METADATA
new file mode 100644
index 0000000..705441a
--- /dev/null
+++ b/crates/strum_macros/METADATA
@@ -0,0 +1,19 @@
+name: "strum_macros"
+description: "Helpful macros for working with enums and strings"
+third_party {
+  identifier {
+    type: "crates.io"
+    value: "https://crates.io/crates/strum_macros"
+  }
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/strum_macros/strum_macros-0.25.3.crate"
+  }
+  version: "0.25.3"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2024
+    month: 1
+    day: 17
+  }
+}
diff --git a/crates/strum_macros/MODULE_LICENSE_MIT b/crates/strum_macros/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/strum_macros/MODULE_LICENSE_MIT
diff --git a/crates/strum_macros/README.md b/crates/strum_macros/README.md
new file mode 100644
index 0000000..491c24f
--- /dev/null
+++ b/crates/strum_macros/README.md
@@ -0,0 +1,82 @@
+# Strum
+
+[![Build Status](https://travis-ci.com/Peternator7/strum.svg?branch=master)](https://travis-ci.com/Peternator7/strum)
+[![Build status](https://ci.appveyor.com/api/projects/status/ji4f6n2m5lvu11xt?svg=true)](https://ci.appveyor.com/project/Peternator7/strum)
+[![Latest Version](https://img.shields.io/crates/v/strum.svg)](https://crates.io/crates/strum)
+[![Rust Documentation](https://docs.rs/strum/badge.svg)](https://docs.rs/strum)
+![Crates.io](https://img.shields.io/crates/l/strum)
+![Crates.io](https://img.shields.io/crates/d/strum)
+
+Strum is a set of macros and traits that make working with enums and strings easier in Rust.
+
+# Compatibility
+
+Strum is currently compatible with versions of rustc >= 1.56.1. Pull Requests that improve compatibility with older
+versions are welcome. The project aims to support a Rust version for at least 2 years after its release,
+and preferably longer, since this project changes slowly.
+
+# Including Strum in Your Project
+
+Import strum and `strum_macros` into your project by adding the following lines to your
+Cargo.toml. `strum_macros` contains the macros needed to derive all the traits in Strum.
+
+```toml
+[dependencies]
+strum = "0.25"
+strum_macros = "0.25"
+
+# You can also use the "derive" feature, and import the macros directly from "strum"
+# strum = { version = "0.25", features = ["derive"] }
+```
+
+# Strum Macros
+
+Strum has implemented the following macros:
+
+| Macro | Description |
+| --- | ----------- |
+| [EnumString] | Converts strings to enum variants based on their name. |
+| [Display] | Converts enum variants to strings |
+| [FromRepr] | Convert from an integer to an enum. |
+| [AsRefStr] | Implement `AsRef<str>` for `MyEnum` |
+| [IntoStaticStr] | Implements `From<MyEnum> for &'static str` on an enum |
+| [EnumVariantNames] | Adds an associated `VARIANTS` constant which is an array of discriminant names |
+| [EnumIter] | Creates a new type that iterates over the variants of an enum. |
+| [EnumProperty] | Add custom properties to enum variants. |
+| [EnumMessage] | Add a verbose message to an enum variant. |
+| [EnumDiscriminants] | Generate a new type with only the discriminant names. |
+| [EnumCount] | Add a constant `usize` equal to the number of variants. |
+
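+As a minimal sketch (assuming both `strum` and `strum_macros` 0.25 are declared as
+dependencies), a few of the derives from the table above can be combined like this:
+
+```rust
+use strum::IntoEnumIterator;
+use strum_macros::{Display, EnumIter, EnumString};
+
+// A small enum deriving several of the macros listed above.
+#[derive(Debug, PartialEq, Display, EnumIter, EnumString)]
+enum Color {
+    Red,
+    Green,
+    Blue,
+}
+
+fn main() {
+    // Display: variant -> string.
+    assert_eq!("Red", Color::Red.to_string());
+    // EnumString: string -> variant via `FromStr`.
+    assert_eq!(Ok(Color::Green), "Green".parse());
+    // EnumIter: iterate over every variant.
+    assert_eq!(3, Color::iter().count());
+}
+```
+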
+# Contributing
+
+Thanks for your interest in contributing. The project is divided into 3 parts: the traits are in the
+`/strum` folder. The procedural macros are in the `/strum_macros` folder, and the integration tests are
+in `/strum_tests`. If you are adding additional features to `strum` or `strum_macros`, you should make sure
+to run the tests and add new integration tests to make sure the features work as expected.
+
+# Debugging
+
+To see the generated code, set the STRUM_DEBUG environment variable before compiling your code.
+`STRUM_DEBUG=1` will dump all of the generated code for every type. `STRUM_DEBUG=YourType` will
+only dump the code generated on a type named `YourType`.
+
+# Name
+
+Strum is short for STRing enUM because it's a library for augmenting enums with additional
+information through strings.
+
+Strumming is also a very whimsical motion, much like writing Rust code.
+
+[Macro-Renames]: https://github.com/Peternator7/strum/wiki/Macro-Renames
+[EnumString]: https://docs.rs/strum_macros/0.25/strum_macros/derive.EnumString.html
+[Display]: https://docs.rs/strum_macros/0.25/strum_macros/derive.Display.html
+[AsRefStr]: https://docs.rs/strum_macros/0.25/strum_macros/derive.AsRefStr.html
+[IntoStaticStr]: https://docs.rs/strum_macros/0.25/strum_macros/derive.IntoStaticStr.html
+[EnumVariantNames]: https://docs.rs/strum_macros/0.25/strum_macros/derive.EnumVariantNames.html
+[EnumIter]: https://docs.rs/strum_macros/0.25/strum_macros/derive.EnumIter.html
+[EnumIs]: https://docs.rs/strum_macros/0.25/strum_macros/derive.EnumIs.html
+[EnumProperty]: https://docs.rs/strum_macros/0.25/strum_macros/derive.EnumProperty.html
+[EnumMessage]: https://docs.rs/strum_macros/0.25/strum_macros/derive.EnumMessage.html
+[EnumDiscriminants]: https://docs.rs/strum_macros/0.25/strum_macros/derive.EnumDiscriminants.html
+[EnumCount]: https://docs.rs/strum_macros/0.25/strum_macros/derive.EnumCount.html
+[FromRepr]: https://docs.rs/strum_macros/0.25/strum_macros/derive.FromRepr.html
diff --git a/crates/strum_macros/cargo_embargo.json b/crates/strum_macros/cargo_embargo.json
new file mode 100644
index 0000000..818511f
--- /dev/null
+++ b/crates/strum_macros/cargo_embargo.json
@@ -0,0 +1,9 @@
+{
+  "package": {
+    "strum_macros": {
+      "device_supported": false
+    }
+  },
+  "run_cargo": false,
+  "tests": true
+}
diff --git a/crates/strum_macros/src/helpers/case_style.rs b/crates/strum_macros/src/helpers/case_style.rs
new file mode 100644
index 0000000..86a8583
--- /dev/null
+++ b/crates/strum_macros/src/helpers/case_style.rs
@@ -0,0 +1,177 @@
+use heck::{
+    ToKebabCase, ToLowerCamelCase, ToShoutySnakeCase, ToSnakeCase, ToTitleCase, ToUpperCamelCase, ToTrainCase,
+};
+use std::str::FromStr;
+use syn::{
+    parse::{Parse, ParseStream},
+    Ident, LitStr,
+};
+
+#[allow(clippy::enum_variant_names)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub enum CaseStyle {
+    CamelCase,
+    KebabCase,
+    MixedCase,
+    ShoutySnakeCase,
+    SnakeCase,
+    TitleCase,
+    UpperCase,
+    LowerCase,
+    ScreamingKebabCase,
+    PascalCase,
+    TrainCase,
+}
+
+const VALID_CASE_STYLES: &[&str] = &[
+    "camelCase",
+    "PascalCase",
+    "kebab-case",
+    "snake_case",
+    "SCREAMING_SNAKE_CASE",
+    "SCREAMING-KEBAB-CASE",
+    "lowercase",
+    "UPPERCASE",
+    "title_case",
+    "mixed_case",
+    "Train-Case",
+];
+
+impl Parse for CaseStyle {
+    fn parse(input: ParseStream) -> syn::Result<Self> {
+        let text = input.parse::<LitStr>()?;
+        let val = text.value();
+
+        val.as_str().parse().map_err(|_| {
+            syn::Error::new_spanned(
+                &text,
+                format!(
+                    "Unexpected case style for serialize_all: `{}`. Valid values are: `{:?}`",
+                    val, VALID_CASE_STYLES
+                ),
+            )
+        })
+    }
+}
+
+impl FromStr for CaseStyle {
+    type Err = ();
+
+    fn from_str(text: &str) -> Result<Self, ()> {
+        Ok(match text {
+            // "camel_case" is a soft-deprecated case-style left for backward compatibility.
+            // <https://github.com/Peternator7/strum/pull/250#issuecomment-1374682221>
+            "PascalCase" | "camel_case" => CaseStyle::PascalCase,
+            "camelCase" => CaseStyle::CamelCase,
+            "snake_case" | "snek_case" => CaseStyle::SnakeCase,
+            "kebab-case" | "kebab_case" => CaseStyle::KebabCase,
+            "SCREAMING-KEBAB-CASE" => CaseStyle::ScreamingKebabCase,
+            "SCREAMING_SNAKE_CASE" | "shouty_snake_case" | "shouty_snek_case" => {
+                CaseStyle::ShoutySnakeCase
+            }
+            "title_case" => CaseStyle::TitleCase,
+            "mixed_case" => CaseStyle::MixedCase,
+            "lowercase" => CaseStyle::LowerCase,
+            "UPPERCASE" => CaseStyle::UpperCase,
+            "Train-Case" => CaseStyle::TrainCase,
+            _ => return Err(()),
+        })
+    }
+}
+
+pub trait CaseStyleHelpers {
+    fn convert_case(&self, case_style: Option<CaseStyle>) -> String;
+}
+
+impl CaseStyleHelpers for Ident {
+    fn convert_case(&self, case_style: Option<CaseStyle>) -> String {
+        let ident_string = self.to_string();
+        if let Some(case_style) = case_style {
+            match case_style {
+                CaseStyle::PascalCase => ident_string.to_upper_camel_case(),
+                CaseStyle::KebabCase => ident_string.to_kebab_case(),
+                CaseStyle::MixedCase => ident_string.to_lower_camel_case(),
+                CaseStyle::ShoutySnakeCase => ident_string.to_shouty_snake_case(),
+                CaseStyle::SnakeCase => ident_string.to_snake_case(),
+                CaseStyle::TitleCase => ident_string.to_title_case(),
+                CaseStyle::UpperCase => ident_string.to_uppercase(),
+                CaseStyle::LowerCase => ident_string.to_lowercase(),
+                CaseStyle::ScreamingKebabCase => ident_string.to_kebab_case().to_uppercase(),
+                CaseStyle::TrainCase => ident_string.to_train_case(),
+                CaseStyle::CamelCase => {
+                    let camel_case = ident_string.to_upper_camel_case();
+                    let mut pascal = String::with_capacity(camel_case.len());
+                    let mut it = camel_case.chars();
+                    if let Some(ch) = it.next() {
+                        pascal.extend(ch.to_lowercase());
+                    }
+                    pascal.extend(it);
+                    pascal
+                }
+            }
+        } else {
+            ident_string
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_convert_case() {
+        let id = Ident::new("test_me", proc_macro2::Span::call_site());
+        assert_eq!("testMe", id.convert_case(Some(CaseStyle::CamelCase)));
+        assert_eq!("TestMe", id.convert_case(Some(CaseStyle::PascalCase)));
+        assert_eq!("Test-Me", id.convert_case(Some(CaseStyle::TrainCase)));
+    }
+
+    #[test]
+    fn test_impl_from_str_for_case_style_pascal_case() {
+        use CaseStyle::*;
+        let f = CaseStyle::from_str;
+
+        assert_eq!(PascalCase, f("PascalCase").unwrap());
+        assert_eq!(PascalCase, f("camel_case").unwrap());
+
+        assert_eq!(CamelCase, f("camelCase").unwrap());
+
+        assert_eq!(SnakeCase, f("snake_case").unwrap());
+        assert_eq!(SnakeCase, f("snek_case").unwrap());
+
+        assert_eq!(KebabCase, f("kebab-case").unwrap());
+        assert_eq!(KebabCase, f("kebab_case").unwrap());
+
+        assert_eq!(ScreamingKebabCase, f("SCREAMING-KEBAB-CASE").unwrap());
+
+        assert_eq!(ShoutySnakeCase, f("SCREAMING_SNAKE_CASE").unwrap());
+        assert_eq!(ShoutySnakeCase, f("shouty_snake_case").unwrap());
+        assert_eq!(ShoutySnakeCase, f("shouty_snek_case").unwrap());
+
+        assert_eq!(LowerCase, f("lowercase").unwrap());
+
+        assert_eq!(UpperCase, f("UPPERCASE").unwrap());
+
+        assert_eq!(TitleCase, f("title_case").unwrap());
+
+        assert_eq!(MixedCase, f("mixed_case").unwrap());
+    }
+}
+
+/// heck doesn't treat numbers as new words, but this function does.
+/// E.g. for input `Hello2You`, heck would output `hello2_you`, and snakify would output `hello_2_you`.
+pub fn snakify(s: &str) -> String {
+    let mut output: Vec<char> = s.to_string().to_snake_case().chars().collect();
+    let mut num_starts = vec![];
+    for (pos, c) in output.iter().enumerate() {
+        if c.is_digit(10) && pos != 0 && !output[pos - 1].is_digit(10) {
+            num_starts.push(pos);
+        }
+    }
+    // need to do in reverse, because after inserting, all chars after the point of insertion are off
+    for i in num_starts.into_iter().rev() {
+        output.insert(i, '_')
+    }
+    output.into_iter().collect()
+}
diff --git a/crates/strum_macros/src/helpers/metadata.rs b/crates/strum_macros/src/helpers/metadata.rs
new file mode 100644
index 0000000..d638ae3
--- /dev/null
+++ b/crates/strum_macros/src/helpers/metadata.rs
@@ -0,0 +1,276 @@
+use proc_macro2::TokenStream;
+use syn::{
+    parenthesized,
+    parse::{Parse, ParseStream},
+    parse2, parse_str,
+    punctuated::Punctuated,
+    Attribute, DeriveInput, Expr, ExprLit, Ident, Lit, LitBool, LitStr, Meta, MetaNameValue, Path,
+    Token, Variant, Visibility,
+};
+
+use super::case_style::CaseStyle;
+
+pub mod kw {
+    use syn::custom_keyword;
+    pub use syn::token::Crate;
+
+    // enum metadata
+    custom_keyword!(serialize_all);
+    custom_keyword!(use_phf);
+
+    // enum discriminant metadata
+    custom_keyword!(derive);
+    custom_keyword!(name);
+    custom_keyword!(vis);
+
+    // variant metadata
+    custom_keyword!(message);
+    custom_keyword!(detailed_message);
+    custom_keyword!(serialize);
+    custom_keyword!(to_string);
+    custom_keyword!(disabled);
+    custom_keyword!(default);
+    custom_keyword!(props);
+    custom_keyword!(ascii_case_insensitive);
+}
+
+pub enum EnumMeta {
+    SerializeAll {
+        kw: kw::serialize_all,
+        case_style: CaseStyle,
+    },
+    AsciiCaseInsensitive(kw::ascii_case_insensitive),
+    Crate {
+        kw: kw::Crate,
+        crate_module_path: Path,
+    },
+    UsePhf(kw::use_phf),
+}
+
+impl Parse for EnumMeta {
+    fn parse(input: ParseStream) -> syn::Result<Self> {
+        let lookahead = input.lookahead1();
+        if lookahead.peek(kw::serialize_all) {
+            let kw = input.parse::<kw::serialize_all>()?;
+            input.parse::<Token![=]>()?;
+            let case_style = input.parse()?;
+            Ok(EnumMeta::SerializeAll { kw, case_style })
+        } else if lookahead.peek(kw::Crate) {
+            let kw = input.parse::<kw::Crate>()?;
+            input.parse::<Token![=]>()?;
+            let path_str: LitStr = input.parse()?;
+            let path_tokens = parse_str(&path_str.value())?;
+            let crate_module_path = parse2(path_tokens)?;
+            Ok(EnumMeta::Crate {
+                kw,
+                crate_module_path,
+            })
+        } else if lookahead.peek(kw::ascii_case_insensitive) {
+            Ok(EnumMeta::AsciiCaseInsensitive(input.parse()?))
+        } else if lookahead.peek(kw::use_phf) {
+            Ok(EnumMeta::UsePhf(input.parse()?))
+        } else {
+            Err(lookahead.error())
+        }
+    }
+}
+
+pub enum EnumDiscriminantsMeta {
+    Derive { kw: kw::derive, paths: Vec<Path> },
+    Name { kw: kw::name, name: Ident },
+    Vis { kw: kw::vis, vis: Visibility },
+    Other { path: Path, nested: TokenStream },
+}
+
+impl Parse for EnumDiscriminantsMeta {
+    fn parse(input: ParseStream) -> syn::Result<Self> {
+        if input.peek(kw::derive) {
+            let kw = input.parse()?;
+            let content;
+            parenthesized!(content in input);
+            let paths = content.parse_terminated(Path::parse, Token![,])?;
+            Ok(EnumDiscriminantsMeta::Derive {
+                kw,
+                paths: paths.into_iter().collect(),
+            })
+        } else if input.peek(kw::name) {
+            let kw = input.parse()?;
+            let content;
+            parenthesized!(content in input);
+            let name = content.parse()?;
+            Ok(EnumDiscriminantsMeta::Name { kw, name })
+        } else if input.peek(kw::vis) {
+            let kw = input.parse()?;
+            let content;
+            parenthesized!(content in input);
+            let vis = content.parse()?;
+            Ok(EnumDiscriminantsMeta::Vis { kw, vis })
+        } else {
+            let path = input.parse()?;
+            let content;
+            parenthesized!(content in input);
+            let nested = content.parse()?;
+            Ok(EnumDiscriminantsMeta::Other { path, nested })
+        }
+    }
+}
+
+pub trait DeriveInputExt {
+    /// Get all the strum metadata associated with an enum.
+    fn get_metadata(&self) -> syn::Result<Vec<EnumMeta>>;
+
+    /// Get all the `strum_discriminants` metadata associated with an enum.
+    fn get_discriminants_metadata(&self) -> syn::Result<Vec<EnumDiscriminantsMeta>>;
+}
+
+impl DeriveInputExt for DeriveInput {
+    fn get_metadata(&self) -> syn::Result<Vec<EnumMeta>> {
+        get_metadata_inner("strum", &self.attrs)
+    }
+
+    fn get_discriminants_metadata(&self) -> syn::Result<Vec<EnumDiscriminantsMeta>> {
+        get_metadata_inner("strum_discriminants", &self.attrs)
+    }
+}
+
+pub enum VariantMeta {
+    Message {
+        kw: kw::message,
+        value: LitStr,
+    },
+    DetailedMessage {
+        kw: kw::detailed_message,
+        value: LitStr,
+    },
+    Serialize {
+        kw: kw::serialize,
+        value: LitStr,
+    },
+    Documentation {
+        value: LitStr,
+    },
+    ToString {
+        kw: kw::to_string,
+        value: LitStr,
+    },
+    Disabled(kw::disabled),
+    Default(kw::default),
+    AsciiCaseInsensitive {
+        kw: kw::ascii_case_insensitive,
+        value: bool,
+    },
+    Props {
+        kw: kw::props,
+        props: Vec<(LitStr, LitStr)>,
+    },
+}
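+
+// Illustrative attribute forms (inferred from the `Parse` impl below):
+//   #[strum(message = "...")], #[strum(detailed_message = "...")],
+//   #[strum(serialize = "...")], #[strum(to_string = "...")],
+//   #[strum(disabled)], #[strum(default)],
+//   #[strum(ascii_case_insensitive)] (optionally `= true`/`= false`),
+//   #[strum(props(Key = "value", ...))].
+// `Documentation` is not parsed here; `VariantExt::get_metadata` collects it from
+// the variant's `#[doc]` attributes (doc comments).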
+
+impl Parse for VariantMeta {
+    fn parse(input: ParseStream) -> syn::Result<Self> {
+        let lookahead = input.lookahead1();
+        if lookahead.peek(kw::message) {
+            let kw = input.parse()?;
+            let _: Token![=] = input.parse()?;
+            let value = input.parse()?;
+            Ok(VariantMeta::Message { kw, value })
+        } else if lookahead.peek(kw::detailed_message) {
+            let kw = input.parse()?;
+            let _: Token![=] = input.parse()?;
+            let value = input.parse()?;
+            Ok(VariantMeta::DetailedMessage { kw, value })
+        } else if lookahead.peek(kw::serialize) {
+            let kw = input.parse()?;
+            let _: Token![=] = input.parse()?;
+            let value = input.parse()?;
+            Ok(VariantMeta::Serialize { kw, value })
+        } else if lookahead.peek(kw::to_string) {
+            let kw = input.parse()?;
+            let _: Token![=] = input.parse()?;
+            let value = input.parse()?;
+            Ok(VariantMeta::ToString { kw, value })
+        } else if lookahead.peek(kw::disabled) {
+            Ok(VariantMeta::Disabled(input.parse()?))
+        } else if lookahead.peek(kw::default) {
+            Ok(VariantMeta::Default(input.parse()?))
+        } else if lookahead.peek(kw::ascii_case_insensitive) {
+            let kw = input.parse()?;
+            let value = if input.peek(Token![=]) {
+                let _: Token![=] = input.parse()?;
+                input.parse::<LitBool>()?.value
+            } else {
+                true
+            };
+            Ok(VariantMeta::AsciiCaseInsensitive { kw, value })
+        } else if lookahead.peek(kw::props) {
+            let kw = input.parse()?;
+            let content;
+            parenthesized!(content in input);
+            let props = content.parse_terminated(Prop::parse, Token![,])?;
+            Ok(VariantMeta::Props {
+                kw,
+                props: props
+                    .into_iter()
+                    .map(|Prop(k, v)| (LitStr::new(&k.to_string(), k.span()), v))
+                    .collect(),
+            })
+        } else {
+            Err(lookahead.error())
+        }
+    }
+}
+
+struct Prop(Ident, LitStr);
+
+impl Parse for Prop {
+    fn parse(input: ParseStream) -> syn::Result<Self> {
+        use syn::ext::IdentExt;
+
+        let k = Ident::parse_any(input)?;
+        let _: Token![=] = input.parse()?;
+        let v = input.parse()?;
+
+        Ok(Prop(k, v))
+    }
+}
+
+pub trait VariantExt {
+    /// Get all the metadata associated with an enum variant.
+    fn get_metadata(&self) -> syn::Result<Vec<VariantMeta>>;
+}
+
+impl VariantExt for Variant {
+    fn get_metadata(&self) -> syn::Result<Vec<VariantMeta>> {
+        let result = get_metadata_inner("strum", &self.attrs)?;
+        self.attrs
+            .iter()
+            .filter(|attr| attr.path().is_ident("doc"))
+            .try_fold(result, |mut vec, attr| {
+                if let Meta::NameValue(MetaNameValue {
+                    value:
+                        Expr::Lit(ExprLit {
+                            lit: Lit::Str(value),
+                            ..
+                        }),
+                    ..
+                }) = &attr.meta
+                {
+                    vec.push(VariantMeta::Documentation {
+                        value: value.clone(),
+                    })
+                }
+                Ok(vec)
+            })
+    }
+}
+
+fn get_metadata_inner<'a, T: Parse>(
+    ident: &str,
+    it: impl IntoIterator<Item = &'a Attribute>,
+) -> syn::Result<Vec<T>> {
+    it.into_iter()
+        .filter(|attr| attr.path().is_ident(ident))
+        .try_fold(Vec::new(), |mut vec, attr| {
+            vec.extend(attr.parse_args_with(Punctuated::<T, Token![,]>::parse_terminated)?);
+            Ok(vec)
+        })
+}
diff --git a/crates/strum_macros/src/helpers/mod.rs b/crates/strum_macros/src/helpers/mod.rs
new file mode 100644
index 0000000..142ea0b
--- /dev/null
+++ b/crates/strum_macros/src/helpers/mod.rs
@@ -0,0 +1,32 @@
+pub use self::case_style::{CaseStyleHelpers, snakify};
+pub use self::type_props::HasTypeProperties;
+pub use self::variant_props::HasStrumVariantProperties;
+
+pub mod case_style;
+mod metadata;
+pub mod type_props;
+pub mod variant_props;
+
+use proc_macro2::Span;
+use quote::ToTokens;
+use syn::spanned::Spanned;
+
+pub fn non_enum_error() -> syn::Error {
+    syn::Error::new(Span::call_site(), "This macro only supports enums.")
+}
+
+pub fn strum_discriminants_passthrough_error(span: &impl Spanned) -> syn::Error {
+    syn::Error::new(
+        span.span(),
+        "expected a pass-through attribute, e.g. #[strum_discriminants(serde(rename = \"var0\"))]",
+    )
+}
+
+pub fn occurrence_error<T: ToTokens>(fst: T, snd: T, attr: &str) -> syn::Error {
+    let mut e = syn::Error::new_spanned(
+        snd,
+        format!("Found multiple occurrences of strum({})", attr),
+    );
+    e.combine(syn::Error::new_spanned(fst, "first one here"));
+    e
+}
diff --git a/crates/strum_macros/src/helpers/type_props.rs b/crates/strum_macros/src/helpers/type_props.rs
new file mode 100644
index 0000000..0d49e04
--- /dev/null
+++ b/crates/strum_macros/src/helpers/type_props.rs
@@ -0,0 +1,116 @@
+use proc_macro2::TokenStream;
+use quote::quote;
+use std::default::Default;
+use syn::{parse_quote, DeriveInput, Ident, Path, Visibility};
+
+use super::case_style::CaseStyle;
+use super::metadata::{DeriveInputExt, EnumDiscriminantsMeta, EnumMeta};
+use super::occurrence_error;
+
+pub trait HasTypeProperties {
+    fn get_type_properties(&self) -> syn::Result<StrumTypeProperties>;
+}
+
+#[derive(Debug, Clone, Default)]
+pub struct StrumTypeProperties {
+    pub case_style: Option<CaseStyle>,
+    pub ascii_case_insensitive: bool,
+    pub crate_module_path: Option<Path>,
+    pub discriminant_derives: Vec<Path>,
+    pub discriminant_name: Option<Ident>,
+    pub discriminant_others: Vec<TokenStream>,
+    pub discriminant_vis: Option<Visibility>,
+    pub use_phf: bool,
+}
+
+impl HasTypeProperties for DeriveInput {
+    fn get_type_properties(&self) -> syn::Result<StrumTypeProperties> {
+        let mut output = StrumTypeProperties::default();
+
+        let strum_meta = self.get_metadata()?;
+        let discriminants_meta = self.get_discriminants_metadata()?;
+
+        let mut serialize_all_kw = None;
+        let mut ascii_case_insensitive_kw = None;
+        let mut use_phf_kw = None;
+        let mut crate_module_path_kw = None;
+        for meta in strum_meta {
+            match meta {
+                EnumMeta::SerializeAll { case_style, kw } => {
+                    if let Some(fst_kw) = serialize_all_kw {
+                        return Err(occurrence_error(fst_kw, kw, "serialize_all"));
+                    }
+
+                    serialize_all_kw = Some(kw);
+                    output.case_style = Some(case_style);
+                }
+                EnumMeta::AsciiCaseInsensitive(kw) => {
+                    if let Some(fst_kw) = ascii_case_insensitive_kw {
+                        return Err(occurrence_error(fst_kw, kw, "ascii_case_insensitive"));
+                    }
+
+                    ascii_case_insensitive_kw = Some(kw);
+                    output.ascii_case_insensitive = true;
+                }
+                EnumMeta::UsePhf(kw) => {
+                    if let Some(fst_kw) = use_phf_kw {
+                        return Err(occurrence_error(fst_kw, kw, "use_phf"));
+                    }
+
+                    use_phf_kw = Some(kw);
+                    output.use_phf = true;
+                }
+                EnumMeta::Crate {
+                    crate_module_path,
+                    kw,
+                } => {
+                    if let Some(fst_kw) = crate_module_path_kw {
+                        return Err(occurrence_error(fst_kw, kw, "Crate"));
+                    }
+
+                    crate_module_path_kw = Some(kw);
+                    output.crate_module_path = Some(crate_module_path);
+                }
+            }
+        }
+
+        let mut name_kw = None;
+        let mut vis_kw = None;
+        for meta in discriminants_meta {
+            match meta {
+                EnumDiscriminantsMeta::Derive { paths, .. } => {
+                    output.discriminant_derives.extend(paths);
+                }
+                EnumDiscriminantsMeta::Name { name, kw } => {
+                    if let Some(fst_kw) = name_kw {
+                        return Err(occurrence_error(fst_kw, kw, "name"));
+                    }
+
+                    name_kw = Some(kw);
+                    output.discriminant_name = Some(name);
+                }
+                EnumDiscriminantsMeta::Vis { vis, kw } => {
+                    if let Some(fst_kw) = vis_kw {
+                        return Err(occurrence_error(fst_kw, kw, "vis"));
+                    }
+
+                    vis_kw = Some(kw);
+                    output.discriminant_vis = Some(vis);
+                }
+                EnumDiscriminantsMeta::Other { path, nested } => {
+                    output.discriminant_others.push(quote! { #path(#nested) });
+                }
+            }
+        }
+
+        Ok(output)
+    }
+}
+
+impl StrumTypeProperties {
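+    /// Path used to refer to the `strum` crate in generated code: the value given via
+    /// the enum-level `Crate` attribute if present, otherwise `::strum`.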
+    pub fn crate_module_path(&self) -> Path {
+        self.crate_module_path
+            .as_ref()
+            .map_or_else(|| parse_quote!(::strum), |path| parse_quote!(#path))
+    }
+}
diff --git a/crates/strum_macros/src/helpers/variant_props.rs b/crates/strum_macros/src/helpers/variant_props.rs
new file mode 100644
index 0000000..f637253
--- /dev/null
+++ b/crates/strum_macros/src/helpers/variant_props.rs
@@ -0,0 +1,133 @@
+use std::default::Default;
+use syn::{Ident, LitStr, Variant};
+
+use super::case_style::{CaseStyle, CaseStyleHelpers};
+use super::metadata::{kw, VariantExt, VariantMeta};
+use super::occurrence_error;
+
+pub trait HasStrumVariantProperties {
+    fn get_variant_properties(&self) -> syn::Result<StrumVariantProperties>;
+}
+
+#[derive(Clone, Eq, PartialEq, Debug, Default)]
+pub struct StrumVariantProperties {
+    pub disabled: Option<kw::disabled>,
+    pub default: Option<kw::default>,
+    pub ascii_case_insensitive: Option<bool>,
+    pub message: Option<LitStr>,
+    pub detailed_message: Option<LitStr>,
+    pub documentation: Vec<LitStr>,
+    pub string_props: Vec<(LitStr, LitStr)>,
+    serialize: Vec<LitStr>,
+    pub to_string: Option<LitStr>,
+    ident: Option<Ident>,
+}
+
+impl StrumVariantProperties {
+    fn ident_as_str(&self, case_style: Option<CaseStyle>) -> LitStr {
+        let ident = self.ident.as_ref().expect("identifier");
+        LitStr::new(&ident.convert_case(case_style), ident.span())
+    }
+
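+    /// Preference order (as implemented below): an explicit `to_string` value wins,
+    /// otherwise the longest `serialize` value, otherwise the case-converted variant name.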
+    pub fn get_preferred_name(&self, case_style: Option<CaseStyle>) -> LitStr {
+        self.to_string.as_ref().cloned().unwrap_or_else(|| {
+            self.serialize
+                .iter()
+                .max_by_key(|s| s.value().len())
+                .cloned()
+                .unwrap_or_else(|| self.ident_as_str(case_style))
+        })
+    }
+
+    pub fn get_serializations(&self, case_style: Option<CaseStyle>) -> Vec<LitStr> {
+        let mut attrs = self.serialize.clone();
+        if let Some(to_string) = &self.to_string {
+            attrs.push(to_string.clone());
+        }
+
+        if attrs.is_empty() {
+            attrs.push(self.ident_as_str(case_style));
+        }
+
+        attrs
+    }
+}
+
+impl HasStrumVariantProperties for Variant {
+    fn get_variant_properties(&self) -> syn::Result<StrumVariantProperties> {
+        let mut output = StrumVariantProperties {
+            ident: Some(self.ident.clone()),
+            ..Default::default()
+        };
+
+        let mut message_kw = None;
+        let mut detailed_message_kw = None;
+        let mut to_string_kw = None;
+        let mut disabled_kw = None;
+        let mut default_kw = None;
+        let mut ascii_case_insensitive_kw = None;
+        for meta in self.get_metadata()? {
+            match meta {
+                VariantMeta::Message { value, kw } => {
+                    if let Some(fst_kw) = message_kw {
+                        return Err(occurrence_error(fst_kw, kw, "message"));
+                    }
+
+                    message_kw = Some(kw);
+                    output.message = Some(value);
+                }
+                VariantMeta::DetailedMessage { value, kw } => {
+                    if let Some(fst_kw) = detailed_message_kw {
+                        return Err(occurrence_error(fst_kw, kw, "detailed_message"));
+                    }
+
+                    detailed_message_kw = Some(kw);
+                    output.detailed_message = Some(value);
+                }
+                VariantMeta::Documentation { value } => {
+                    output.documentation.push(value);
+                }
+                VariantMeta::Serialize { value, .. } => {
+                    output.serialize.push(value);
+                }
+                VariantMeta::ToString { value, kw } => {
+                    if let Some(fst_kw) = to_string_kw {
+                        return Err(occurrence_error(fst_kw, kw, "to_string"));
+                    }
+
+                    to_string_kw = Some(kw);
+                    output.to_string = Some(value);
+                }
+                VariantMeta::Disabled(kw) => {
+                    if let Some(fst_kw) = disabled_kw {
+                        return Err(occurrence_error(fst_kw, kw, "disabled"));
+                    }
+
+                    disabled_kw = Some(kw);
+                    output.disabled = Some(kw);
+                }
+                VariantMeta::Default(kw) => {
+                    if let Some(fst_kw) = default_kw {
+                        return Err(occurrence_error(fst_kw, kw, "default"));
+                    }
+
+                    default_kw = Some(kw);
+                    output.default = Some(kw);
+                }
+                VariantMeta::AsciiCaseInsensitive { kw, value } => {
+                    if let Some(fst_kw) = ascii_case_insensitive_kw {
+                        return Err(occurrence_error(fst_kw, kw, "ascii_case_insensitive"));
+                    }
+
+                    ascii_case_insensitive_kw = Some(kw);
+                    output.ascii_case_insensitive = Some(value);
+                }
+                VariantMeta::Props { props, .. } => {
+                    output.string_props.extend(props);
+                }
+            }
+        }
+
+        Ok(output)
+    }
+}
diff --git a/crates/strum_macros/src/lib.rs b/crates/strum_macros/src/lib.rs
new file mode 100644
index 0000000..82db12a
--- /dev/null
+++ b/crates/strum_macros/src/lib.rs
@@ -0,0 +1,815 @@
+//! # Strum
+//!
+//! Strum is a set of macros and traits that make working with
+//! enums and strings easier in Rust.
+//!
+
+#![recursion_limit = "128"]
+
+extern crate proc_macro;
+
+mod helpers;
+mod macros;
+
+use proc_macro2::TokenStream;
+use std::env;
+use syn::DeriveInput;
+
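+// Debug aid: with STRUM_DEBUG=1 every generated expansion is printed; setting
+// STRUM_DEBUG to an enum's name prints only that enum's expansion.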
+fn debug_print_generated(ast: &DeriveInput, toks: &TokenStream) {
+    let debug = env::var("STRUM_DEBUG");
+    if let Ok(s) = debug {
+        if s == "1" {
+            println!("{}", toks);
+        }
+
+        if ast.ident == s {
+            println!("{}", toks);
+        }
+    }
+}
+
+/// Converts strings to enum variants based on their name.
+///
+/// Auto-derives `std::str::FromStr` on the enum (for Rust 1.34 and above, `std::convert::TryFrom<&str>`
+/// will be derived as well). Each variant of the enum will match on its own name.
+/// This can be overridden using `serialize="DifferentName"` or `to_string="DifferentName"`
+/// on the attribute as shown below.
+/// Multiple deserializations can be added to the same variant. If the variant contains additional data,
+/// they will be set to their default values upon deserialization.
+///
+/// The `default` attribute can be applied to a tuple variant with a single data parameter. When a match isn't
+/// found, the given variant will be returned and the input string will be captured in the parameter.
+///
+/// Note that the implementation of `FromStr` by default only matches on the name of the
+/// variant. There is an option to match on different case conversions through the
+/// `#[strum(serialize_all = "snake_case")]` type attribute.
+///
+/// See the [Additional Attributes](https://docs.rs/strum/0.22/strum/additional_attributes/index.html)
+/// Section for more information on using this feature.
+///
+/// If you have a large enum, you may want to consider using the `use_phf` attribute here. It leverages
+/// perfect hash functions to parse much quicker than a standard `match`. (MSRV 1.46)
+///
+/// # Example: how to use `EnumString`
+/// ```
+/// use std::str::FromStr;
+/// use strum_macros::EnumString;
+///
+/// #[derive(Debug, PartialEq, EnumString)]
+/// enum Color {
+///     Red,
+///     // The Default value will be inserted into range if we match "Green".
+///     Green {
+///         range: usize,
+///     },
+///
+///     // We can match on multiple different patterns.
+///     #[strum(serialize = "blue", serialize = "b")]
+///     Blue(usize),
+///
+///     // Notice that we can disable certain variants from being found
+///     #[strum(disabled)]
+///     Yellow,
+///
+///     // We can make the comparison case insensitive (however Unicode is not supported at the moment)
+///     #[strum(ascii_case_insensitive)]
+///     Black,
+/// }
+///
+/// /*
+/// //The generated code will look like:
+/// impl std::str::FromStr for Color {
+///     type Err = ::strum::ParseError;
+///
+///     fn from_str(s: &str) -> ::core::result::Result<Color, Self::Err> {
+///         match s {
+///             "Red" => ::core::result::Result::Ok(Color::Red),
+///             "Green" => ::core::result::Result::Ok(Color::Green { range:Default::default() }),
+///             "blue" => ::core::result::Result::Ok(Color::Blue(Default::default())),
+///             "b" => ::core::result::Result::Ok(Color::Blue(Default::default())),
+///             s if s.eq_ignore_ascii_case("Black") => ::core::result::Result::Ok(Color::Black),
+///             _ => ::core::result::Result::Err(::strum::ParseError::VariantNotFound),
+///         }
+///     }
+/// }
+/// */
+///
+/// // simple from string
+/// let color_variant = Color::from_str("Red").unwrap();
+/// assert_eq!(Color::Red, color_variant);
+/// // short version works too
+/// let color_variant = Color::from_str("b").unwrap();
+/// assert_eq!(Color::Blue(0), color_variant);
+/// // parsing is disabled for this variant, so this returns a parse error
+/// let color_variant = Color::from_str("Yellow");
+/// assert!(color_variant.is_err());
+/// // however the variant is still normally usable
+/// println!("{:?}", Color::Yellow);
+/// let color_variant = Color::from_str("bLACk").unwrap();
+/// assert_eq!(Color::Black, color_variant);
+/// ```
+#[proc_macro_derive(EnumString, attributes(strum))]
+pub fn from_string(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks =
+        macros::from_string::from_string_inner(&ast).unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Converts enum variants to `&'static str`.
+///
+/// Implements `AsRef<str>` on your enum using the same rules as
+/// `Display` for determining what string is returned. The difference is that `as_ref()` returns
+/// a `&str` instead of a `String` so you don't allocate any additional memory with each call.
+///
+/// ```
+/// // You need to bring the AsRef trait into scope to use it
+/// use std::convert::AsRef;
+/// use strum_macros::AsRefStr;
+///
+/// #[derive(AsRefStr, Debug)]
+/// enum Color {
+///     #[strum(serialize = "redred")]
+///     Red,
+///     Green {
+///         range: usize,
+///     },
+///     Blue(usize),
+///     Yellow,
+/// }
+///
+/// // uses the serialize string for Display
+/// let red = Color::Red;
+/// assert_eq!("redred", red.as_ref());
+/// // by default, the variant's name is used
+/// let yellow = Color::Yellow;
+/// assert_eq!("Yellow", yellow.as_ref());
+/// // or for string formatting
+/// println!(
+///     "blue: {} green: {}",
+///     Color::Blue(10).as_ref(),
+///     Color::Green { range: 42 }.as_ref()
+/// );
+/// ```
+#[proc_macro_derive(AsRefStr, attributes(strum))]
+pub fn as_ref_str(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks =
+        macros::as_ref_str::as_ref_str_inner(&ast).unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Implements `strum::VariantNames`, which adds an associated constant `VARIANTS` containing an array of discriminant names.
+///
+/// Adds an `impl` block for the `enum` that adds a static `VARIANTS` array of `&'static str` that are the discriminant names.
+/// This will respect the `serialize_all` attribute on the `enum` (like `#[strum(serialize_all = "snake_case")]`).
+///
+/// ```
+/// // import the macros needed
+/// use strum_macros::{EnumString, EnumVariantNames};
+/// // You need to import the trait, to have access to VARIANTS
+/// use strum::VariantNames;
+///
+/// #[derive(Debug, EnumString, EnumVariantNames)]
+/// #[strum(serialize_all = "kebab-case")]
+/// enum Color {
+///     Red,
+///     Blue,
+///     Yellow,
+///     RebeccaPurple,
+/// }
+/// assert_eq!(["red", "blue", "yellow", "rebecca-purple"], Color::VARIANTS);
+/// ```
+#[proc_macro_derive(EnumVariantNames, attributes(strum))]
+pub fn variant_names(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks = macros::enum_variant_names::enum_variant_names_inner(&ast)
+        .unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+#[proc_macro_derive(AsStaticStr, attributes(strum))]
+#[deprecated(
+    since = "0.22.0",
+    note = "please use `#[derive(IntoStaticStr)]` instead"
+)]
+pub fn as_static_str(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks = macros::as_ref_str::as_static_str_inner(
+        &ast,
+        &macros::as_ref_str::GenerateTraitVariant::AsStaticStr,
+    )
+    .unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Implements `From<MyEnum> for &'static str` on an enum.
+///
+/// Implements `From<YourEnum>` and `From<&'a YourEnum>` for `&'static str`. This is
+/// useful for turning an enum variant into a static string.
+/// The Rust `std` provides a blanket impl of the reverse direction - i.e. `impl Into<&'static str> for YourEnum`.
+///
+/// ```
+/// use strum_macros::IntoStaticStr;
+///
+/// #[derive(IntoStaticStr)]
+/// enum State<'a> {
+///     Initial(&'a str),
+///     Finished,
+/// }
+///
+/// fn verify_state<'a>(s: &'a str) {
+///     let mut state = State::Initial(s);
+///     // The following won't work because the lifetime is incorrect:
+///     // let wrong: &'static str = state.as_ref();
+///     // using the trait implemented by the derive works however:
+///     let right: &'static str = state.into();
+///     assert_eq!("Initial", right);
+///     state = State::Finished;
+///     let done: &'static str = state.into();
+///     assert_eq!("Finished", done);
+/// }
+///
+/// verify_state(&"hello world".to_string());
+/// ```
+#[proc_macro_derive(IntoStaticStr, attributes(strum))]
+pub fn into_static_str(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks = macros::as_ref_str::as_static_str_inner(
+        &ast,
+        &macros::as_ref_str::GenerateTraitVariant::From,
+    )
+    .unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Implements `std::string::ToString` on an enum.
+///
+/// ```
+/// // You need to bring the ToString trait into scope to use it
+/// use std::string::ToString;
+/// use strum_macros;
+///
+/// #[derive(strum_macros::ToString, Debug)]
+/// enum Color {
+///     #[strum(serialize = "redred")]
+///     Red,
+///     Green {
+///         range: usize,
+///     },
+///     Blue(usize),
+///     Yellow,
+/// }
+///
+/// // uses the serialize string for Display
+/// let red = Color::Red;
+/// assert_eq!(String::from("redred"), red.to_string());
+/// // by default, the variant's name is used
+/// let yellow = Color::Yellow;
+/// assert_eq!(String::from("Yellow"), yellow.to_string());
+/// ```
+#[deprecated(
+    since = "0.22.0",
+    note = "please use `#[derive(Display)]` instead. See issue https://github.com/Peternator7/strum/issues/132"
+)]
+#[proc_macro_derive(ToString, attributes(strum))]
+pub fn to_string(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks =
+        macros::to_string::to_string_inner(&ast).unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Converts enum variants to strings.
+///
+/// Deriving `Display` on an enum prints out the given enum. This enables you to perform round
+/// trip style conversions from enum into string and back again for unit style variants. `Display`
+/// chooses which serialization to use based on the following criteria:
+///
+/// 1. If there is a `to_string` property, this value will be used. There can only be one per variant.
+/// 1. Of the various `serialize` properties, the value with the longest length is chosen. If that
+///    behavior isn't desired, you should use `to_string`.
+/// 1. The name of the variant will be used if there are no `serialize` or `to_string` attributes.
+///
+/// ```
+/// // You need to bring the ToString trait into scope to use it
+/// use std::string::ToString;
+/// use strum_macros::Display;
+///
+/// #[derive(Display, Debug)]
+/// enum Color {
+///     #[strum(serialize = "redred")]
+///     Red,
+///     Green {
+///         range: usize,
+///     },
+///     Blue(usize),
+///     Yellow,
+/// }
+///
+/// // uses the serialize string for Display
+/// let red = Color::Red;
+/// assert_eq!(String::from("redred"), format!("{}", red));
+/// // by default, the variant's name is used
+/// let yellow = Color::Yellow;
+/// assert_eq!(String::from("Yellow"), yellow.to_string());
+/// // or for string formatting
+/// println!(
+///     "blue: {} green: {}",
+///     Color::Blue(10),
+///     Color::Green { range: 42 }
+/// );
+/// ```
+#[proc_macro_derive(Display, attributes(strum))]
+pub fn display(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks = macros::display::display_inner(&ast).unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Creates a new type that iterates over the variants of an enum.
+///
+/// Iterate over the variants of an Enum. Any additional data on your variants will be set to `Default::default()`.
+/// The macro implements `strum::IntoEnumIterator` on your enum and creates a new type called `YourEnumIter` that is the iterator object.
+/// You cannot derive `EnumIter` on any type with a lifetime bound (`<'a>`) because the iterator would surely
+/// create [unbounded lifetimes](https://doc.rust-lang.org/nightly/nomicon/unbounded-lifetimes.html).
+///
+/// ```
+///
+/// // You need to bring the trait into scope to use it!
+/// use strum::IntoEnumIterator;
+/// use strum_macros::EnumIter;
+///
+/// #[derive(EnumIter, Debug, PartialEq)]
+/// enum Color {
+///     Red,
+///     Green { range: usize },
+///     Blue(usize),
+///     Yellow,
+/// }
+///
+/// // It's simple to iterate over the variants of an enum.
+/// for color in Color::iter() {
+///     println!("My favorite color is {:?}", color);
+/// }
+///
+/// let mut ci = Color::iter();
+/// assert_eq!(Some(Color::Red), ci.next());
+/// assert_eq!(Some(Color::Green {range: 0}), ci.next());
+/// assert_eq!(Some(Color::Blue(0)), ci.next());
+/// assert_eq!(Some(Color::Yellow), ci.next());
+/// assert_eq!(None, ci.next());
+/// ```
+#[proc_macro_derive(EnumIter, attributes(strum))]
+pub fn enum_iter(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks =
+        macros::enum_iter::enum_iter_inner(&ast).unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Generates `is_*()` methods for each variant,
+/// e.g. `Color::Red.is_red()`.
+///
+/// ```
+///
+/// use strum_macros::EnumIs;
+///
+/// #[derive(EnumIs, Debug)]
+/// enum Color {
+///     Red,
+///     Green { range: usize },
+/// }
+///
+/// assert!(Color::Red.is_red());
+/// assert!(Color::Green{range: 0}.is_green());
+/// ```
+#[proc_macro_derive(EnumIs, attributes(strum))]
+pub fn enum_is(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks = macros::enum_is::enum_is_inner(&ast).unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Generates `try_as_*()` methods for all tuple-style variants,
+/// e.g. `Message::Write(String::new()).try_as_write()`.
+///
+/// These methods will only be generated for tuple-style variants, not for named or unit variants.
+///
+/// ```
+/// use strum_macros::EnumTryAs;
+///
+/// #[derive(EnumTryAs, Debug)]
+/// enum Message {
+///     Quit,
+///     Move { x: i32, y: i32 },
+///     Write(String),
+///     ChangeColor(i32, i32, i32),
+/// }
+///
+/// assert_eq!(
+///     Message::Write(String::from("Hello")).try_as_write(),
+///     Some(String::from("Hello"))
+/// );
+/// assert_eq!(
+///     Message::ChangeColor(1, 2, 3).try_as_change_color(),
+///     Some((1, 2, 3))
+/// );
+/// ```
+#[proc_macro_derive(EnumTryAs, attributes(strum))]
+pub fn enum_try_as(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks =
+        macros::enum_try_as::enum_try_as_inner(&ast).unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Add a function to the enum that allows accessing variants by their discriminant.
+///
+/// This macro adds an associated function to obtain an enum variant from its discriminant:
+/// `from_repr(discriminant: usize) -> Option<YourEnum>` is added to the enum. For
+/// variants with additional data, the returned variant will use the `Default` trait to fill the
+/// data. The discriminant follows the same rules as `rustc`. The first discriminant is zero and each
+/// successive variant has a discriminant of one greater than the previous variant, except where an
+/// explicit discriminant is specified. The type of the discriminant will match the `repr` type if
+/// it is specified.
+///
+/// When the macro is applied using rustc >= 1.46 and when there is no additional data on any of
+/// the variants, the `from_repr` function is marked `const`. rustc >= 1.46 is required
+/// to allow `match` statements in `const fn`. The no additional data requirement is due to the
+/// inability to use `Default::default()` in a `const fn`.
+///
+/// You cannot derive `FromRepr` on any type with a lifetime bound (`<'a>`) because the function would surely
+/// create [unbounded lifetimes](https://doc.rust-lang.org/nightly/nomicon/unbounded-lifetimes.html).
+///
+/// ```
+///
+/// use strum_macros::FromRepr;
+///
+/// #[derive(FromRepr, Debug, PartialEq)]
+/// enum Color {
+///     Red,
+///     Green { range: usize },
+///     Blue(usize),
+///     Yellow,
+/// }
+///
+/// assert_eq!(Some(Color::Red), Color::from_repr(0));
+/// assert_eq!(Some(Color::Green {range: 0}), Color::from_repr(1));
+/// assert_eq!(Some(Color::Blue(0)), Color::from_repr(2));
+/// assert_eq!(Some(Color::Yellow), Color::from_repr(3));
+/// assert_eq!(None, Color::from_repr(4));
+///
+/// // Custom discriminant tests
+/// #[derive(FromRepr, Debug, PartialEq)]
+/// #[repr(u8)]
+/// enum Vehicle {
+///     Car = 1,
+///     Truck = 3,
+/// }
+///
+/// assert_eq!(None, Vehicle::from_repr(0));
+/// ```
+///
+/// On versions of rust >= 1.46, the `from_repr` function is marked `const`.
+///
+/// ```rust
+/// use strum_macros::FromRepr;
+///
+/// #[derive(FromRepr, Debug, PartialEq)]
+/// #[repr(u8)]
+/// enum Number {
+///     One = 1,
+///     Three = 3,
+/// }
+///
+/// # #[rustversion::since(1.46)]
+/// const fn number_from_repr(d: u8) -> Option<Number> {
+///     Number::from_repr(d)
+/// }
+///
+/// # #[rustversion::before(1.46)]
+/// # fn number_from_repr(d: u8) -> Option<Number> {
+/// #     Number::from_repr(d)
+/// # }
+/// assert_eq!(None, number_from_repr(0));
+/// assert_eq!(Some(Number::One), number_from_repr(1));
+/// assert_eq!(None, number_from_repr(2));
+/// assert_eq!(Some(Number::Three), number_from_repr(3));
+/// assert_eq!(None, number_from_repr(4));
+/// ```
+
+#[proc_macro_derive(FromRepr, attributes(strum))]
+pub fn from_repr(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks =
+        macros::from_repr::from_repr_inner(&ast).unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Add a verbose message to an enum variant.
+///
+/// Encode strings into the enum itself. The `strum_macros::EnumMessage` macro implements the `strum::EnumMessage` trait.
+/// `EnumMessage` looks for `#[strum(message="...")]` attributes on your variants.
+/// You can also provide a `detailed_message="..."` attribute to create a separate, more detailed message than the first.
+///
+/// `EnumMessage` also exposes the variants doc comments through `get_documentation()`. This is useful in some scenarios,
+/// but `get_message` should generally be preferred. Rust doc comments are intended for developer facing documentation,
+/// not end user messaging.
+///
+/// ```
+/// // You need to bring the trait into scope to use it
+/// use strum::EnumMessage;
+/// use strum_macros;
+///
+/// #[derive(strum_macros::EnumMessage, Debug)]
+/// #[allow(dead_code)]
+/// enum Color {
+///     /// Danger color.
+///     #[strum(message = "Red", detailed_message = "This is very red")]
+///     Red,
+///     #[strum(message = "Simply Green")]
+///     Green { range: usize },
+///     #[strum(serialize = "b", serialize = "blue")]
+///     Blue(usize),
+/// }
+///
+/// // Generated code looks like more or less like this:
+/// /*
+/// impl ::strum::EnumMessage for Color {
+///     fn get_message(&self) -> ::core::option::Option<&'static str> {
+///         match self {
+///             &Color::Red => ::core::option::Option::Some("Red"),
+///             &Color::Green {..} => ::core::option::Option::Some("Simply Green"),
+///             _ => None
+///         }
+///     }
+///
+///     fn get_detailed_message(&self) -> ::core::option::Option<&'static str> {
+///         match self {
+///             &Color::Red => ::core::option::Option::Some("This is very red"),
+///             &Color::Green {..}=> ::core::option::Option::Some("Simply Green"),
+///             _ => None
+///         }
+///     }
+///
+///     fn get_documentation(&self) -> ::std::option::Option<&'static str> {
+///         match self {
+///             &Color::Red => ::std::option::Option::Some("Danger color."),
+///             _ => None
+///         }
+///     }
+///
+///     fn get_serializations(&self) -> &'static [&'static str] {
+///         match self {
+///             &Color::Red => {
+///                 static ARR: [&'static str; 1] = ["Red"];
+///                 &ARR
+///             },
+///             &Color::Green {..}=> {
+///                 static ARR: [&'static str; 1] = ["Green"];
+///                 &ARR
+///             },
+///             &Color::Blue (..) => {
+///                 static ARR: [&'static str; 2] = ["b", "blue"];
+///                 &ARR
+///             },
+///         }
+///     }
+/// }
+/// */
+///
+/// let c = Color::Red;
+/// assert_eq!("Red", c.get_message().unwrap());
+/// assert_eq!("This is very red", c.get_detailed_message().unwrap());
+/// assert_eq!("Danger color.", c.get_documentation().unwrap());
+/// assert_eq!(["Red"], c.get_serializations());
+/// ```
+#[proc_macro_derive(EnumMessage, attributes(strum))]
+pub fn enum_messages(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks = macros::enum_messages::enum_message_inner(&ast)
+        .unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Add custom properties to enum variants.
+///
+/// Enables the encoding of arbitrary constants into enum variants. This method
+/// currently only supports adding additional string values. Other types of literals are still
+/// experimental in the rustc compiler. The generated code works by nesting match statements.
+/// The first match statement matches on the type of the enum, and the inner match statement
+/// matches on the name of the property requested. This design works well for enums with a small
+/// number of variants and properties, but scales linearly with the number of variants so may not
+/// be the best choice in all situations.
+///
+/// ```
+///
+/// use strum_macros;
+/// // bring the trait into scope
+/// use strum::EnumProperty;
+///
+/// #[derive(strum_macros::EnumProperty, Debug)]
+/// #[allow(dead_code)]
+/// enum Color {
+///     #[strum(props(Red = "255", Blue = "255", Green = "255"))]
+///     White,
+///     #[strum(props(Red = "0", Blue = "0", Green = "0"))]
+///     Black,
+///     #[strum(props(Red = "0", Blue = "255", Green = "0"))]
+///     Blue,
+///     #[strum(props(Red = "255", Blue = "0", Green = "0"))]
+///     Red,
+///     #[strum(props(Red = "0", Blue = "0", Green = "255"))]
+///     Green,
+/// }
+///
+/// let my_color = Color::Red;
+/// let display = format!(
+///     "My color is {:?}. It's RGB is {},{},{}",
+///     my_color,
+///     my_color.get_str("Red").unwrap(),
+///     my_color.get_str("Green").unwrap(),
+///     my_color.get_str("Blue").unwrap()
+/// );
+/// assert_eq!("My color is Red. It\'s RGB is 255,0,0", &display);
+/// ```
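+///
+/// A rough sketch of the nested match the derive might generate (illustrative only;
+/// the exact expansion may differ):
+///
+/// ```
+/// /*
+/// impl ::strum::EnumProperty for Color {
+///     fn get_str(&self, prop: &str) -> ::core::option::Option<&'static str> {
+///         match self {
+///             &Color::Red => match prop {
+///                 "Red" => ::core::option::Option::Some("255"),
+///                 "Blue" => ::core::option::Option::Some("0"),
+///                 "Green" => ::core::option::Option::Some("0"),
+///                 _ => ::core::option::Option::None,
+///             },
+///             // ...remaining variants follow the same pattern...
+///         }
+///     }
+/// }
+/// */
+/// ```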
+
+#[proc_macro_derive(EnumProperty, attributes(strum))]
+pub fn enum_properties(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks = macros::enum_properties::enum_properties_inner(&ast)
+        .unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Generate a new type with only the discriminant names.
+///
+/// Given an enum named `MyEnum`, generates another enum called `MyEnumDiscriminants` with the same
+/// variants but without any data fields. This is useful when you wish to determine the variant of
+/// an `enum` but one or more of the variants contains a non-`Default` field. `From`
+/// implementations are generated so that you can easily convert from `MyEnum` to
+/// `MyEnumDiscriminants`.
+///
+/// By default, the generated enum has the following derives: `Clone, Copy, Debug, PartialEq, Eq`.
+/// You can add additional derives using the `#[strum_discriminants(derive(AdditionalDerive))]`
+/// attribute.
+///
+/// Note that the variant attributes passed to the discriminant enum are filtered to avoid compilation
+/// errors due to derive mismatches, thus only `#[doc]`, `#[cfg]`, `#[allow]`, and `#[deny]`
+/// are passed through by default. If you want to specify a custom attribute on the discriminant
+/// variant, wrap it with `#[strum_discriminants(...)]` attribute.
+///
+/// ```
+/// // Bring trait into scope
+/// use std::str::FromStr;
+/// use strum::{IntoEnumIterator, EnumMessage};
+/// use strum_macros::{EnumDiscriminants, EnumIter, EnumString, EnumMessage};
+///
+/// #[derive(Debug)]
+/// struct NonDefault;
+///
+/// // simple example
+/// # #[allow(dead_code)]
+/// #[derive(Debug, EnumDiscriminants)]
+/// #[strum_discriminants(derive(EnumString, EnumMessage))]
+/// enum MyEnum {
+///     #[strum_discriminants(strum(message = "Variant zero"))]
+///     Variant0(NonDefault),
+///     Variant1 { a: NonDefault },
+/// }
+///
+/// // You can rename the generated enum using the `#[strum_discriminants(name(OtherName))]` attribute:
+/// # #[allow(dead_code)]
+/// #[derive(Debug, EnumDiscriminants)]
+/// #[strum_discriminants(derive(EnumIter))]
+/// #[strum_discriminants(name(MyVariants))]
+/// enum MyEnumR {
+///     Variant0(bool),
+///     Variant1 { a: bool },
+/// }
+///
+/// // test simple example
+/// assert_eq!(
+///     MyEnumDiscriminants::Variant0,
+///     MyEnumDiscriminants::from_str("Variant0").unwrap()
+/// );
+/// // test rename example combined with EnumIter
+/// assert_eq!(
+///     vec![MyVariants::Variant0, MyVariants::Variant1],
+///     MyVariants::iter().collect::<Vec<_>>()
+/// );
+///
+/// // Make use of the auto-From conversion to check whether an instance of `MyEnum` matches a
+/// // `MyEnumDiscriminants` discriminant.
+/// assert_eq!(
+///     MyEnumDiscriminants::Variant0,
+///     MyEnum::Variant0(NonDefault).into()
+/// );
+/// assert_eq!(
+///     MyEnumDiscriminants::Variant0,
+///     MyEnumDiscriminants::from(MyEnum::Variant0(NonDefault))
+/// );
+///
+/// // Make use of the EnumMessage on the `MyEnumDiscriminants` discriminant.
+/// assert_eq!(
+///     MyEnumDiscriminants::Variant0.get_message(),
+///     Some("Variant zero")
+/// );
+/// ```
+///
+/// It is also possible to specify the visibility (e.g. `pub`/`pub(crate)`/etc.)
+/// of the generated enum. By default, the generated enum inherits the
+/// visibility of the parent enum it was generated from.
+///
+/// ```
+/// use strum_macros::EnumDiscriminants;
+///
+/// // You can set the visibility of the generated enum using the `#[strum_discriminants(vis(..))]` attribute:
+/// mod inner {
+///     use strum_macros::EnumDiscriminants;
+///
+///     # #[allow(dead_code)]
+///     #[derive(Debug, EnumDiscriminants)]
+///     #[strum_discriminants(vis(pub))]
+///     #[strum_discriminants(name(PubDiscriminants))]
+///     enum PrivateEnum {
+///         Variant0(bool),
+///         Variant1 { a: bool },
+///     }
+/// }
+///
+/// // test visibility example, `PrivateEnum` should not be accessible here
+/// assert_ne!(
+///     inner::PubDiscriminants::Variant0,
+///     inner::PubDiscriminants::Variant1,
+/// );
+/// ```
+#[proc_macro_derive(EnumDiscriminants, attributes(strum, strum_discriminants))]
+pub fn enum_discriminants(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+
+    let toks = macros::enum_discriminants::enum_discriminants_inner(&ast)
+        .unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
+
+/// Add a constant `usize` equal to the number of variants.
+///
+/// For a given enum, generates an implementation of `strum::EnumCount`,
+/// which adds an associated constant `COUNT` of type `usize` that holds the number of variants.
+///
+/// ```
+/// use strum::{EnumCount, IntoEnumIterator};
+/// use strum_macros::{EnumCount as EnumCountMacro, EnumIter};
+///
+/// #[derive(Debug, EnumCountMacro, EnumIter)]
+/// enum Week {
+///     Sunday,
+///     Monday,
+///     Tuesday,
+///     Wednesday,
+///     Thursday,
+///     Friday,
+///     Saturday,
+/// }
+///
+/// assert_eq!(7, Week::COUNT);
+/// assert_eq!(Week::iter().count(), Week::COUNT);
+///
+/// ```
+#[proc_macro_derive(EnumCount, attributes(strum))]
+pub fn enum_count(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    let ast = syn::parse_macro_input!(input as DeriveInput);
+    let toks =
+        macros::enum_count::enum_count_inner(&ast).unwrap_or_else(|err| err.to_compile_error());
+    debug_print_generated(&ast, &toks);
+    toks.into()
+}
diff --git a/crates/strum_macros/src/macros/enum_count.rs b/crates/strum_macros/src/macros/enum_count.rs
new file mode 100644
index 0000000..b9e6aaa
--- /dev/null
+++ b/crates/strum_macros/src/macros/enum_count.rs
@@ -0,0 +1,34 @@
+use proc_macro2::TokenStream;
+use quote::quote;
+use syn::{Data, DeriveInput};
+
+use crate::helpers::variant_props::HasStrumVariantProperties;
+use crate::helpers::{non_enum_error, HasTypeProperties};
+
+pub(crate) fn enum_count_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
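+    // Count only the variants that are not marked #[strum(disabled)].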
+    let n = match &ast.data {
+        Data::Enum(v) => v.variants.iter().try_fold(0usize, |acc, v| {
+            if v.get_variant_properties()?.disabled.is_none() {
+                Ok::<usize, syn::Error>(acc + 1usize)
+            } else {
+                Ok::<usize, syn::Error>(acc)
+            }
+        })?,
+        _ => return Err(non_enum_error()),
+    };
+    let type_properties = ast.get_type_properties()?;
+    let strum_module_path = type_properties.crate_module_path();
+
+    // Used in the quasi-quotation below as `#name`
+    let name = &ast.ident;
+
+    // Helper is provided for handling complex generic types correctly and effortlessly
+    let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
+
+    Ok(quote! {
+        // Implementation
+        impl #impl_generics #strum_module_path::EnumCount for #name #ty_generics #where_clause {
+            const COUNT: usize = #n;
+        }
+    })
+}
diff --git a/crates/strum_macros/src/macros/enum_discriminants.rs b/crates/strum_macros/src/macros/enum_discriminants.rs
new file mode 100644
index 0000000..4e54a30
--- /dev/null
+++ b/crates/strum_macros/src/macros/enum_discriminants.rs
@@ -0,0 +1,164 @@
+use proc_macro2::{Span, TokenStream, TokenTree};
+use quote::{quote, ToTokens};
+use syn::parse_quote;
+use syn::{Data, DeriveInput, Fields};
+
+use crate::helpers::{non_enum_error, strum_discriminants_passthrough_error, HasTypeProperties};
+
+/// Attributes to copy from the main enum's variants to the discriminant enum's variants.
+///
+/// Attributes not in this list may be for other `proc_macro`s on the main enum, and may cause
+/// compilation problems when copied across.
+const ATTRIBUTES_TO_COPY: &[&str] = &["doc", "cfg", "allow", "deny", "strum_discriminants"];
+
+pub fn enum_discriminants_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
+    let name = &ast.ident;
+    let vis = &ast.vis;
+
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+
+    // Derives for the generated enum
+    let type_properties = ast.get_type_properties()?;
+
+    let derives = type_properties.discriminant_derives;
+
+    let derives = quote! {
+        #[derive(Clone, Copy, Debug, PartialEq, Eq, #(#derives),*)]
+    };
+
+    // Work out the name
+    let default_name = syn::Ident::new(&format!("{}Discriminants", name), Span::call_site());
+
+    let discriminants_name = type_properties.discriminant_name.unwrap_or(default_name);
+    let discriminants_vis = type_properties
+        .discriminant_vis
+        .unwrap_or_else(|| vis.clone());
+
+    // Pass through all other attributes
+    let pass_through_attributes = type_properties.discriminant_others;
+
+    // Add the variants without fields, but exclude the `strum` meta item
+    let mut discriminants = Vec::new();
+    for variant in variants {
+        let ident = &variant.ident;
+
+        // Don't copy across the "strum" meta attribute. Only pass through the whitelisted
+        // attributes and proxy `#[strum_discriminants(...)]` attributes.
+        let attrs = variant
+            .attrs
+            .iter()
+            .filter(|attr| {
+                ATTRIBUTES_TO_COPY
+                    .iter()
+                    .any(|attr_whitelisted| attr.path().is_ident(attr_whitelisted))
+            })
+            .map(|attr| {
+                if attr.path().is_ident("strum_discriminants") {
+                    let mut ts = attr.meta.require_list()?.to_token_stream().into_iter();
+
+                    // Discard strum_discriminants(...)
+                    let _ = ts.next();
+
+                    let passthrough_group = ts
+                        .next()
+                        .ok_or_else(|| strum_discriminants_passthrough_error(attr))?;
+                    let passthrough_attribute = match passthrough_group {
+                        TokenTree::Group(ref group) => group.stream(),
+                        _ => {
+                            return Err(strum_discriminants_passthrough_error(&passthrough_group));
+                        }
+                    };
+                    if passthrough_attribute.is_empty() {
+                        return Err(strum_discriminants_passthrough_error(&passthrough_group));
+                    }
+                    Ok(quote! { #[#passthrough_attribute] })
+                } else {
+                    Ok(attr.to_token_stream())
+                }
+            })
+            .collect::<Result<Vec<_>, _>>()?;
+
+        discriminants.push(quote! { #(#attrs)* #ident });
+    }
+
+    // Ideally:
+    //
+    // * For `Copy` types, we `impl From<TheEnum> for TheEnumDiscriminants`
+    // * For `!Copy` types, we `impl<'enum> From<&'enum TheEnum> for TheEnumDiscriminants`
+    //
+    // That way we ensure users are not able to pass a `Copy` type by reference. However, the
+    // `#[derive(..)]` attributes are not in the parsed tokens, so we are not able to check if a
+    // type is `Copy`, so we just implement both.
+    //
+    // See <https://github.com/dtolnay/syn/issues/433>
+    // ---
+    // let is_copy = unique_meta_list(type_meta.iter(), "derive")
+    //     .map(extract_list_metas)
+    //     .map(|metas| {
+    //         metas
+    //             .filter_map(get_meta_ident)
+    //             .any(|derive| derive.to_string() == "Copy")
+    //     }).unwrap_or(false);
+
+    let arms = variants
+        .iter()
+        .map(|variant| {
+            let ident = &variant.ident;
+            let params = match &variant.fields {
+                Fields::Unit => quote! {},
+                Fields::Unnamed(_fields) => {
+                    quote! { (..) }
+                }
+                Fields::Named(_fields) => {
+                    quote! { { .. } }
+                }
+            };
+
+            quote! { #name::#ident #params => #discriminants_name::#ident }
+        })
+        .collect::<Vec<_>>();
+
+    let from_fn_body = quote! { match val { #(#arms),* } };
+
+    let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
+    let impl_from = quote! {
+        impl #impl_generics ::core::convert::From< #name #ty_generics > for #discriminants_name #where_clause {
+            fn from(val: #name #ty_generics) -> #discriminants_name {
+                #from_fn_body
+            }
+        }
+    };
+    let impl_from_ref = {
+        let mut generics = ast.generics.clone();
+
+        let lifetime = parse_quote!('_enum);
+        let enum_life = quote! { & #lifetime };
+        generics.params.push(lifetime);
+
+        // Shadows the earlier `impl_generics`
+        let (impl_generics, _, _) = generics.split_for_impl();
+
+        quote! {
+            impl #impl_generics ::core::convert::From< #enum_life #name #ty_generics > for #discriminants_name #where_clause {
+                fn from(val: #enum_life #name #ty_generics) -> #discriminants_name {
+                    #from_fn_body
+                }
+            }
+        }
+    };
+
+    Ok(quote! {
+        /// Auto-generated discriminant enum variants
+        #derives
+        #(#[ #pass_through_attributes ])*
+        #discriminants_vis enum #discriminants_name {
+            #(#discriminants),*
+        }
+
+        #impl_from
+        #impl_from_ref
+    })
+}
diff --git a/crates/strum_macros/src/macros/enum_is.rs b/crates/strum_macros/src/macros/enum_is.rs
new file mode 100644
index 0000000..ecada45
--- /dev/null
+++ b/crates/strum_macros/src/macros/enum_is.rs
@@ -0,0 +1,44 @@
+use crate::helpers::{non_enum_error, snakify, HasStrumVariantProperties};
+use proc_macro2::TokenStream;
+use quote::{format_ident, quote};
+use syn::{Data, DeriveInput};
+
+pub fn enum_is_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
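+    // Generate a `pub const fn is_<variant>(&self) -> bool` for every variant that is not
+    // marked #[strum(disabled)]; method names are snake_cased via `snakify`.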
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+    let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
+
+    let enum_name = &ast.ident;
+    let variants: Vec<_> = variants
+        .iter()
+        .filter_map(|variant| {
+            if variant.get_variant_properties().ok()?.disabled.is_some() {
+                return None;
+            }
+
+            let variant_name = &variant.ident;
+            let fn_name = format_ident!("is_{}", snakify(&variant_name.to_string()));
+            let doc_comment = format!("Returns [true] if the enum is [{}::{}], otherwise [false]", enum_name, variant_name);
+            Some(quote! {
+                #[must_use]
+                #[inline]
+                #[doc = #doc_comment]
+                pub const fn #fn_name(&self) -> bool {
+                    match self {
+                        &#enum_name::#variant_name { .. } => true,
+                        _ => false
+                    }
+                }
+            })
+        })
+        .collect();
+
+    Ok(quote! {
+        impl #impl_generics #enum_name #ty_generics #where_clause {
+            #(#variants)*
+        }
+    })
+}
diff --git a/crates/strum_macros/src/macros/enum_iter.rs b/crates/strum_macros/src/macros/enum_iter.rs
new file mode 100644
index 0000000..0e700aa
--- /dev/null
+++ b/crates/strum_macros/src/macros/enum_iter.rs
@@ -0,0 +1,172 @@
+use proc_macro2::{Span, TokenStream};
+use quote::quote;
+use syn::{Data, DeriveInput, Fields, Ident};
+
+use crate::helpers::{non_enum_error, HasStrumVariantProperties, HasTypeProperties};
+
+pub fn enum_iter_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
+    let name = &ast.ident;
+    let gen = &ast.generics;
+    let (impl_generics, ty_generics, where_clause) = gen.split_for_impl();
+    let vis = &ast.vis;
+    let type_properties = ast.get_type_properties()?;
+    let strum_module_path = type_properties.crate_module_path();
+    let doc_comment = format!("An iterator over the variants of [{}]", name);
+
+    if gen.lifetimes().count() > 0 {
+        return Err(syn::Error::new(
+            Span::call_site(),
+            "This macro doesn't support enums with lifetimes. \
+             The resulting enums would be unbounded.",
+        ));
+    }
+
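+    // The iterator stores no enum values itself, so any type parameters are tied to it
+    // through a PhantomData over a tuple of those parameters (or `()` if there are none).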
+    let phantom_data = if gen.type_params().count() > 0 {
+        let g = gen.type_params().map(|param| &param.ident);
+        quote! { < ( #(#g),* ) > }
+    } else {
+        quote! { < () > }
+    };
+
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+
+    let mut arms = Vec::new();
+    let mut idx = 0usize;
+    for variant in variants {
+        if variant.get_variant_properties()?.disabled.is_some() {
+            continue;
+        }
+
+        let ident = &variant.ident;
+        let params = match &variant.fields {
+            Fields::Unit => quote! {},
+            Fields::Unnamed(fields) => {
+                let defaults = ::core::iter::repeat(quote!(::core::default::Default::default()))
+                    .take(fields.unnamed.len());
+                quote! { (#(#defaults),*) }
+            }
+            Fields::Named(fields) => {
+                let fields = fields
+                    .named
+                    .iter()
+                    .map(|field| field.ident.as_ref().unwrap());
+                quote! { {#(#fields: ::core::default::Default::default()),*} }
+            }
+        };
+
+        arms.push(quote! {#idx => ::core::option::Option::Some(#name::#ident #params)});
+        idx += 1;
+    }
+
+    let variant_count = arms.len();
+    arms.push(quote! { _ => ::core::option::Option::None });
+    let iter_name = syn::parse_str::<Ident>(&format!("{}Iter", name)).unwrap();
+
+    // Create a string literal "MyEnumIter" to use in the debug impl.
+    let iter_name_debug_struct =
+        syn::parse_str::<syn::LitStr>(&format!("\"{}\"", iter_name)).unwrap();
+
+    Ok(quote! {
+        #[doc = #doc_comment]
+        #[allow(
+            missing_copy_implementations,
+        )]
+        #vis struct #iter_name #ty_generics {
+            idx: usize,
+            back_idx: usize,
+            marker: ::core::marker::PhantomData #phantom_data,
+        }
+
+        impl #impl_generics ::core::fmt::Debug for #iter_name #ty_generics #where_clause {
+            fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
+                // We don't know if the variants implement debug themselves so the only thing we
+                // can really show is how many elements are left.
+                f.debug_struct(#iter_name_debug_struct)
+                    .field("len", &self.len())
+                    .finish()
+            }
+        }
+
+        impl #impl_generics #iter_name #ty_generics #where_clause {
+            fn get(&self, idx: usize) -> ::core::option::Option<#name #ty_generics> {
+                match idx {
+                    #(#arms),*
+                }
+            }
+        }
+
+        impl #impl_generics #strum_module_path::IntoEnumIterator for #name #ty_generics #where_clause {
+            type Iterator = #iter_name #ty_generics;
+            fn iter() -> #iter_name #ty_generics {
+                #iter_name {
+                    idx: 0,
+                    back_idx: 0,
+                    marker: ::core::marker::PhantomData,
+                }
+            }
+        }
+
+        impl #impl_generics Iterator for #iter_name #ty_generics #where_clause {
+            type Item = #name #ty_generics;
+
+            fn next(&mut self) -> ::core::option::Option<<Self as Iterator>::Item> {
+                self.nth(0)
+            }
+
+            fn size_hint(&self) -> (usize, ::core::option::Option<usize>) {
+                let t = if self.idx + self.back_idx >= #variant_count { 0 } else { #variant_count - self.idx - self.back_idx };
+                (t, Some(t))
+            }
+
+            fn nth(&mut self, n: usize) -> ::core::option::Option<<Self as Iterator>::Item> {
+                let idx = self.idx + n + 1;
+                if idx + self.back_idx > #variant_count {
+                    // We went past the end of the iterator. Freeze idx at #variant_count
+                    // so that it doesn't overflow if the user calls this repeatedly.
+                    // See PR #76 for context.
+                    self.idx = #variant_count;
+                    ::core::option::Option::None
+                } else {
+                    self.idx = idx;
+                    self.get(idx - 1)
+                }
+            }
+        }
+
+        impl #impl_generics ExactSizeIterator for #iter_name #ty_generics #where_clause {
+            fn len(&self) -> usize {
+                self.size_hint().0
+            }
+        }
+
+        impl #impl_generics DoubleEndedIterator for #iter_name #ty_generics #where_clause {
+            fn next_back(&mut self) -> ::core::option::Option<<Self as Iterator>::Item> {
+                let back_idx = self.back_idx + 1;
+
+                if self.idx + back_idx > #variant_count {
+                    // We went past the end of the iterator. Freeze back_idx at #variant_count
+                    // so that it doesn't overflow if the user calls this repeatedly.
+                    // See PR #76 for context.
+                    self.back_idx = #variant_count;
+                    ::core::option::Option::None
+                } else {
+                    self.back_idx = back_idx;
+                    self.get(#variant_count - self.back_idx)
+                }
+            }
+        }
+
+        impl #impl_generics Clone for #iter_name #ty_generics #where_clause {
+            fn clone(&self) -> #iter_name #ty_generics {
+                #iter_name {
+                    idx: self.idx,
+                    back_idx: self.back_idx,
+                    marker: self.marker.clone(),
+                }
+            }
+        }
+    })
+}
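
A usage sketch for the iterator type generated above, assuming the EnumIter derive and the strum crate's IntoEnumIterator trait; Direction is a hypothetical enum.

    use strum::IntoEnumIterator;
    use strum_macros::EnumIter;

    #[derive(Debug, EnumIter, PartialEq)]
    enum Direction {
        North,
        South,
    }

    fn main() {
        // iter() yields the non-disabled variants in declaration order.
        let all: Vec<Direction> = Direction::iter().collect();
        assert_eq!(all, vec![Direction::North, Direction::South]);
        // The generated DirectionIter is exact-sized and double-ended.
        assert_eq!(Direction::iter().len(), 2);
        assert_eq!(Direction::iter().next_back(), Some(Direction::South));
    }
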
diff --git a/crates/strum_macros/src/macros/enum_messages.rs b/crates/strum_macros/src/macros/enum_messages.rs
new file mode 100644
index 0000000..c056108
--- /dev/null
+++ b/crates/strum_macros/src/macros/enum_messages.rs
@@ -0,0 +1,138 @@
+use proc_macro2::TokenStream;
+use quote::quote;
+use syn::{Data, DeriveInput, Fields, LitStr};
+
+use crate::helpers::{non_enum_error, HasStrumVariantProperties, HasTypeProperties};
+
+pub fn enum_message_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
+    let name = &ast.ident;
+    let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+
+    let type_properties = ast.get_type_properties()?;
+    let strum_module_path = type_properties.crate_module_path();
+
+    let mut arms = Vec::new();
+    let mut detailed_arms = Vec::new();
+    let mut documentation_arms = Vec::new();
+    let mut serializations = Vec::new();
+
+    for variant in variants {
+        let variant_properties = variant.get_variant_properties()?;
+        let messages = variant_properties.message.as_ref();
+        let detailed_messages = variant_properties.detailed_message.as_ref();
+        let documentation = &variant_properties.documentation;
+        let ident = &variant.ident;
+
+        let params = match variant.fields {
+            Fields::Unit => quote! {},
+            Fields::Unnamed(..) => quote! { (..) },
+            Fields::Named(..) => quote! { {..} },
+        };
+
+        // You can't disable getting the serializations.
+        {
+            let serialization_variants =
+                variant_properties.get_serializations(type_properties.case_style);
+
+            let count = serialization_variants.len();
+            serializations.push(quote! {
+                &#name::#ident #params => {
+                    static ARR: [&'static str; #count] = [#(#serialization_variants),*];
+                    &ARR
+                }
+            });
+        }
+
+        // But you can disable the messages.
+        if variant_properties.disabled.is_some() {
+            continue;
+        }
+
+        if let Some(msg) = messages {
+            let params = params.clone();
+
+            // Push the simple message.
+            let tokens = quote! { &#name::#ident #params => ::core::option::Option::Some(#msg) };
+            arms.push(tokens.clone());
+
+            if detailed_messages.is_none() {
+                detailed_arms.push(tokens);
+            }
+        }
+
+        if let Some(msg) = detailed_messages {
+            let params = params.clone();
+            // Push the detailed message.
+            detailed_arms
+                .push(quote! { &#name::#ident #params => ::core::option::Option::Some(#msg) });
+        }
+
+        if !documentation.is_empty() {
+            let params = params.clone();
+            // Strip a single leading space from each documentation line.
+            let documentation: Vec<LitStr> = documentation.iter().map(|lit_str| {
+                let line = lit_str.value();
+                if line.starts_with(' ') {
+                    LitStr::new(&line.as_str()[1..], lit_str.span())
+                } else {
+                    lit_str.clone()
+                }
+            }).collect();
+            if documentation.len() == 1 {
+                let text = &documentation[0];
+                documentation_arms
+                    .push(quote! { &#name::#ident #params => ::core::option::Option::Some(#text) });
+            } else {
+                // Push the documentation.
+                documentation_arms
+                    .push(quote! {
+                        &#name::#ident #params => ::core::option::Option::Some(concat!(#(concat!(#documentation, "\n")),*))
+                    });
+            }
+        }
+    }
+
+    if arms.len() < variants.len() {
+        arms.push(quote! { _ => ::core::option::Option::None });
+    }
+
+    if detailed_arms.len() < variants.len() {
+        detailed_arms.push(quote! { _ => ::core::option::Option::None });
+    }
+
+    if documentation_arms.len() < variants.len() {
+        documentation_arms.push(quote! { _ => ::core::option::Option::None });
+    }
+
+    Ok(quote! {
+        impl #impl_generics #strum_module_path::EnumMessage for #name #ty_generics #where_clause {
+            fn get_message(&self) -> ::core::option::Option<&'static str> {
+                match self {
+                    #(#arms),*
+                }
+            }
+
+            fn get_detailed_message(&self) -> ::core::option::Option<&'static str> {
+                match self {
+                    #(#detailed_arms),*
+                }
+            }
+
+            fn get_documentation(&self) -> ::core::option::Option<&'static str> {
+                match self {
+                    #(#documentation_arms),*
+                }
+            }
+
+            fn get_serializations(&self) -> &'static [&'static str] {
+                match self {
+                    #(#serializations),*
+                }
+            }
+        }
+    })
+}
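
A sketch of how the EnumMessage output is consumed, assuming the strum::EnumMessage trait is in scope; Status and its attributes are hypothetical.

    use strum::EnumMessage as _;

    #[derive(strum_macros::EnumMessage)]
    enum Status {
        #[strum(message = "ok", detailed_message = "request completed")]
        Ok,
        /// Returned when the caller must retry.
        Retry,
    }

    fn main() {
        assert_eq!(Status::Ok.get_message(), Some("ok"));
        assert_eq!(Status::Ok.get_detailed_message(), Some("request completed"));
        // Doc comments feed get_documentation(); plain variants have no message.
        assert_eq!(Status::Retry.get_documentation(), Some("Returned when the caller must retry."));
        assert_eq!(Status::Retry.get_message(), None);
    }
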
diff --git a/crates/strum_macros/src/macros/enum_properties.rs b/crates/strum_macros/src/macros/enum_properties.rs
new file mode 100644
index 0000000..2583096
--- /dev/null
+++ b/crates/strum_macros/src/macros/enum_properties.rs
@@ -0,0 +1,61 @@
+use proc_macro2::TokenStream;
+use quote::quote;
+use syn::{Data, DeriveInput, Fields};
+
+use crate::helpers::{non_enum_error, HasStrumVariantProperties, HasTypeProperties};
+
+pub fn enum_properties_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
+    let name = &ast.ident;
+    let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+    let type_properties = ast.get_type_properties()?;
+    let strum_module_path = type_properties.crate_module_path();
+
+    let mut arms = Vec::new();
+    for variant in variants {
+        let ident = &variant.ident;
+        let variant_properties = variant.get_variant_properties()?;
+        let mut string_arms = Vec::new();
+        // But you can disable the properties.
+        if variant_properties.disabled.is_some() {
+            continue;
+        }
+
+        let params = match variant.fields {
+            Fields::Unit => quote! {},
+            Fields::Unnamed(..) => quote! { (..) },
+            Fields::Named(..) => quote! { {..} },
+        };
+
+        for (key, value) in variant_properties.string_props {
+            string_arms.push(quote! { #key => ::core::option::Option::Some( #value )});
+        }
+
+        string_arms.push(quote! { _ => ::core::option::Option::None });
+
+        arms.push(quote! {
+            &#name::#ident #params => {
+                match prop {
+                    #(#string_arms),*
+                }
+            }
+        });
+    }
+
+    if arms.len() < variants.len() {
+        arms.push(quote! { _ => ::core::option::Option::None });
+    }
+
+    Ok(quote! {
+        impl #impl_generics #strum_module_path::EnumProperty for #name #ty_generics #where_clause {
+            fn get_str(&self, prop: &str) -> ::core::option::Option<&'static str> {
+                match self {
+                    #(#arms),*
+                }
+            }
+        }
+    })
+}
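
A sketch of get_str lookups against the arms generated above, assuming the strum::EnumProperty trait; Tone and its props are hypothetical.

    use strum::EnumProperty as _;

    #[derive(strum_macros::EnumProperty)]
    enum Tone {
        #[strum(props(Color = "red", Weight = "bold"))]
        Error,
        Info,
    }

    fn main() {
        assert_eq!(Tone::Error.get_str("Color"), Some("red"));
        // Unknown keys, and variants without props, fall through to None.
        assert_eq!(Tone::Error.get_str("Size"), None);
        assert_eq!(Tone::Info.get_str("Color"), None);
    }
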
diff --git a/crates/strum_macros/src/macros/enum_try_as.rs b/crates/strum_macros/src/macros/enum_try_as.rs
new file mode 100644
index 0000000..088a1f1
--- /dev/null
+++ b/crates/strum_macros/src/macros/enum_try_as.rs
@@ -0,0 +1,80 @@
+use crate::helpers::{non_enum_error, snakify, HasStrumVariantProperties};
+use proc_macro2::TokenStream;
+use quote::{format_ident, quote, ToTokens};
+use syn::{Data, DeriveInput};
+
+pub fn enum_try_as_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+
+    let enum_name = &ast.ident;
+
+    let variants: Vec<_> = variants
+        .iter()
+        .filter_map(|variant| {
+            if variant.get_variant_properties().ok()?.disabled.is_some() {
+                return None;
+            }
+
+            match &variant.fields {
+                syn::Fields::Unnamed(values) => {
+                    let variant_name = &variant.ident;
+                    let types: Vec<_> = values.unnamed.iter().map(|field| {
+                        field.to_token_stream()
+                    }).collect();
+                    let field_names: Vec<_> = values.unnamed.iter().enumerate().map(|(i, _)| {
+                        let name = "x".repeat(i + 1);
+                        let name = format_ident!("{}", name);
+                        quote! {#name}
+                    }).collect();
+
+                    let move_fn_name = format_ident!("try_as_{}", snakify(&variant_name.to_string()));
+                    let ref_fn_name = format_ident!("try_as_{}_ref", snakify(&variant_name.to_string()));
+                    let mut_fn_name = format_ident!("try_as_{}_mut", snakify(&variant_name.to_string()));
+
+                    Some(quote! {
+                        #[must_use]
+                        #[inline]
+                        pub fn #move_fn_name(self) -> ::core::option::Option<(#(#types),*)> {
+                            match self {
+                                #enum_name::#variant_name (#(#field_names),*) => Some((#(#field_names),*)),
+                                _ => None
+                            }
+                        }
+
+                        #[must_use]
+                        #[inline]
+                        pub const fn #ref_fn_name(&self) -> ::core::option::Option<(#(&#types),*)> {
+                            match self {
+                                #enum_name::#variant_name (#(#field_names),*) => Some((#(#field_names),*)),
+                                _ => None
+                            }
+                        }
+
+                        #[must_use]
+                        #[inline]
+                        pub fn #mut_fn_name(&mut self) -> ::core::option::Option<(#(&mut #types),*)> {
+                            match self {
+                                #enum_name::#variant_name (#(#field_names),*) => Some((#(#field_names),*)),
+                                _ => None
+                            }
+                        }
+                    })
+                },
+                _ => {
+                    return None;
+                }
+            }
+
+        })
+        .collect();
+
+    Ok(quote! {
+        impl #enum_name {
+            #(#variants)*
+        }
+    }
+    .into())
+}
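
A sketch of the try_as_* accessors, assuming this strum_macros release exports the derive as EnumTryAs; Packet is a hypothetical enum (only tuple variants get accessors).

    use strum_macros::EnumTryAs;

    #[derive(EnumTryAs)]
    enum Packet {
        Ping,
        Data(u32, String),
    }

    fn main() {
        let p = Packet::Data(7, "hello".to_string());
        if let Some((n, s)) = p.try_as_data_ref() {
            assert_eq!(*n, 7);
            assert_eq!(s, "hello");
        }
        // Consuming and mutable flavours also exist: try_as_data(), try_as_data_mut().
        assert!(Packet::Ping.try_as_data().is_none());
    }
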
diff --git a/crates/strum_macros/src/macros/enum_variant_names.rs b/crates/strum_macros/src/macros/enum_variant_names.rs
new file mode 100644
index 0000000..c54d45d
--- /dev/null
+++ b/crates/strum_macros/src/macros/enum_variant_names.rs
@@ -0,0 +1,34 @@
+use proc_macro2::TokenStream;
+use quote::quote;
+use syn::{Data, DeriveInput};
+
+use crate::helpers::{non_enum_error, HasStrumVariantProperties, HasTypeProperties};
+
+pub fn enum_variant_names_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
+    let name = &ast.ident;
+    let gen = &ast.generics;
+    let (impl_generics, ty_generics, where_clause) = gen.split_for_impl();
+
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+
+    // Type-level strum properties (crate module path, case style).
+    let type_properties = ast.get_type_properties()?;
+    let strum_module_path = type_properties.crate_module_path();
+
+    let names = variants
+        .iter()
+        .map(|v| {
+            let props = v.get_variant_properties()?;
+            Ok(props.get_preferred_name(type_properties.case_style))
+        })
+        .collect::<syn::Result<Vec<_>>>()?;
+
+    Ok(quote! {
+        impl #impl_generics #strum_module_path::VariantNames for #name #ty_generics #where_clause {
+            const VARIANTS: &'static [&'static str] = &[ #(#names),* ];
+        }
+    })
+}
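
A sketch of the VARIANTS constant produced above, assuming the derive is exported as VariantNames in this release (older strum_macros spelled it EnumVariantNames); Channel is hypothetical.

    use strum::VariantNames;

    #[derive(strum_macros::VariantNames)]
    enum Channel {
        Stable,
        Beta,
    }

    fn main() {
        assert_eq!(Channel::VARIANTS, ["Stable", "Beta"]);
    }
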
diff --git a/crates/strum_macros/src/macros/from_repr.rs b/crates/strum_macros/src/macros/from_repr.rs
new file mode 100644
index 0000000..92fd7ad
--- /dev/null
+++ b/crates/strum_macros/src/macros/from_repr.rs
@@ -0,0 +1,152 @@
+use heck::ToShoutySnakeCase;
+use proc_macro2::{Span, TokenStream};
+use quote::{format_ident, quote, ToTokens};
+use syn::{Data, DeriveInput, Fields, PathArguments, Type, TypeParen};
+
+use crate::helpers::{non_enum_error, HasStrumVariantProperties};
+
+pub fn from_repr_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
+    let name = &ast.ident;
+    let gen = &ast.generics;
+    let (impl_generics, ty_generics, where_clause) = gen.split_for_impl();
+    let vis = &ast.vis;
+    let attrs = &ast.attrs;
+
+    let mut discriminant_type: Type = syn::parse("usize".parse().unwrap()).unwrap();
+    for attr in attrs {
+        let path = attr.path();
+
+        let mut ts = if let Ok(ts) = attr
+            .meta
+            .require_list()
+            .map(|metas| metas.to_token_stream().into_iter())
+        {
+            ts
+        } else {
+            continue;
+        };
+        // Discard the path
+        let _ = ts.next();
+        let tokens: TokenStream = ts.collect();
+
+        if path.leading_colon.is_some() {
+            continue;
+        }
+        if path.segments.len() != 1 {
+            continue;
+        }
+        let segment = path.segments.first().unwrap();
+        if segment.ident != "repr" {
+            continue;
+        }
+        if segment.arguments != PathArguments::None {
+            continue;
+        }
+        let typ_paren = match syn::parse2::<Type>(tokens.clone()) {
+            Ok(Type::Paren(TypeParen { elem, .. })) => *elem,
+            _ => continue,
+        };
+        let inner_path = match &typ_paren {
+            Type::Path(t) => t,
+            _ => continue,
+        };
+        if let Some(seg) = inner_path.path.segments.last() {
+            for t in &[
+                "u8", "u16", "u32", "u64", "usize", "i8", "i16", "i32", "i64", "isize",
+            ] {
+                if seg.ident == t {
+                    discriminant_type = typ_paren;
+                    break;
+                }
+            }
+        }
+    }
+
+    if gen.lifetimes().count() > 0 {
+        return Err(syn::Error::new(
+            Span::call_site(),
+            "This macro doesn't support enums with lifetimes. \
+             The resulting enums would be unbounded.",
+        ));
+    }
+
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+
+    let mut arms = Vec::new();
+    let mut constant_defs = Vec::new();
+    let mut has_additional_data = false;
+    let mut prev_const_var_ident = None;
+    for variant in variants {
+        if variant.get_variant_properties()?.disabled.is_some() {
+            continue;
+        }
+
+        let ident = &variant.ident;
+        let params = match &variant.fields {
+            Fields::Unit => quote! {},
+            Fields::Unnamed(fields) => {
+                has_additional_data = true;
+                let defaults = ::core::iter::repeat(quote!(::core::default::Default::default()))
+                    .take(fields.unnamed.len());
+                quote! { (#(#defaults),*) }
+            }
+            Fields::Named(fields) => {
+                has_additional_data = true;
+                let fields = fields
+                    .named
+                    .iter()
+                    .map(|field| field.ident.as_ref().unwrap());
+                quote! { {#(#fields: ::core::default::Default::default()),*} }
+            }
+        };
+
+        let const_var_str = format!("{}_DISCRIMINANT", variant.ident).to_shouty_snake_case();
+        let const_var_ident = format_ident!("{}", const_var_str);
+
+        let const_val_expr = match &variant.discriminant {
+            Some((_, expr)) => quote! { #expr },
+            None => match &prev_const_var_ident {
+                Some(prev) => quote! { #prev + 1 },
+                None => quote! { 0 },
+            },
+        };
+
+        constant_defs.push(quote! {const #const_var_ident: #discriminant_type = #const_val_expr;});
+        arms.push(quote! {v if v == #const_var_ident => ::core::option::Option::Some(#name::#ident #params)});
+
+        prev_const_var_ident = Some(const_var_ident);
+    }
+
+    arms.push(quote! { _ => ::core::option::Option::None });
+
+    let const_if_possible = if has_additional_data {
+        quote! {}
+    } else {
+        #[rustversion::before(1.46)]
+        fn filter_by_rust_version(_: TokenStream) -> TokenStream {
+            quote! {}
+        }
+
+        #[rustversion::since(1.46)]
+        fn filter_by_rust_version(s: TokenStream) -> TokenStream {
+            s
+        }
+        filter_by_rust_version(quote! { const })
+    };
+
+    Ok(quote! {
+        #[allow(clippy::use_self)]
+        impl #impl_generics #name #ty_generics #where_clause {
+            #[doc = "Try to create [Self] from the raw representation"]
+            #vis #const_if_possible fn from_repr(discriminant: #discriminant_type) -> Option<#name #ty_generics> {
+                #(#constant_defs)*
+                match discriminant {
+                    #(#arms),*
+                }
+            }
+        }
+    })
+}
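
A sketch of from_repr driven by the #[repr(...)] detection above; Opcode is a hypothetical enum.

    use strum_macros::FromRepr;

    #[derive(Debug, FromRepr, PartialEq)]
    #[repr(u8)]
    enum Opcode {
        Nop = 0,
        Load = 0x10,
        Store, // implicitly 0x11
    }

    fn main() {
        // The discriminant type follows the repr attribute, here u8.
        assert_eq!(Opcode::from_repr(0x10), Some(Opcode::Load));
        assert_eq!(Opcode::from_repr(0x11), Some(Opcode::Store));
        assert_eq!(Opcode::from_repr(0xFF), None);
    }
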
diff --git a/crates/strum_macros/src/macros/mod.rs b/crates/strum_macros/src/macros/mod.rs
new file mode 100644
index 0000000..8df8cd6
--- /dev/null
+++ b/crates/strum_macros/src/macros/mod.rs
@@ -0,0 +1,16 @@
+pub mod enum_count;
+pub mod enum_discriminants;
+pub mod enum_is;
+pub mod enum_iter;
+pub mod enum_messages;
+pub mod enum_properties;
+pub mod enum_try_as;
+pub mod enum_variant_names;
+pub mod from_repr;
+
+mod strings;
+
+pub use self::strings::as_ref_str;
+pub use self::strings::display;
+pub use self::strings::from_string;
+pub use self::strings::to_string;
diff --git a/crates/strum_macros/src/macros/strings/as_ref_str.rs b/crates/strum_macros/src/macros/strings/as_ref_str.rs
new file mode 100644
index 0000000..617b887
--- /dev/null
+++ b/crates/strum_macros/src/macros/strings/as_ref_str.rs
@@ -0,0 +1,117 @@
+use proc_macro2::TokenStream;
+use quote::quote;
+use syn::{parse_quote, Data, DeriveInput, Fields};
+
+use crate::helpers::{non_enum_error, HasStrumVariantProperties, HasTypeProperties};
+
+fn get_arms(ast: &DeriveInput) -> syn::Result<Vec<TokenStream>> {
+    let name = &ast.ident;
+    let mut arms = Vec::new();
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+
+    let type_properties = ast.get_type_properties()?;
+
+    for variant in variants {
+        let ident = &variant.ident;
+        let variant_properties = variant.get_variant_properties()?;
+
+        if variant_properties.disabled.is_some() {
+            continue;
+        }
+
+        // Look at all the serialize attributes.
+        // Use `to_string` attribute (not `as_ref_str` or something) to keep things consistent
+        // (i.e. always `enum.as_ref().to_string() == enum.to_string()`).
+        let output = variant_properties.get_preferred_name(type_properties.case_style);
+        let params = match variant.fields {
+            Fields::Unit => quote! {},
+            Fields::Unnamed(..) => quote! { (..) },
+            Fields::Named(..) => quote! { {..} },
+        };
+
+        arms.push(quote! { #name::#ident #params => #output });
+    }
+
+    if arms.len() < variants.len() {
+        arms.push(quote! {
+            _ => panic!(
+                "AsRef::<str>::as_ref() or AsStaticRef::<str>::as_static() \
+                 called on disabled variant.",
+            )
+        });
+    }
+
+    Ok(arms)
+}
+
+pub fn as_ref_str_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
+    let name = &ast.ident;
+    let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
+    let arms = get_arms(ast)?;
+    Ok(quote! {
+        impl #impl_generics ::core::convert::AsRef<str> for #name #ty_generics #where_clause {
+            fn as_ref(&self) -> &str {
+                match *self {
+                    #(#arms),*
+                }
+            }
+        }
+    })
+}
+
+pub enum GenerateTraitVariant {
+    AsStaticStr,
+    From,
+}
+
+pub fn as_static_str_inner(
+    ast: &DeriveInput,
+    trait_variant: &GenerateTraitVariant,
+) -> syn::Result<TokenStream> {
+    let name = &ast.ident;
+    let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
+    let arms = get_arms(ast)?;
+    let type_properties = ast.get_type_properties()?;
+    let strum_module_path = type_properties.crate_module_path();
+
+    let mut generics = ast.generics.clone();
+    generics
+        .params
+        .push(syn::GenericParam::Lifetime(syn::LifetimeParam::new(
+            parse_quote!('_derivative_strum),
+        )));
+    let (impl_generics2, _, _) = generics.split_for_impl();
+    let arms2 = arms.clone();
+    let arms3 = arms.clone();
+
+    Ok(match trait_variant {
+        GenerateTraitVariant::AsStaticStr => quote! {
+            impl #impl_generics #strum_module_path::AsStaticRef<str> for #name #ty_generics #where_clause {
+                fn as_static(&self) -> &'static str {
+                    match *self {
+                        #(#arms),*
+                    }
+                }
+            }
+        },
+        GenerateTraitVariant::From => quote! {
+            impl #impl_generics ::core::convert::From<#name #ty_generics> for &'static str #where_clause {
+                fn from(x: #name #ty_generics) -> &'static str {
+                    match x {
+                        #(#arms2),*
+                    }
+                }
+            }
+            impl #impl_generics2 ::core::convert::From<&'_derivative_strum #name #ty_generics> for &'static str #where_clause {
+                fn from(x: &'_derivative_strum #name #ty_generics) -> &'static str {
+                    match *x {
+                        #(#arms3),*
+                    }
+                }
+            }
+        },
+    })
+}
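
A sketch of the string conversions produced by get_arms above, assuming the AsRefStr and IntoStaticStr derives (the latter corresponds to GenerateTraitVariant::From); Level is hypothetical.

    use strum_macros::{AsRefStr, IntoStaticStr};

    #[derive(AsRefStr, IntoStaticStr)]
    enum Level {
        #[strum(serialize = "warn")]
        Warning,
        Error,
    }

    fn main() {
        // AsRef<str> uses the same preferred name as the serialize/case-style attributes.
        assert_eq!(Level::Warning.as_ref(), "warn");
        let s: &'static str = Level::Error.into();
        assert_eq!(s, "Error");
    }
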
diff --git a/crates/strum_macros/src/macros/strings/display.rs b/crates/strum_macros/src/macros/strings/display.rs
new file mode 100644
index 0000000..fcc5936
--- /dev/null
+++ b/crates/strum_macros/src/macros/strings/display.rs
@@ -0,0 +1,65 @@
+use proc_macro2::TokenStream;
+use quote::quote;
+use syn::{Data, DeriveInput, Fields};
+
+use crate::helpers::{non_enum_error, HasStrumVariantProperties, HasTypeProperties};
+
+pub fn display_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
+    let name = &ast.ident;
+    let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+
+    let type_properties = ast.get_type_properties()?;
+
+    let mut arms = Vec::new();
+    for variant in variants {
+        let ident = &variant.ident;
+        let variant_properties = variant.get_variant_properties()?;
+
+        if variant_properties.disabled.is_some() {
+            continue;
+        }
+
+        // Look at all the serialize attributes.
+        let output = variant_properties.get_preferred_name(type_properties.case_style);
+
+        let params = match variant.fields {
+            Fields::Unit => quote! {},
+            Fields::Unnamed(..) => quote! { (..) },
+            Fields::Named(..) => quote! { {..} },
+        };
+
+        if variant_properties.to_string.is_none() && variant_properties.default.is_some() {
+            match &variant.fields {
+                Fields::Unnamed(fields) if fields.unnamed.len() == 1 => {
+                    arms.push(quote! { #name::#ident(ref s) => f.pad(s) });
+                }
+                _ => {
+                    return Err(syn::Error::new_spanned(
+                        variant,
+                        "Default only works on newtype structs with a single String field",
+                    ))
+                }
+            }
+        } else {
+            arms.push(quote! { #name::#ident #params => f.pad(#output) });
+        }
+    }
+
+    if arms.len() < variants.len() {
+        arms.push(quote! { _ => panic!("fmt() called on disabled variant.") });
+    }
+
+    Ok(quote! {
+        impl #impl_generics ::core::fmt::Display for #name #ty_generics #where_clause {
+            fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::result::Result<(), ::core::fmt::Error> {
+                match *self {
+                    #(#arms),*
+                }
+            }
+        }
+    })
+}
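
A sketch of the Display impl generated above, including the #[strum(default)] passthrough arm; Phase is hypothetical.

    use strum_macros::Display;

    #[derive(Display)]
    enum Phase {
        #[strum(serialize = "in-progress")]
        InProgress,
        #[strum(default)]
        Other(String),
    }

    fn main() {
        assert_eq!(Phase::InProgress.to_string(), "in-progress");
        // default newtype variants print their inner String via f.pad().
        assert_eq!(Phase::Other("custom".into()).to_string(), "custom");
    }
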
diff --git a/crates/strum_macros/src/macros/strings/from_string.rs b/crates/strum_macros/src/macros/strings/from_string.rs
new file mode 100644
index 0000000..2d25591
--- /dev/null
+++ b/crates/strum_macros/src/macros/strings/from_string.rs
@@ -0,0 +1,180 @@
+use proc_macro2::TokenStream;
+use quote::quote;
+use syn::{Data, DeriveInput, Fields};
+
+use crate::helpers::{
+    non_enum_error, occurrence_error, HasStrumVariantProperties, HasTypeProperties,
+};
+
+pub fn from_string_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
+    let name = &ast.ident;
+    let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+
+    let type_properties = ast.get_type_properties()?;
+    let strum_module_path = type_properties.crate_module_path();
+
+    let mut default_kw = None;
+    let mut default =
+        quote! { ::core::result::Result::Err(#strum_module_path::ParseError::VariantNotFound) };
+
+    let mut phf_exact_match_arms = Vec::new();
+    let mut standard_match_arms = Vec::new();
+    for variant in variants {
+        let ident = &variant.ident;
+        let variant_properties = variant.get_variant_properties()?;
+
+        if variant_properties.disabled.is_some() {
+            continue;
+        }
+
+        if let Some(kw) = variant_properties.default {
+            if let Some(fst_kw) = default_kw {
+                return Err(occurrence_error(fst_kw, kw, "default"));
+            }
+
+            match &variant.fields {
+                Fields::Unnamed(fields) if fields.unnamed.len() == 1 => {}
+                _ => {
+                    return Err(syn::Error::new_spanned(
+                        variant,
+                        "Default only works on newtype structs with a single String field",
+                    ))
+                }
+            }
+
+            default_kw = Some(kw);
+            default = quote! {
+                ::core::result::Result::Ok(#name::#ident(s.into()))
+            };
+            continue;
+        }
+
+        let params = match &variant.fields {
+            Fields::Unit => quote! {},
+            Fields::Unnamed(fields) => {
+                let defaults =
+                    ::core::iter::repeat(quote!(Default::default())).take(fields.unnamed.len());
+                quote! { (#(#defaults),*) }
+            }
+            Fields::Named(fields) => {
+                let fields = fields
+                    .named
+                    .iter()
+                    .map(|field| field.ident.as_ref().unwrap());
+                quote! { {#(#fields: Default::default()),*} }
+            }
+        };
+
+        let is_ascii_case_insensitive = variant_properties
+            .ascii_case_insensitive
+            .unwrap_or(type_properties.ascii_case_insensitive);
+
+        // If we don't have any custom variants, add the default serialized name.
+        for serialization in variant_properties.get_serializations(type_properties.case_style) {
+            if type_properties.use_phf {
+                phf_exact_match_arms.push(quote! { #serialization => #name::#ident #params, });
+
+                if is_ascii_case_insensitive {
+                    // Store the lowercase and UPPERCASE variants in the phf map to capture case-insensitive matches.
+                    let ser_string = serialization.value();
+
+                    let lower =
+                        syn::LitStr::new(&ser_string.to_ascii_lowercase(), serialization.span());
+                    let upper =
+                        syn::LitStr::new(&ser_string.to_ascii_uppercase(), serialization.span());
+                    phf_exact_match_arms.push(quote! { #lower => #name::#ident #params, });
+                    phf_exact_match_arms.push(quote! { #upper => #name::#ident #params, });
+                    standard_match_arms.push(quote! { s if s.eq_ignore_ascii_case(#serialization) => #name::#ident #params, });
+                }
+            } else {
+                standard_match_arms.push(if !is_ascii_case_insensitive {
+                    quote! { #serialization => #name::#ident #params, }
+                } else {
+                    quote! { s if s.eq_ignore_ascii_case(#serialization) => #name::#ident #params, }
+                });
+            }
+        }
+    }
+
+    let phf_body = if phf_exact_match_arms.is_empty() {
+        quote!()
+    } else {
+        quote! {
+            use #strum_module_path::_private_phf_reexport_for_macro_if_phf_feature as phf;
+            static PHF: phf::Map<&'static str, #name> = phf::phf_map! {
+                #(#phf_exact_match_arms)*
+            };
+            if let Some(value) = PHF.get(s).cloned() {
+                return ::core::result::Result::Ok(value);
+            }
+        }
+    };
+    let standard_match_body = if standard_match_arms.is_empty() {
+        default
+    } else {
+        quote! {
+            ::core::result::Result::Ok(match s {
+                #(#standard_match_arms)*
+                _ => return #default,
+            })
+        }
+    };
+
+    let from_str = quote! {
+        #[allow(clippy::use_self)]
+        impl #impl_generics ::core::str::FromStr for #name #ty_generics #where_clause {
+            type Err = #strum_module_path::ParseError;
+            fn from_str(s: &str) -> ::core::result::Result< #name #ty_generics , <Self as ::core::str::FromStr>::Err> {
+                #phf_body
+                #standard_match_body
+            }
+        }
+    };
+
+    let try_from_str = try_from_str(
+        name,
+        &impl_generics,
+        &ty_generics,
+        where_clause,
+        &strum_module_path,
+    );
+
+    Ok(quote! {
+        #from_str
+        #try_from_str
+    })
+}
+
+#[rustversion::before(1.34)]
+fn try_from_str(
+    _name: &proc_macro2::Ident,
+    _impl_generics: &syn::ImplGenerics,
+    _ty_generics: &syn::TypeGenerics,
+    _where_clause: Option<&syn::WhereClause>,
+    _strum_module_path: &syn::Path,
+) -> TokenStream {
+    Default::default()
+}
+
+#[rustversion::since(1.34)]
+fn try_from_str(
+    name: &proc_macro2::Ident,
+    impl_generics: &syn::ImplGenerics,
+    ty_generics: &syn::TypeGenerics,
+    where_clause: Option<&syn::WhereClause>,
+    strum_module_path: &syn::Path,
+) -> TokenStream {
+    quote! {
+        #[allow(clippy::use_self)]
+        impl #impl_generics ::core::convert::TryFrom<&str> for #name #ty_generics #where_clause {
+            type Error = #strum_module_path::ParseError;
+            fn try_from(s: &str) -> ::core::result::Result< #name #ty_generics , <Self as ::core::convert::TryFrom<&str>>::Error> {
+                ::core::str::FromStr::from_str(s)
+            }
+        }
+    }
+}
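
A sketch of the FromStr/TryFrom impls generated above, assuming the EnumString derive; Mode and its attributes are hypothetical.

    use core::str::FromStr;
    use strum_macros::EnumString;

    #[derive(Debug, EnumString, PartialEq)]
    enum Mode {
        #[strum(serialize = "ro", serialize = "read-only")]
        ReadOnly,
        #[strum(ascii_case_insensitive)]
        Write,
    }

    fn main() {
        // Every serialize attribute is an accepted spelling.
        assert_eq!(Mode::from_str("read-only"), Ok(Mode::ReadOnly));
        // Case-insensitive matching can be enabled per variant (or per type).
        assert_eq!(Mode::from_str("WRITE"), Ok(Mode::Write));
        // Anything else is strum::ParseError::VariantNotFound.
        assert!(Mode::from_str("rw").is_err());
    }
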
diff --git a/crates/strum_macros/src/macros/strings/mod.rs b/crates/strum_macros/src/macros/strings/mod.rs
new file mode 100644
index 0000000..e416f4b
--- /dev/null
+++ b/crates/strum_macros/src/macros/strings/mod.rs
@@ -0,0 +1,4 @@
+pub mod as_ref_str;
+pub mod display;
+pub mod from_string;
+pub mod to_string;
diff --git a/crates/strum_macros/src/macros/strings/to_string.rs b/crates/strum_macros/src/macros/strings/to_string.rs
new file mode 100644
index 0000000..9a1e661
--- /dev/null
+++ b/crates/strum_macros/src/macros/strings/to_string.rs
@@ -0,0 +1,67 @@
+use proc_macro2::TokenStream;
+use quote::quote;
+use syn::{Data, DeriveInput, Fields};
+
+use crate::helpers::{non_enum_error, HasStrumVariantProperties, HasTypeProperties};
+
+pub fn to_string_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
+    let name = &ast.ident;
+    let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
+    let variants = match &ast.data {
+        Data::Enum(v) => &v.variants,
+        _ => return Err(non_enum_error()),
+    };
+
+    let type_properties = ast.get_type_properties()?;
+    let mut arms = Vec::new();
+    for variant in variants {
+        let ident = &variant.ident;
+        let variant_properties = variant.get_variant_properties()?;
+
+        if variant_properties.disabled.is_some() {
+            continue;
+        }
+
+        // display variants like Green("lime") as "lime"
+        if variant_properties.to_string.is_none() && variant_properties.default.is_some() {
+            match &variant.fields {
+                Fields::Unnamed(fields) if fields.unnamed.len() == 1 => {
+                    arms.push(quote! { #name::#ident(ref s) => ::std::string::String::from(s) });
+                    continue;
+                }
+                _ => {
+                    return Err(syn::Error::new_spanned(
+                        variant,
+                        "Default only works on newtype structs with a single String field",
+                    ))
+                }
+            }
+        }
+
+        // Look at all the serialize attributes.
+        let output = variant_properties.get_preferred_name(type_properties.case_style);
+
+        let params = match variant.fields {
+            Fields::Unit => quote! {},
+            Fields::Unnamed(..) => quote! { (..) },
+            Fields::Named(..) => quote! { {..} },
+        };
+
+        arms.push(quote! { #name::#ident #params => ::std::string::String::from(#output) });
+    }
+
+    if arms.len() < variants.len() {
+        arms.push(quote! { _ => panic!("to_string() called on disabled variant.") });
+    }
+
+    Ok(quote! {
+        #[allow(clippy::use_self)]
+        impl #impl_generics ::std::string::ToString for #name #ty_generics #where_clause {
+            fn to_string(&self) -> ::std::string::String {
+                match *self {
+                    #(#arms),*
+                }
+            }
+        }
+    })
+}
diff --git a/crates/syn-mid/.cargo-checksum.json b/crates/syn-mid/.cargo-checksum.json
new file mode 100644
index 0000000..a2177c9
--- /dev/null
+++ b/crates/syn-mid/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"79ef4a7595b5e6532129687833c09d713715d6ab399d8f7a3a2cc5a6d265d8bd","Cargo.toml":"a27103ee50b54729979789dff3e586df0b3737219eb209e9a44f0e3fbd27cdfd","LICENSE-APACHE":"0d542e0c8804e39aa7f37eb00da5a762149dc682d7829451287e11b938e94594","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"cb32d6b690b9ac37121eec9e68cffb6507ebc190ccc62f78c5e8f424bee073e4","src/func.rs":"f7abef0aecdcfb0038791e9824d23eee7eb11f6833f13aa0f0e87569f44510e4","src/lib.rs":"b26307a0f236c42374158f0e19997b451379a98863036cba6c94cd2f10811df9","src/macros.rs":"4ba7fcf36e3c2251068a12174d0dc9be0165117f6ca34e84e6a4d1dee92b2070","src/pat.rs":"d901acbe3197b9eaf76c307f8c292a80c42d439574ab597858e5999d349f5f33","src/path.rs":"cc68d0314d55692353fffc2c9cf9c4464d2f80cfe12ab9cde6960fb50d1b15ae"},"package":"b5dc35bb08dd1ca3dfb09dce91fd2d13294d6711c88897d9a9d60acf39bce049"}
\ No newline at end of file
diff --git a/crates/syn-mid/Android.bp b/crates/syn-mid/Android.bp
new file mode 100644
index 0000000..1e90936
--- /dev/null
+++ b/crates/syn-mid/Android.bp
@@ -0,0 +1,30 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_syn-mid_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_syn-mid_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library_host {
+    name: "libsyn_mid",
+    host_cross_supported: false,
+    crate_name: "syn_mid",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.6.0",
+    crate_root: "src/lib.rs",
+    edition: "2021",
+    rustlibs: [
+        "libproc_macro2",
+        "libquote",
+        "libsyn",
+    ],
+    compile_multilib: "first",
+}
diff --git a/crates/syn-mid/CHANGELOG.md b/crates/syn-mid/CHANGELOG.md
new file mode 100644
index 0000000..82ee4b0
--- /dev/null
+++ b/crates/syn-mid/CHANGELOG.md
@@ -0,0 +1,87 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](https://semver.org).
+
+<!--
+Note: In this file, do not use the hard wrap in the middle of a sentence for compatibility with GitHub comment style markdown rendering.
+-->
+
+## [Unreleased]
+
+## [0.6.0] - 2023-09-30
+
+- Update to syn 2.0. ([#26](https://github.com/taiki-e/syn-mid/pull/26))
+
+## [0.5.4] - 2023-06-29
+
+- Increase the minimum supported Rust version from Rust 1.31 to Rust 1.56.
+
+- Update minimal version of `proc-macro2` to 1.0.60.
+
+## [0.5.3] - 2021-01-05
+
+- Exclude unneeded files from crates.io.
+
+## [0.5.2] - 2020-12-29
+
+- Documentation improvements.
+
+## [0.5.1] - 2020-10-14
+
+- Implement `Parse` for `Signature`.
+
+- Update parser implementations based on `syn` 1.0.44. This includes some bugfixes.
+
+- Disable default features of `proc-macro2` and `quote`.
+
+## [0.5.0] - 2019-12-09
+
+- Added `Signature` type. ([#13](https://github.com/taiki-e/syn-mid/pull/13))
+
+## [0.4.0] - 2019-08-15
+
+- Updated all data structures based on `syn` 1.0.
+
+- Updated `proc-macro2`, `syn`, and `quote` to 1.0.
+
+- Bumped the minimum required version from Rust 1.30 to Rust 1.31.
+
+## [0.3.0] - 2019-02-18
+
+- Removed support for unneeded syntax.
+
+- Removed unneeded types and fields.
+
+- Implemented `Parse` for `Block`.
+
+- Changed `clone-impls` feature to "disabled by default".
+
+- Removed `extra-traits` feature.
+
+- Bumped the minimum required version from Rust 1.15 to Rust 1.30.
+
+## [0.2.0] - 2019-02-15
+
+- Reduced features.
+
+- Fixed bugs.
+
+## [0.1.0] - 2019-02-14
+
+**Note:** This release has been yanked.
+
+Initial release
+
+[Unreleased]: https://github.com/taiki-e/syn-mid/compare/v0.6.0...HEAD
+[0.6.0]: https://github.com/taiki-e/syn-mid/compare/v0.5.4...v0.6.0
+[0.5.4]: https://github.com/taiki-e/syn-mid/compare/v0.5.3...v0.5.4
+[0.5.3]: https://github.com/taiki-e/syn-mid/compare/v0.5.2...v0.5.3
+[0.5.2]: https://github.com/taiki-e/syn-mid/compare/v0.5.1...v0.5.2
+[0.5.1]: https://github.com/taiki-e/syn-mid/compare/v0.5.0...v0.5.1
+[0.5.0]: https://github.com/taiki-e/syn-mid/compare/v0.4.0...v0.5.0
+[0.4.0]: https://github.com/taiki-e/syn-mid/compare/v0.3.0...v0.4.0
+[0.3.0]: https://github.com/taiki-e/syn-mid/compare/v0.2.0...v0.3.0
+[0.2.0]: https://github.com/taiki-e/syn-mid/compare/v0.1.0...v0.2.0
+[0.1.0]: https://github.com/taiki-e/syn-mid/releases/tag/v0.1.0
diff --git a/crates/syn-mid/Cargo.lock b/crates/syn-mid/Cargo.lock
new file mode 100644
index 0000000..7ed8696
--- /dev/null
+++ b/crates/syn-mid/Cargo.lock
@@ -0,0 +1,47 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn-mid"
+version = "0.6.0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
diff --git a/crates/syn-mid/Cargo.toml b/crates/syn-mid/Cargo.toml
new file mode 100644
index 0000000..a646f02
--- /dev/null
+++ b/crates/syn-mid/Cargo.toml
@@ -0,0 +1,61 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.56"
+name = "syn-mid"
+version = "0.6.0"
+exclude = [
+    "/.*",
+    "/tools",
+]
+description = """
+Providing the features between \"full\" and \"derive\" of syn.
+"""
+readme = "README.md"
+keywords = [
+    "syn",
+    "macros",
+]
+categories = [
+    "development-tools::procedural-macro-helpers",
+    "parser-implementations",
+]
+license = "Apache-2.0 OR MIT"
+repository = "https://github.com/taiki-e/syn-mid"
+
+[package.metadata.docs.rs]
+all-features = true
+targets = ["x86_64-unknown-linux-gnu"]
+
+[lib]
+doc-scrape-examples = false
+
+[dependencies.proc-macro2]
+version = "1.0.60"
+default-features = false
+
+[dependencies.quote]
+version = "1"
+default-features = false
+
+[dependencies.syn]
+version = "2"
+features = [
+    "parsing",
+    "printing",
+    "derive",
+]
+default-features = false
+
+[features]
+clone-impls = ["syn/clone-impls"]
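
For context, a minimal sketch of how a proc-macro crate typically uses syn-mid: parse a function's attributes, visibility, and signature while keeping the body as opaque tokens (the "between full and derive" niche the description refers to). The exact field names are assumptions drawn from the syn-mid docs, not part of this change.

    use quote::quote;
    use syn_mid::ItemFn;

    // Re-emit the annotated function with a generated doc attribute; only the
    // signature is inspected, the body is never parsed into an AST.
    fn annotate(item: proc_macro2::TokenStream) -> proc_macro2::TokenStream {
        let func: ItemFn = syn::parse2(item).expect("expected a fn item");
        let doc = format!("wrapped function: {}", func.sig.ident);
        quote! {
            #[doc = #doc]
            #func
        }
    }
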
diff --git a/crates/syn-mid/LICENSE b/crates/syn-mid/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/syn-mid/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/syn-mid/LICENSE-APACHE b/crates/syn-mid/LICENSE-APACHE
new file mode 100644
index 0000000..f433b1a
--- /dev/null
+++ b/crates/syn-mid/LICENSE-APACHE
@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
diff --git a/crates/syn-mid/LICENSE-MIT b/crates/syn-mid/LICENSE-MIT
new file mode 100644
index 0000000..31aa793
--- /dev/null
+++ b/crates/syn-mid/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/syn-mid/METADATA b/crates/syn-mid/METADATA
new file mode 100644
index 0000000..df66819
--- /dev/null
+++ b/crates/syn-mid/METADATA
@@ -0,0 +1,23 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/syn-mid
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
+
+name: "syn-mid"
+description: "Providing the features between \"full\" and \"derive\" of syn."
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/syn-mid"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/syn-mid/syn-mid-0.6.0.crate"
+  }
+  version: "0.6.0"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2023
+    month: 11
+    day: 15
+  }
+}
diff --git a/crates/syn-mid/MODULE_LICENSE_APACHE2 b/crates/syn-mid/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/syn-mid/MODULE_LICENSE_APACHE2
diff --git a/crates/syn-mid/NOTICE b/crates/syn-mid/NOTICE
new file mode 120000
index 0000000..7a694c9
--- /dev/null
+++ b/crates/syn-mid/NOTICE
@@ -0,0 +1 @@
+LICENSE
\ No newline at end of file
diff --git a/crates/syn-mid/README.md b/crates/syn-mid/README.md
new file mode 100644
index 0000000..cfd6f7e
--- /dev/null
+++ b/crates/syn-mid/README.md
@@ -0,0 +1,63 @@
+# syn-mid
+
+[![crates.io](https://img.shields.io/crates/v/syn-mid?style=flat-square&logo=rust)](https://crates.io/crates/syn-mid)
+[![docs.rs](https://img.shields.io/badge/docs.rs-syn--mid-blue?style=flat-square&logo=docs.rs)](https://docs.rs/syn-mid)
+[![license](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue?style=flat-square)](#license)
+[![rustc](https://img.shields.io/badge/rustc-1.56+-blue?style=flat-square&logo=rust)](https://www.rust-lang.org)
+[![build status](https://img.shields.io/github/actions/workflow/status/taiki-e/syn-mid/ci.yml?branch=main&style=flat-square&logo=github)](https://github.com/taiki-e/syn-mid/actions)
+
+<!-- tidy:crate-doc:start -->
+Providing the features between "full" and "derive" of syn.
+
+This crate provides the following two unique data structures.
+
+- [`syn_mid::ItemFn`] -- A function whose body is not parsed.
+
+  ```text
+  fn process(n: usize) -> Result<()> { ... }
+  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^     ^
+  ```
+
+- [`syn_mid::Block`] -- A block whose body is not parsed.
+
+  ```text
+  { ... }
+  ^     ^
+  ```
+
+Other data structures are the same as those of [syn]. They are defined in
+this crate because they cannot be used in [syn] without the "full"
+feature.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+syn-mid = "0.5"
+```
+
+*Compiler support: requires rustc 1.56+*
+
+[**Examples**](https://github.com/taiki-e/syn-mid/tree/HEAD/examples)
+
+## Optional features
+
+- **`clone-impls`** — Clone impls for all syntax tree types.
+
+[syn]: https://github.com/dtolnay/syn
+
+<!-- tidy:crate-doc:end -->
+
+[`syn_mid::Block`]: https://docs.rs/syn-mid/latest/syn_mid/struct.Block.html
+[`syn_mid::ItemFn`]: https://docs.rs/syn-mid/latest/syn_mid/struct.ItemFn.html
+
+## License
+
+Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or
+[MIT license](LICENSE-MIT) at your option.
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
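
As a minimal sketch of the usage described in the README above (assuming the vendored syn-mid 0.6 together with syn 2.x, proc-macro2, and quote as dependencies; the `process` function is illustrative only), `ItemFn` parses the signature fully while leaving the body as raw tokens:

```rust
use quote::quote;
use syn_mid::ItemFn;

fn main() {
    // A function item as a token stream, e.g. the input of an attribute macro.
    let tokens = quote! {
        fn process(n: usize) -> Result<(), ()> { let _ = n; Ok(()) }
    };

    // The signature is parsed into structured data...
    let item: ItemFn = syn::parse2(tokens).expect("expected a function item");
    assert_eq!(item.sig.ident.to_string(), "process");
    assert_eq!(item.sig.inputs.len(), 1);

    // ...while the block's statements stay an unparsed proc_macro2::TokenStream,
    // which is what keeps syn-mid lighter than syn's "full" feature.
    println!("body tokens: {}", item.block.stmts);
}
```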
diff --git a/crates/syn-mid/cargo_embargo.json b/crates/syn-mid/cargo_embargo.json
new file mode 100644
index 0000000..b49e77c
--- /dev/null
+++ b/crates/syn-mid/cargo_embargo.json
@@ -0,0 +1,9 @@
+{
+  "package": {
+    "syn-mid": {
+      "device_supported": false,
+      "host_first_multilib": true
+    }
+  },
+  "run_cargo": false
+}
diff --git a/crates/syn-mid/src/func.rs b/crates/syn-mid/src/func.rs
new file mode 100644
index 0000000..477841b
--- /dev/null
+++ b/crates/syn-mid/src/func.rs
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// Based on https://github.com/dtolnay/syn/blob/2.0.37/src/item.rs.
+
+use proc_macro2::TokenStream;
+use syn::{
+    punctuated::Punctuated, token, Abi, Attribute, Generics, Ident, Lifetime, ReturnType, Token,
+    Type, Visibility,
+};
+
+use super::{Pat, PatType};
+
+ast_struct! {
+    /// A free-standing function: `fn process(n: usize) -> Result<()> { ...
+    /// }`.
+    pub struct ItemFn {
+        pub attrs: Vec<Attribute>,
+        pub vis: Visibility,
+        pub sig: Signature,
+        pub block: Box<Block>,
+    }
+}
+
+ast_struct! {
+    /// A braced block containing Rust statements.
+    pub struct Block {
+        pub brace_token: token::Brace,
+        /// Statements in a block
+        pub stmts: TokenStream,
+    }
+}
+
+ast_struct! {
+    /// A function signature in a trait or implementation: `unsafe fn
+    /// initialize(&self)`.
+    pub struct Signature {
+        pub constness: Option<Token![const]>,
+        pub asyncness: Option<Token![async]>,
+        pub unsafety: Option<Token![unsafe]>,
+        pub abi: Option<Abi>,
+        pub fn_token: Token![fn],
+        pub ident: Ident,
+        pub generics: Generics,
+        pub paren_token: token::Paren,
+        pub inputs: Punctuated<FnArg, Token![,]>,
+        pub variadic: Option<Variadic>,
+        pub output: ReturnType,
+    }
+}
+
+ast_enum_of_structs! {
+    /// An argument in a function signature: the `n: usize` in `fn f(n: usize)`.
+    pub enum FnArg {
+        /// The `self` argument of an associated method, whether taken by value
+        /// or by reference.
+        Receiver(Receiver),
+
+        /// A function argument accepted by pattern and type.
+        Typed(PatType),
+    }
+}
+
+ast_struct! {
+    /// The `self` argument of an associated method, whether taken by value
+    /// or by reference.
+    pub struct Receiver {
+        pub attrs: Vec<Attribute>,
+        pub reference: Option<(Token![&], Option<Lifetime>)>,
+        pub mutability: Option<Token![mut]>,
+        pub self_token: Token![self],
+        pub colon_token: Option<Token![:]>,
+        pub ty: Box<Type>,
+    }
+}
+
+ast_struct! {
+    /// The variadic argument of a foreign function.
+    pub struct Variadic {
+        pub attrs: Vec<Attribute>,
+        pub pat: Option<(Box<Pat>, Token![:])>,
+        pub dots: Token![...],
+        pub comma: Option<Token![,]>,
+    }
+}
+
+mod parsing {
+    use syn::{
+        braced, parenthesized,
+        parse::{discouraged::Speculative, Parse, ParseStream, Result},
+        punctuated::Punctuated,
+        Abi, Attribute, Error, Generics, Ident, Lifetime, Path, ReturnType, Token, Type, TypePath,
+        TypeReference, Visibility,
+    };
+
+    use super::{Block, FnArg, ItemFn, Receiver, Signature, Variadic};
+    use crate::pat::{Pat, PatType, PatWild};
+
+    impl Parse for Block {
+        fn parse(input: ParseStream<'_>) -> Result<Self> {
+            let content;
+            Ok(Self { brace_token: braced!(content in input), stmts: content.parse()? })
+        }
+    }
+
+    impl Parse for Signature {
+        fn parse(input: ParseStream<'_>) -> Result<Self> {
+            let constness: Option<Token![const]> = input.parse()?;
+            let asyncness: Option<Token![async]> = input.parse()?;
+            let unsafety: Option<Token![unsafe]> = input.parse()?;
+            let abi: Option<Abi> = input.parse()?;
+            let fn_token: Token![fn] = input.parse()?;
+            let ident: Ident = input.parse()?;
+            let mut generics: Generics = input.parse()?;
+
+            let content;
+            let paren_token = parenthesized!(content in input);
+            let (inputs, variadic) = parse_fn_args(&content)?;
+
+            let output: ReturnType = input.parse()?;
+            generics.where_clause = input.parse()?;
+
+            Ok(Self {
+                constness,
+                asyncness,
+                unsafety,
+                abi,
+                fn_token,
+                ident,
+                generics,
+                paren_token,
+                inputs,
+                variadic,
+                output,
+            })
+        }
+    }
+
+    impl Parse for ItemFn {
+        fn parse(input: ParseStream<'_>) -> Result<Self> {
+            let attrs = input.call(Attribute::parse_outer)?;
+            let vis: Visibility = input.parse()?;
+            let sig: Signature = input.parse()?;
+            let block = input.parse()?;
+            Ok(Self { attrs, vis, sig, block: Box::new(block) })
+        }
+    }
+
+    impl Parse for FnArg {
+        fn parse(input: ParseStream<'_>) -> Result<Self> {
+            let allow_variadic = false;
+            let attrs = input.call(Attribute::parse_outer)?;
+            match parse_fn_arg_or_variadic(input, attrs, allow_variadic)? {
+                FnArgOrVariadic::FnArg(arg) => Ok(arg),
+                FnArgOrVariadic::Variadic(_) => unreachable!(),
+            }
+        }
+    }
+
+    enum FnArgOrVariadic {
+        FnArg(FnArg),
+        Variadic(Variadic),
+    }
+
+    fn parse_fn_arg_or_variadic(
+        input: ParseStream<'_>,
+        attrs: Vec<Attribute>,
+        allow_variadic: bool,
+    ) -> Result<FnArgOrVariadic> {
+        let ahead = input.fork();
+        if let Ok(mut receiver) = ahead.parse::<Receiver>() {
+            input.advance_to(&ahead);
+            receiver.attrs = attrs;
+            return Ok(FnArgOrVariadic::FnArg(FnArg::Receiver(receiver)));
+        }
+
+        // Hack to parse pre-2018 syntax in
+        // test/ui/rfc-2565-param-attrs/param-attrs-pretty.rs
+        // because the rest of the test case is valuable.
+        if input.peek(Ident) && input.peek2(Token![<]) {
+            let span = input.fork().parse::<Ident>()?.span();
+            return Ok(FnArgOrVariadic::FnArg(FnArg::Typed(PatType {
+                attrs,
+                pat: Box::new(Pat::Wild(PatWild {
+                    attrs: Vec::new(),
+                    underscore_token: Token![_](span),
+                })),
+                colon_token: Token![:](span),
+                ty: input.parse()?,
+            })));
+        }
+
+        let pat = Box::new(Pat::parse_single(input)?);
+        let colon_token: Token![:] = input.parse()?;
+
+        if allow_variadic {
+            if let Some(dots) = input.parse::<Option<Token![...]>>()? {
+                return Ok(FnArgOrVariadic::Variadic(Variadic {
+                    attrs,
+                    pat: Some((pat, colon_token)),
+                    dots,
+                    comma: None,
+                }));
+            }
+        }
+
+        Ok(FnArgOrVariadic::FnArg(FnArg::Typed(PatType {
+            attrs,
+            pat,
+            colon_token,
+            ty: input.parse()?,
+        })))
+    }
+
+    impl Parse for Receiver {
+        fn parse(input: ParseStream<'_>) -> Result<Self> {
+            let reference = if input.peek(Token![&]) {
+                let ampersand: Token![&] = input.parse()?;
+                let lifetime: Option<Lifetime> = input.parse()?;
+                Some((ampersand, lifetime))
+            } else {
+                None
+            };
+            let mutability: Option<Token![mut]> = input.parse()?;
+            let self_token: Token![self] = input.parse()?;
+            let colon_token: Option<Token![:]> =
+                if reference.is_some() { None } else { input.parse()? };
+            let ty: Type = if colon_token.is_some() {
+                input.parse()?
+            } else {
+                let mut ty = Type::Path(TypePath {
+                    qself: None,
+                    path: Path::from(Ident::new("Self", self_token.span)),
+                });
+                if let Some((ampersand, lifetime)) = reference.as_ref() {
+                    ty = Type::Reference(TypeReference {
+                        and_token: Token![&](ampersand.span),
+                        lifetime: lifetime.clone(),
+                        mutability: mutability.as_ref().map(|m| Token![mut](m.span)),
+                        elem: Box::new(ty),
+                    });
+                }
+                ty
+            };
+            Ok(Self {
+                attrs: Vec::new(),
+                reference,
+                mutability,
+                self_token,
+                colon_token,
+                ty: Box::new(ty),
+            })
+        }
+    }
+
+    fn parse_fn_args(
+        input: ParseStream<'_>,
+    ) -> Result<(Punctuated<FnArg, Token![,]>, Option<Variadic>)> {
+        let mut args = Punctuated::new();
+        let mut variadic = None;
+        let mut has_receiver = false;
+
+        while !input.is_empty() {
+            let attrs = input.call(Attribute::parse_outer)?;
+
+            if let Some(dots) = input.parse::<Option<Token![...]>>()? {
+                variadic = Some(Variadic {
+                    attrs,
+                    pat: None,
+                    dots,
+                    comma: if input.is_empty() { None } else { Some(input.parse()?) },
+                });
+                break;
+            }
+
+            let allow_variadic = true;
+            let arg = match parse_fn_arg_or_variadic(input, attrs, allow_variadic)? {
+                FnArgOrVariadic::FnArg(arg) => arg,
+                FnArgOrVariadic::Variadic(arg) => {
+                    variadic = Some(Variadic {
+                        comma: if input.is_empty() { None } else { Some(input.parse()?) },
+                        ..arg
+                    });
+                    break;
+                }
+            };
+
+            match &arg {
+                FnArg::Receiver(receiver) if has_receiver => {
+                    return Err(Error::new(
+                        receiver.self_token.span,
+                        "unexpected second method receiver",
+                    ));
+                }
+                FnArg::Receiver(receiver) if !args.is_empty() => {
+                    return Err(Error::new(receiver.self_token.span, "unexpected method receiver"));
+                }
+                FnArg::Receiver(_) => has_receiver = true,
+                FnArg::Typed(_) => {}
+            }
+            args.push_value(arg);
+
+            if input.is_empty() {
+                break;
+            }
+
+            let comma: Token![,] = input.parse()?;
+            args.push_punct(comma);
+        }
+
+        Ok((args, variadic))
+    }
+}
+
+mod printing {
+    use proc_macro2::TokenStream;
+    use quote::{ToTokens, TokenStreamExt};
+    use syn::{Token, Type};
+
+    use super::{Block, ItemFn, Receiver, Signature, Variadic};
+
+    impl ToTokens for ItemFn {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            self.vis.to_tokens(tokens);
+            self.sig.to_tokens(tokens);
+            self.block.to_tokens(tokens);
+        }
+    }
+
+    impl ToTokens for Block {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            self.brace_token.surround(tokens, |tokens| {
+                tokens.append_all(self.stmts.clone());
+            });
+        }
+    }
+
+    impl ToTokens for Signature {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            self.constness.to_tokens(tokens);
+            self.asyncness.to_tokens(tokens);
+            self.unsafety.to_tokens(tokens);
+            self.abi.to_tokens(tokens);
+            self.fn_token.to_tokens(tokens);
+            self.ident.to_tokens(tokens);
+            self.generics.to_tokens(tokens);
+            self.paren_token.surround(tokens, |tokens| {
+                self.inputs.to_tokens(tokens);
+                if let Some(variadic) = &self.variadic {
+                    if !self.inputs.empty_or_trailing() {
+                        <Token![,]>::default().to_tokens(tokens);
+                    }
+                    variadic.to_tokens(tokens);
+                }
+            });
+            self.output.to_tokens(tokens);
+            self.generics.where_clause.to_tokens(tokens);
+        }
+    }
+
+    impl ToTokens for Receiver {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            if let Some((ampersand, lifetime)) = &self.reference {
+                ampersand.to_tokens(tokens);
+                lifetime.to_tokens(tokens);
+            }
+            self.mutability.to_tokens(tokens);
+            self.self_token.to_tokens(tokens);
+            if let Some(colon_token) = &self.colon_token {
+                colon_token.to_tokens(tokens);
+                self.ty.to_tokens(tokens);
+            } else {
+                let consistent = match (&self.reference, &self.mutability, &*self.ty) {
+                    (Some(_), mutability, Type::Reference(ty)) => {
+                        mutability.is_some() == ty.mutability.is_some()
+                            && match &*ty.elem {
+                                Type::Path(ty) => ty.qself.is_none() && ty.path.is_ident("Self"),
+                                _ => false,
+                            }
+                    }
+                    (None, _, Type::Path(ty)) => ty.qself.is_none() && ty.path.is_ident("Self"),
+                    _ => false,
+                };
+                if !consistent {
+                    <Token![:]>::default().to_tokens(tokens);
+                    self.ty.to_tokens(tokens);
+                }
+            }
+        }
+    }
+
+    impl ToTokens for Variadic {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            if let Some((pat, colon)) = &self.pat {
+                pat.to_tokens(tokens);
+                colon.to_tokens(tokens);
+            }
+            self.dots.to_tokens(tokens);
+            self.comma.to_tokens(tokens);
+        }
+    }
+}
diff --git a/crates/syn-mid/src/lib.rs b/crates/syn-mid/src/lib.rs
new file mode 100644
index 0000000..ad0bf73
--- /dev/null
+++ b/crates/syn-mid/src/lib.rs
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+/*!
+<!-- tidy:crate-doc:start -->
+Providing the features between "full" and "derive" of syn.
+
+This crate provides the following two unique data structures.
+
+- [`syn_mid::ItemFn`] -- A function whose body is not parsed.
+
+  ```text
+  fn process(n: usize) -> Result<()> { ... }
+  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^     ^
+  ```
+
+- [`syn_mid::Block`] -- A block whose body is not parsed.
+
+  ```text
+  { ... }
+  ^     ^
+  ```
+
+Other data structures are the same as those of [syn]. They are defined in
+this crate because they cannot be used in [syn] without the "full"
+feature.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+syn-mid = "0.5"
+```
+
+*Compiler support: requires rustc 1.56+*
+
+[**Examples**](https://github.com/taiki-e/syn-mid/tree/HEAD/examples)
+
+## Optional features
+
+- **`clone-impls`** — Clone impls for all syntax tree types.
+
+[syn]: https://github.com/dtolnay/syn
+
+<!-- tidy:crate-doc:end -->
+*/
+
+#![doc(test(
+    no_crate_inject,
+    attr(
+        deny(warnings, rust_2018_idioms, single_use_lifetimes),
+        allow(dead_code, unused_variables)
+    )
+))]
+#![forbid(unsafe_code)]
+#![warn(
+    rust_2018_idioms,
+    single_use_lifetimes,
+    unreachable_pub,
+    clippy::pedantic,
+    // Lints that may help when writing public library.
+    // missing_debug_implementations,
+    // missing_docs,
+    clippy::alloc_instead_of_core,
+    // clippy::exhaustive_enums, // TODO
+    // clippy::exhaustive_structs, // TODO
+    clippy::impl_trait_in_params,
+    // clippy::missing_inline_in_public_items,
+    // clippy::std_instead_of_alloc,
+    clippy::std_instead_of_core,
+)]
+#![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)]
+
+// Much of the code in this crate is copied from https://github.com/dtolnay/syn.
+
+#[cfg(doc)]
+extern crate self as syn_mid;
+
+#[macro_use]
+mod macros;
+
+mod func;
+mod pat;
+mod path;
+
+#[doc(no_inline)]
+pub use syn::ExprPath as PatPath;
+
+pub use crate::{
+    func::{Block, FnArg, ItemFn, Receiver, Signature, Variadic},
+    pat::{
+        FieldPat, Pat, PatIdent, PatReference, PatRest, PatStruct, PatTuple, PatTupleStruct,
+        PatType, PatWild,
+    },
+};
diff --git a/crates/syn-mid/src/macros.rs b/crates/syn-mid/src/macros.rs
new file mode 100644
index 0000000..7ea574c
--- /dev/null
+++ b/crates/syn-mid/src/macros.rs
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+macro_rules! ast_struct {
+    (
+        [$($attrs_pub:tt)*]
+        struct $name:ident $($rest:tt)*
+    ) => {
+        #[cfg_attr(feature = "clone-impls", derive(Clone))]
+        $($attrs_pub)* struct $name $($rest)*
+    };
+
+    ($($tt:tt)*) => {
+        strip_attrs_pub!(ast_struct!($($tt)*));
+    };
+}
+
+macro_rules! ast_enum {
+    (
+        [$($attrs_pub:tt)*]
+        enum $name:ident $($rest:tt)*
+    ) => (
+        #[cfg_attr(feature = "clone-impls", derive(Clone))]
+        $($attrs_pub)* enum $name $($rest)*
+    );
+
+    ($($tt:tt)*) => {
+        strip_attrs_pub!(ast_enum!($($tt)*));
+    };
+}
+
+macro_rules! ast_enum_of_structs {
+    (
+        $(#[$enum_attr:meta])*
+        $pub:ident $enum:ident $name:ident $body:tt
+    ) => {
+        ast_enum!($(#[$enum_attr])* $pub $enum $name $body);
+        ast_enum_of_structs_impl!($pub $enum $name $body);
+    };
+}
+
+macro_rules! ast_enum_of_structs_impl {
+    (
+        $pub:ident $enum:ident $name:ident {
+            $(
+                $(#[$variant_attr:meta])*
+                $variant:ident $( ($member:ident) )*,
+            )*
+        }
+    ) => {
+        check_keyword_matches!(pub $pub);
+        check_keyword_matches!(enum $enum);
+
+        $(
+            $(
+                impl From<$member> for $name {
+                    fn from(e: $member) -> $name {
+                        $name::$variant(e)
+                    }
+                }
+            )*
+        )*
+
+        generate_to_tokens! {
+            ()
+            tokens
+            $name { $($variant $($member)*,)* }
+        }
+    };
+}
+
+macro_rules! generate_to_tokens {
+    (($($arms:tt)*) $tokens:ident $name:ident { $variant:ident, $($next:tt)*}) => {
+        generate_to_tokens!(
+            ($($arms)* $name::$variant => {})
+            $tokens $name { $($next)* }
+        );
+    };
+
+    (($($arms:tt)*) $tokens:ident $name:ident { $variant:ident $member:ident, $($next:tt)*}) => {
+        generate_to_tokens!(
+            ($($arms)* $name::$variant(_e) => quote::ToTokens::to_tokens(_e, $tokens),)
+            $tokens $name { $($next)* }
+        );
+    };
+
+    (($($arms:tt)*) $tokens:ident $name:ident {}) => {
+        impl quote::ToTokens for $name {
+            fn to_tokens(&self, $tokens: &mut proc_macro2::TokenStream) {
+                match self {
+                    $($arms)*
+                }
+            }
+        }
+    };
+}
+
+macro_rules! strip_attrs_pub {
+    ($mac:ident!($(#[$m:meta])* $pub:ident $($tt:tt)*)) => {
+        check_keyword_matches!(pub $pub);
+
+        $mac!([$(#[$m])* $pub] $($tt)*);
+    };
+}
+
+macro_rules! check_keyword_matches {
+    (enum enum) => {};
+    (pub pub) => {};
+}
diff --git a/crates/syn-mid/src/pat.rs b/crates/syn-mid/src/pat.rs
new file mode 100644
index 0000000..7a193fb
--- /dev/null
+++ b/crates/syn-mid/src/pat.rs
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// Based on https://github.com/dtolnay/syn/blob/2.0.37/src/item.rs.
+
+use syn::{punctuated::Punctuated, token, Attribute, Ident, Member, Path, Token, Type};
+
+use super::PatPath;
+
+ast_enum_of_structs! {
+    /// A pattern in a local binding, function signature, match expression, or
+    /// various other places.
+    #[non_exhaustive]
+    pub enum Pat {
+        /// A pattern that binds a new variable: `ref mut binding @ SUBPATTERN`.
+        Ident(PatIdent),
+
+        /// A path pattern like `Color::Red`.
+        Path(PatPath),
+
+        /// A reference pattern: `&mut var`.
+        Reference(PatReference),
+
+        /// A struct or struct variant pattern: `Variant { x, y, .. }`.
+        Struct(PatStruct),
+
+        /// A tuple pattern: `(a, b)`.
+        Tuple(PatTuple),
+
+        /// A tuple struct or tuple variant pattern: `Variant(x, y, .., z)`.
+        TupleStruct(PatTupleStruct),
+
+        /// A type ascription pattern: `foo: f64`.
+        Type(PatType),
+
+        /// A pattern that matches any value: `_`.
+        Wild(PatWild),
+    }
+}
+
+ast_struct! {
+    /// A pattern that binds a new variable: `ref mut binding @ SUBPATTERN`.
+    pub struct PatIdent {
+        pub attrs: Vec<Attribute>,
+        pub by_ref: Option<Token![ref]>,
+        pub mutability: Option<Token![mut]>,
+        pub ident: Ident,
+    }
+}
+
+ast_struct! {
+    /// A reference pattern: `&mut var`.
+    pub struct PatReference {
+        pub attrs: Vec<Attribute>,
+        pub and_token: Token![&],
+        pub mutability: Option<Token![mut]>,
+        pub pat: Box<Pat>,
+    }
+}
+
+ast_struct! {
+    /// The dots in a tuple pattern: `[0, 1, ..]`.
+    pub struct PatRest {
+        pub attrs: Vec<Attribute>,
+        pub dot2_token: Token![..],
+    }
+}
+
+ast_struct! {
+    /// A struct or struct variant pattern: `Variant { x, y, .. }`.
+    pub struct PatStruct {
+        pub attrs: Vec<Attribute>,
+        pub path: Path,
+        pub brace_token: token::Brace,
+        pub fields: Punctuated<FieldPat, Token![,]>,
+        pub rest: Option<PatRest>,
+    }
+}
+
+ast_struct! {
+    /// A tuple pattern: `(a, b)`.
+    pub struct PatTuple {
+        pub attrs: Vec<Attribute>,
+        pub paren_token: token::Paren,
+        pub elems: Punctuated<Pat, Token![,]>,
+    }
+}
+
+ast_struct! {
+    /// A tuple struct or tuple variant pattern: `Variant(x, y, .., z)`.
+    pub struct PatTupleStruct {
+        pub attrs: Vec<Attribute>,
+        pub path: Path,
+        pub paren_token: token::Paren,
+        pub elems: Punctuated<Pat, Token![,]>,
+    }
+}
+
+ast_struct! {
+    /// A type ascription pattern: `foo: f64`.
+    pub struct PatType {
+        pub attrs: Vec<Attribute>,
+        pub pat: Box<Pat>,
+        pub colon_token: Token![:],
+        pub ty: Box<Type>,
+    }
+}
+
+ast_struct! {
+    /// A pattern that matches any value: `_`.
+    pub struct PatWild {
+        pub attrs: Vec<Attribute>,
+        pub underscore_token: Token![_],
+    }
+}
+
+ast_struct! {
+    /// A single field in a struct pattern.
+    ///
+    /// Patterns like the fields of Foo `{ x, ref y, ref mut z }` are treated
+    /// the same as `x: x, y: ref y, z: ref mut z` but there is no colon token.
+    pub struct FieldPat {
+        pub attrs: Vec<Attribute>,
+        pub member: Member,
+        pub colon_token: Option<Token![:]>,
+        pub pat: Box<Pat>,
+    }
+}
+
+mod parsing {
+    use syn::{
+        braced,
+        ext::IdentExt,
+        parenthesized,
+        parse::{ParseStream, Result},
+        punctuated::Punctuated,
+        token, Attribute, ExprPath, Ident, Member, Path, Token,
+    };
+
+    use super::{
+        FieldPat, Pat, PatIdent, PatReference, PatRest, PatStruct, PatTuple, PatTupleStruct,
+        PatWild,
+    };
+    use crate::path;
+
+    impl Pat {
+        /// Parse a pattern that does _not_ involve `|` at the top level.
+        pub fn parse_single(input: ParseStream<'_>) -> Result<Self> {
+            let lookahead = input.lookahead1();
+            if lookahead.peek(Ident)
+                && (input.peek2(Token![::])
+                    || input.peek2(Token![!])
+                    || input.peek2(token::Brace)
+                    || input.peek2(token::Paren)
+                    || input.peek2(Token![..]))
+                || input.peek(Token![self]) && input.peek2(Token![::])
+                || lookahead.peek(Token![::])
+                || lookahead.peek(Token![<])
+                || input.peek(Token![Self])
+                || input.peek(Token![super])
+                || input.peek(Token![crate])
+            {
+                pat_path_or_struct(input)
+            } else if lookahead.peek(Token![_]) {
+                input.call(pat_wild).map(Pat::Wild)
+            } else if lookahead.peek(Token![ref])
+                || lookahead.peek(Token![mut])
+                || input.peek(Token![self])
+                || input.peek(Ident)
+            {
+                input.call(pat_ident).map(Pat::Ident)
+            } else if lookahead.peek(Token![&]) {
+                input.call(pat_reference).map(Pat::Reference)
+            } else if lookahead.peek(token::Paren) {
+                input.call(pat_paren_or_tuple)
+            } else {
+                Err(lookahead.error())
+            }
+        }
+    }
+
+    fn pat_path_or_struct(input: ParseStream<'_>) -> Result<Pat> {
+        let path = path::parse_path(input)?;
+
+        if input.peek(token::Brace) {
+            pat_struct(input, path).map(Pat::Struct)
+        } else if input.peek(token::Paren) {
+            pat_tuple_struct(input, path).map(Pat::TupleStruct)
+        } else {
+            Ok(Pat::Path(ExprPath { attrs: Vec::new(), qself: None, path }))
+        }
+    }
+
+    fn pat_wild(input: ParseStream<'_>) -> Result<PatWild> {
+        Ok(PatWild { attrs: Vec::new(), underscore_token: input.parse()? })
+    }
+
+    fn pat_ident(input: ParseStream<'_>) -> Result<PatIdent> {
+        Ok(PatIdent {
+            attrs: Vec::new(),
+            by_ref: input.parse()?,
+            mutability: input.parse()?,
+            ident: input.call(Ident::parse_any)?,
+        })
+    }
+
+    fn pat_tuple_struct(input: ParseStream<'_>, path: Path) -> Result<PatTupleStruct> {
+        let content;
+        let paren_token = parenthesized!(content in input);
+
+        let mut elems = Punctuated::new();
+        while !content.is_empty() {
+            let value = Pat::parse_single(&content)?;
+            elems.push_value(value);
+            if content.is_empty() {
+                break;
+            }
+            let punct = content.parse()?;
+            elems.push_punct(punct);
+        }
+
+        Ok(PatTupleStruct { attrs: Vec::new(), path, paren_token, elems })
+    }
+
+    fn pat_struct(input: ParseStream<'_>, path: Path) -> Result<PatStruct> {
+        let content;
+        let brace_token = braced!(content in input);
+
+        let mut fields = Punctuated::new();
+        let mut rest = None;
+        while !content.is_empty() {
+            let attrs = content.call(Attribute::parse_outer)?;
+            if content.peek(Token![..]) {
+                rest = Some(PatRest { attrs, dot2_token: content.parse()? });
+                break;
+            }
+            let mut value = content.call(field_pat)?;
+            value.attrs = attrs;
+            fields.push_value(value);
+            if content.is_empty() {
+                break;
+            }
+            let punct: Token![,] = content.parse()?;
+            fields.push_punct(punct);
+        }
+
+        Ok(PatStruct { attrs: Vec::new(), path, brace_token, fields, rest })
+    }
+
+    fn field_pat(input: ParseStream<'_>) -> Result<FieldPat> {
+        let boxed: Option<Token![box]> = input.parse()?;
+        let by_ref: Option<Token![ref]> = input.parse()?;
+        let mutability: Option<Token![mut]> = input.parse()?;
+
+        let member = if boxed.is_some() || by_ref.is_some() || mutability.is_some() {
+            input.parse().map(Member::Named)
+        } else {
+            input.parse()
+        }?;
+
+        if boxed.is_none() && by_ref.is_none() && mutability.is_none() && input.peek(Token![:])
+            || is_unnamed(&member)
+        {
+            return Ok(FieldPat {
+                attrs: Vec::new(),
+                member,
+                colon_token: Some(input.parse()?),
+                pat: Box::new(Pat::parse_single(input)?),
+            });
+        }
+
+        let ident = match member {
+            Member::Named(ident) => ident,
+            Member::Unnamed(_) => unreachable!(),
+        };
+
+        let pat =
+            Pat::Ident(PatIdent { attrs: Vec::new(), by_ref, mutability, ident: ident.clone() });
+
+        Ok(FieldPat {
+            attrs: Vec::new(),
+            member: Member::Named(ident),
+            colon_token: None,
+            pat: Box::new(pat),
+        })
+    }
+
+    fn pat_paren_or_tuple(input: ParseStream<'_>) -> Result<Pat> {
+        let content;
+        let paren_token = parenthesized!(content in input);
+
+        let mut elems = Punctuated::new();
+        while !content.is_empty() {
+            let value = Pat::parse_single(&content)?;
+            if content.is_empty() {
+                elems.push_value(value);
+                break;
+            }
+            elems.push_value(value);
+            let punct = content.parse()?;
+            elems.push_punct(punct);
+        }
+
+        Ok(Pat::Tuple(PatTuple { attrs: Vec::new(), paren_token, elems }))
+    }
+
+    fn pat_reference(input: ParseStream<'_>) -> Result<PatReference> {
+        Ok(PatReference {
+            attrs: Vec::new(),
+            and_token: input.parse()?,
+            mutability: input.parse()?,
+            pat: Box::new(Pat::parse_single(input)?),
+        })
+    }
+
+    fn is_unnamed(member: &Member) -> bool {
+        match member {
+            Member::Named(_) => false,
+            Member::Unnamed(_) => true,
+        }
+    }
+}
+
+mod printing {
+    use proc_macro2::TokenStream;
+    use quote::{ToTokens, TokenStreamExt};
+    use syn::Token;
+
+    use super::{
+        FieldPat, PatIdent, PatReference, PatRest, PatStruct, PatTuple, PatTupleStruct, PatType,
+        PatWild,
+    };
+
+    impl ToTokens for PatIdent {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            self.by_ref.to_tokens(tokens);
+            self.mutability.to_tokens(tokens);
+            self.ident.to_tokens(tokens);
+        }
+    }
+
+    impl ToTokens for PatReference {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            self.and_token.to_tokens(tokens);
+            self.mutability.to_tokens(tokens);
+            self.pat.to_tokens(tokens);
+        }
+    }
+
+    impl ToTokens for PatRest {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            self.dot2_token.to_tokens(tokens);
+        }
+    }
+
+    impl ToTokens for PatStruct {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            self.path.to_tokens(tokens);
+            self.brace_token.surround(tokens, |tokens| {
+                self.fields.to_tokens(tokens);
+                // Note: We need a comma before the dot2 token if it is present.
+                if !self.fields.empty_or_trailing() && self.rest.is_some() {
+                    <Token![,]>::default().to_tokens(tokens);
+                }
+                self.rest.to_tokens(tokens);
+            });
+        }
+    }
+
+    impl ToTokens for PatTuple {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            self.paren_token.surround(tokens, |tokens| {
+                self.elems.to_tokens(tokens);
+            });
+        }
+    }
+
+    impl ToTokens for PatTupleStruct {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            self.path.to_tokens(tokens);
+            self.paren_token.surround(tokens, |tokens| {
+                self.elems.to_tokens(tokens);
+            });
+        }
+    }
+
+    impl ToTokens for PatType {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            self.pat.to_tokens(tokens);
+            self.colon_token.to_tokens(tokens);
+            self.ty.to_tokens(tokens);
+        }
+    }
+
+    impl ToTokens for PatWild {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            self.underscore_token.to_tokens(tokens);
+        }
+    }
+
+    impl ToTokens for FieldPat {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            tokens.append_all(&self.attrs);
+            if let Some(colon_token) = &self.colon_token {
+                self.member.to_tokens(tokens);
+                colon_token.to_tokens(tokens);
+            }
+            self.pat.to_tokens(tokens);
+        }
+    }
+}
diff --git a/crates/syn-mid/src/path.rs b/crates/syn-mid/src/path.rs
new file mode 100644
index 0000000..a643b74
--- /dev/null
+++ b/crates/syn-mid/src/path.rs
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// Based on https://github.com/dtolnay/syn/blob/2.0.37/src/path.rs.
+
+use syn::{
+    ext::IdentExt,
+    parse::{ParseStream, Result},
+    punctuated::Punctuated,
+    Ident, Path, PathArguments, PathSegment, Token,
+};
+
+fn parse_path_segment(input: ParseStream<'_>) -> Result<PathSegment> {
+    if input.peek(Token![super]) || input.peek(Token![self]) || input.peek(Token![crate]) {
+        let ident = input.call(Ident::parse_any)?;
+        return Ok(PathSegment::from(ident));
+    }
+
+    let ident =
+        if input.peek(Token![Self]) { input.call(Ident::parse_any)? } else { input.parse()? };
+
+    if input.peek(Token![::]) && input.peek3(Token![<]) {
+        Ok(PathSegment { ident, arguments: PathArguments::AngleBracketed(input.parse()?) })
+    } else {
+        Ok(PathSegment::from(ident))
+    }
+}
+
+pub(crate) fn parse_path(input: ParseStream<'_>) -> Result<Path> {
+    Ok(Path {
+        leading_colon: input.parse()?,
+        segments: {
+            let mut segments = Punctuated::new();
+            let value = parse_path_segment(input)?;
+            segments.push_value(value);
+            while input.peek(Token![::]) {
+                let punct: Token![::] = input.parse()?;
+                segments.push_punct(punct);
+                let value = parse_path_segment(input)?;
+                segments.push_value(value);
+            }
+            segments
+        },
+    })
+}
diff --git a/crates/sync_wrapper/.cargo-checksum.json b/crates/sync_wrapper/.cargo-checksum.json
new file mode 100644
index 0000000..dd44981
--- /dev/null
+++ b/crates/sync_wrapper/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"7fa965b7d5c0bff8117f796e8855ee207006e5c2641ce26a7e570a751d88a88b","LICENSE":"0d542e0c8804e39aa7f37eb00da5a762149dc682d7829451287e11b938e94594","README.md":"61e995daa67a37597f76b78ca3c61916a42a66034f01e9473ee7b7753029ca3a","src/lib.rs":"824fa08776b004a2315172fe5ed23dcf14315bfb01bb2115a663ed70a4aeac30"},"package":"a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394"}
\ No newline at end of file
diff --git a/crates/sync_wrapper/Android.bp b/crates/sync_wrapper/Android.bp
new file mode 100644
index 0000000..0551ed3
--- /dev/null
+++ b/crates/sync_wrapper/Android.bp
@@ -0,0 +1,30 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_sync_wrapper_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_sync_wrapper_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libsync_wrapper",
+    host_supported: true,
+    crate_name: "sync_wrapper",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.0.1",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
diff --git a/crates/sync_wrapper/Cargo.lock b/crates/sync_wrapper/Cargo.lock
new file mode 100644
index 0000000..21d38e9
--- /dev/null
+++ b/crates/sync_wrapper/Cargo.lock
@@ -0,0 +1,169 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "autocfg"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+
+[[package]]
+name = "futures"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
+
+[[package]]
+name = "futures-executor"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "futures-sink"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"
+
+[[package]]
+name = "futures-task"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
+
+[[package]]
+name = "futures-util"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-macro",
+ "futures-sink",
+ "futures-task",
+ "memchr",
+ "pin-project-lite",
+ "pin-utils",
+ "slab",
+]
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "pin-project-lite"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "slab"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "sync_wrapper"
+version = "1.0.1"
+dependencies = [
+ "futures",
+ "futures-core",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
diff --git a/crates/sync_wrapper/Cargo.toml b/crates/sync_wrapper/Cargo.toml
new file mode 100644
index 0000000..327ec67
--- /dev/null
+++ b/crates/sync_wrapper/Cargo.toml
@@ -0,0 +1,41 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "sync_wrapper"
+version = "1.0.1"
+authors = ["Actyx AG <developer@actyx.io>"]
+description = "A tool for enlisting the compiler's help in proving the absence of concurrency"
+homepage = "https://docs.rs/sync_wrapper"
+documentation = "https://docs.rs/sync_wrapper"
+readme = "README.md"
+keywords = ["concurrency"]
+categories = ["concurrency"]
+license = "Apache-2.0"
+repository = "https://github.com/Actyx/sync_wrapper"
+
+[package.metadata.docs.rs]
+features = ["futures"]
+
+[dependencies.futures-core]
+version = "0.3"
+optional = true
+default-features = false
+
+[dev-dependencies.futures]
+version = "0.3"
+
+[dev-dependencies.pin-project-lite]
+version = "0.2.7"
+
+[features]
+futures = ["futures-core"]
diff --git a/crates/sync_wrapper/LICENSE b/crates/sync_wrapper/LICENSE
new file mode 100644
index 0000000..f433b1a
--- /dev/null
+++ b/crates/sync_wrapper/LICENSE
@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
diff --git a/crates/sync_wrapper/METADATA b/crates/sync_wrapper/METADATA
new file mode 100644
index 0000000..82a2319
--- /dev/null
+++ b/crates/sync_wrapper/METADATA
@@ -0,0 +1,20 @@
+name: "sync_wrapper"
+description: "A tool for enlisting the compiler\'s help in proving the absence of concurrency"
+third_party {
+  identifier {
+    type: "crates.io"
+    value: "sync_wrapper"
+  }
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/sync_wrapper/sync_wrapper-1.0.1.crate"
+    primary_source: true
+  }
+  version: "1.0.1"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2024
+    month: 6
+    day: 3
+  }
+}
diff --git a/crates/sync_wrapper/MODULE_LICENSE_APACHE2 b/crates/sync_wrapper/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/sync_wrapper/MODULE_LICENSE_APACHE2
diff --git a/crates/sync_wrapper/README.md b/crates/sync_wrapper/README.md
new file mode 100644
index 0000000..20261c9
--- /dev/null
+++ b/crates/sync_wrapper/README.md
@@ -0,0 +1,8 @@
+[![Latest Version](https://img.shields.io/crates/v/sync_wrapper.svg)](https://crates.io/crates/sync_wrapper)
+[![Rust Documentation](https://docs.rs/sync_wrapper/badge.svg)](https://docs.rs/sync_wrapper)
+
+# SyncWrapper
+
+A mutual exclusion primitive that relies on static type information only.
+
+This library is inspired by [this discussion](https://internals.rust-lang.org/t/what-shall-sync-mean-across-an-await/12020/2).
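A minimal sketch (illustrative, not part of the upstream README) of what "relies on static type information only" means in practice: `Cell<i32>` is not `Sync`, yet `SyncWrapper<Cell<i32>>` is, because every access goes through `&mut` or ownership.

```rust
use std::cell::Cell;
use sync_wrapper::SyncWrapper;

fn assert_sync<T: Sync>(_: &T) {}

fn main() {
    // Cell<i32> is !Sync, but the wrapper is Sync for any T: every read and
    // write goes through `&mut` or ownership, so shared access cannot race.
    let mut wrapped = SyncWrapper::new(Cell::new(1));
    assert_sync(&wrapped);

    wrapped.get_mut().set(2);
    assert_eq!(wrapped.into_inner().into_inner(), 2);
}
```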
diff --git a/crates/sync_wrapper/cargo_embargo.json b/crates/sync_wrapper/cargo_embargo.json
new file mode 100644
index 0000000..cb908d7
--- /dev/null
+++ b/crates/sync_wrapper/cargo_embargo.json
@@ -0,0 +1,3 @@
+{
+  "run_cargo": false
+}
diff --git a/crates/sync_wrapper/patches/std.diff b/crates/sync_wrapper/patches/std.diff
new file mode 100644
index 0000000..758a120
--- /dev/null
+++ b/crates/sync_wrapper/patches/std.diff
@@ -0,0 +1,15 @@
+diff --git a/src/lib.rs b/src/lib.rs
+index 2800d9a..ccd8454 100644
+--- a/src/lib.rs
++++ b/src/lib.rs
+@@ -20,6 +20,10 @@
+ #![doc(html_favicon_url = "https://developer.actyx.com/img/favicon.ico")]
+ #![no_std]
+ 
++// ANDROID: Use std to allow building as a dylib.
++#[cfg(android_dylib)]
++extern crate std;
++
+ use core::{
+     fmt::{self, Debug, Formatter},
+     pin::Pin,
diff --git a/crates/sync_wrapper/src/lib.rs b/crates/sync_wrapper/src/lib.rs
new file mode 100644
index 0000000..ccd8454
--- /dev/null
+++ b/crates/sync_wrapper/src/lib.rs
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2020 Actyx AG
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//! A mutual exclusion primitive that relies on static type information only
+//!
+//! This library is inspired by [this discussion](https://internals.rust-lang.org/t/what-shall-sync-mean-across-an-await/12020/2).
+#![doc(html_logo_url = "https://developer.actyx.com/img/logo.svg")]
+#![doc(html_favicon_url = "https://developer.actyx.com/img/favicon.ico")]
+#![no_std]
+
+// ANDROID: Use std to allow building as a dylib.
+#[cfg(android_dylib)]
+extern crate std;
+
+use core::{
+    fmt::{self, Debug, Formatter},
+    pin::Pin,
+    future::Future,
+    task::{Context, Poll},
+};
+
+/// A mutual exclusion primitive that relies on static type information only
+///
+/// In some cases synchronization can be proven statically: whenever you hold an exclusive `&mut`
+/// reference, the Rust type system ensures that no other part of the program can hold another
+/// reference to the data. Therefore it is safe to access it even if the current thread obtained
+/// this reference via a channel. Whenever this is the case, the overhead of allocating and locking
+/// a [`Mutex`] can be avoided by using this static version.
+///
+/// One example where this is often applicable is [`Future`], which requires an exclusive reference
+/// for its [`poll`] method: While a given `Future` implementation may not be safe to access by
+/// multiple threads concurrently, the executor can only run the `Future` on one thread at any
+/// given time, making it [`Sync`] in practice as long as the implementation is `Send`. You can
+/// therefore use the static mutex to prove that your data structure is `Sync` even though it
+/// contains such a `Future`.
+///
+/// # Example
+///
+/// ```
+/// use sync_wrapper::SyncWrapper;
+/// use std::future::Future;
+///
+/// struct MyThing {
+///     future: SyncWrapper<Box<dyn Future<Output = String> + Send>>,
+/// }
+///
+/// impl MyThing {
+///     // all accesses to `self.future` now require an exclusive reference or ownership
+/// }
+///
+/// fn assert_sync<T: Sync>() {}
+///
+/// assert_sync::<MyThing>();
+/// ```
+///
+/// [`Mutex`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html
+/// [`Future`]: https://doc.rust-lang.org/std/future/trait.Future.html
+/// [`poll`]: https://doc.rust-lang.org/std/future/trait.Future.html#method.poll
+/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
+#[repr(transparent)]
+pub struct SyncWrapper<T>(T);
+
+impl<T> SyncWrapper<T> {
+    /// Creates a new static mutex containing the given value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use sync_wrapper::SyncWrapper;
+    ///
+    /// let mutex = SyncWrapper::new(42);
+    /// ```
+    pub const fn new(value: T) -> Self {
+        Self(value)
+    }
+
+    /// Acquires a reference to the protected value.
+    ///
+    /// This is safe because it requires an exclusive reference to the mutex. Therefore this method
+    /// neither panics nor returns an error. This is in contrast to [`Mutex::get_mut`] which
+    /// returns an error if another thread panicked while holding the lock. It is not recommended
+    /// to send an exclusive reference to a potentially damaged value to another thread for further
+    /// processing.
+    ///
+    /// [`Mutex::get_mut`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.get_mut
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use sync_wrapper::SyncWrapper;
+    ///
+    /// let mut mutex = SyncWrapper::new(42);
+    /// let value = mutex.get_mut();
+    /// *value = 0;
+    /// assert_eq!(*mutex.get_mut(), 0);
+    /// ```
+    pub fn get_mut(&mut self) -> &mut T {
+        &mut self.0
+    }
+
+    /// Acquires a pinned reference to the protected value.
+    ///
+    /// See [`Self::get_mut`] for why this method is safe.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::future::Future;
+    /// use std::pin::Pin;
+    /// use std::task::{Context, Poll};
+    ///
+    /// use pin_project_lite::pin_project;
+    /// use sync_wrapper::SyncWrapper;
+    ///
+    /// pin_project! {
+    ///     struct FutureWrapper<F> {
+    ///         #[pin]
+    ///         inner: SyncWrapper<F>,
+    ///     }
+    /// }
+    ///
+    /// impl<F: Future> Future for FutureWrapper<F> {
+    ///     type Output = F::Output;
+    ///
+    ///     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+    ///         self.project().inner.get_pin_mut().poll(cx)
+    ///     }
+    /// }
+    /// ```
+    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> {
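+        // SAFETY: structural pin projection to the only field; the wrapped
+        // value is never moved out of the returned reference while pinned.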
+        unsafe { Pin::map_unchecked_mut(self, |this| &mut this.0) }
+    }
+
+    /// Consumes this mutex, returning the underlying data.
+    ///
+    /// This is safe because it requires ownership of the mutex; therefore this method will neither
+    /// panic nor return an error. This is in contrast to [`Mutex::into_inner`] which
+    /// returns an error if another thread panicked while holding the lock. It is not recommended
+    /// to send an exclusive reference to a potentially damaged value to another thread for further
+    /// processing.
+    ///
+    /// [`Mutex::into_inner`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.into_inner
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use sync_wrapper::SyncWrapper;
+    ///
+    /// let mut mutex = SyncWrapper::new(42);
+    /// assert_eq!(mutex.into_inner(), 42);
+    /// ```
+    pub fn into_inner(self) -> T {
+        self.0
+    }
+}
+
+// this is safe because the only operations permitted on this data structure require exclusive
+// access or ownership
+unsafe impl<T> Sync for SyncWrapper<T> {}
+
+impl<T> Debug for SyncWrapper<T> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.pad("SyncWrapper")
+    }
+}
+
+impl<T: Default> Default for SyncWrapper<T> {
+    fn default() -> Self {
+        Self::new(T::default())
+    }
+}
+
+impl<T> From<T> for SyncWrapper<T> {
+    fn from(value: T) -> Self {
+        Self::new(value)
+    }
+}
+
+/// `Future` which is `Sync`.
+///
+/// # Examples
+///
+/// ```
+/// use sync_wrapper::{SyncWrapper, SyncFuture};
+///
+/// let fut = async { 1 };
+/// let fut = SyncFuture::new(fut);
+/// ```
+pub struct SyncFuture<F> {
+    inner: SyncWrapper<F>
+}
+impl <F: Future> SyncFuture<F> {
+    pub fn new(inner: F) -> Self {
+        Self { inner: SyncWrapper::new(inner) }
+    }
+    pub fn into_inner(self) -> F {
+        self.inner.into_inner()
+    }
+}
+impl <F: Future> Future for SyncFuture<F> {
+    type Output = F::Output;
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
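+        // SAFETY: pin projection to the wrapped future; `inner` is
+        // structurally pinned and never moved out while `self` is pinned.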
+        let inner = unsafe { self.map_unchecked_mut(|x| x.inner.get_mut()) };
+        inner.poll(cx)
+    }
+}
+
+/// `Stream` which is `Sync`.
+///
+/// # Examples
+///
+/// ```
+/// use sync_wrapper::SyncStream;
+/// use futures::stream;
+///
+/// let st = stream::iter(vec![1]);
+/// let st = SyncStream::new(st);
+/// ```
+#[cfg(feature = "futures")]
+pub struct SyncStream<S> {
+    inner: SyncWrapper<S>
+}
+#[cfg(feature = "futures")]
+impl <S: futures_core::Stream> SyncStream<S> {
+    pub fn new(inner: S) -> Self {
+        Self { inner: SyncWrapper::new(inner) }
+    }
+    pub fn into_inner(self) -> S {
+        self.inner.into_inner()
+    }
+}
+#[cfg(feature = "futures")]
+impl <S: futures_core::Stream> futures_core::Stream for SyncStream<S> {
+    type Item = S::Item;
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let inner = unsafe { self.map_unchecked_mut(|x| x.inner.get_mut()) };
+        inner.poll_next(cx)
+    }
+}
+
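A short illustration (not part of this change) of the `SyncFuture` wrapper defined above: an `async move` block that captures a `Cell` produces a future that is not `Sync`, and wrapping it restores `Sync`, since polling requires `Pin<&mut Self>` (exclusive access).

```rust
use std::cell::Cell;
use sync_wrapper::SyncFuture;

fn assert_sync<T: Sync>(_: &T) {}

fn main() {
    let counter = Cell::new(1);
    // The future captures `counter`; Cell is !Sync, so the future is !Sync.
    let fut = async move { counter.get() + 1 };
    // assert_sync(&fut); // would not compile

    // Wrapping restores Sync: the only way to poll is through Pin<&mut Self>.
    let fut = SyncFuture::new(fut);
    assert_sync(&fut);
}
```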
diff --git a/crates/synstructure/.cargo-checksum.json b/crates/synstructure/.cargo-checksum.json
new file mode 100644
index 0000000..bb02a4c
--- /dev/null
+++ b/crates/synstructure/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"b9f3ce31a9ac80dc24f4afac7108b7cff44399f8d7503dc4addea4d256431a73","LICENSE":"219920e865eee70b7dcfc948a86b099e7f4fe2de01bcca2ca9a20c0a033f2b59","README.md":"a528e7356db49ea813c3290dd4f6b15d8e6c0a870cfc07a2df0f3d1381c575bf","src/lib.rs":"b8d9885399b22a5ee92b51e4ca757427da07775e940b25e1573180d9e8faf7d0","src/macros.rs":"e7cf1808faf5dac5ca25bd40ad99e95c2aab4f9899bd9327898761ea86271f7c"},"package":"f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f"}
\ No newline at end of file
diff --git a/crates/synstructure/Android.bp b/crates/synstructure/Android.bp
new file mode 100644
index 0000000..0ab7526
--- /dev/null
+++ b/crates/synstructure/Android.bp
@@ -0,0 +1,32 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_synstructure_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_synstructure_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-MIT"],
+    license_text: ["LICENSE"],
+}
+
+rust_library_host {
+    name: "libsynstructure",
+    host_cross_supported: false,
+    crate_name: "synstructure",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.12.6",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    features: ["proc-macro"],
+    rustlibs: [
+        "libproc_macro2",
+        "libquote",
+        "libsyn",
+        "libunicode_xid",
+    ],
+    compile_multilib: "first",
+}
diff --git a/crates/synstructure/Cargo.lock b/crates/synstructure/Cargo.lock
new file mode 100644
index 0000000..9ed42bd
--- /dev/null
+++ b/crates/synstructure/Cargo.lock
@@ -0,0 +1,61 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "synstructure"
+version = "0.12.6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "synstructure_test_traits",
+ "unicode-xid",
+]
+
+[[package]]
+name = "synstructure_test_traits"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c6e0c1828b7994d45bbef3cbbe56a6157c42db92b82b1a5bf22fb09996d06eac"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a"
diff --git a/crates/synstructure/Cargo.toml b/crates/synstructure/Cargo.toml
new file mode 100644
index 0000000..0339856
--- /dev/null
+++ b/crates/synstructure/Cargo.toml
@@ -0,0 +1,44 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "synstructure"
+version = "0.12.6"
+authors = ["Nika Layzell <nika@thelayzells.com>"]
+include = ["src/**/*", "Cargo.toml", "README.md", "LICENSE"]
+description = "Helper methods and macros for custom derives"
+documentation = "https://docs.rs/synstructure"
+readme = "README.md"
+keywords = ["syn", "macros", "derive", "expand_substructure", "enum"]
+license = "MIT"
+repository = "https://github.com/mystor/synstructure"
+[dependencies.proc-macro2]
+version = "1"
+default-features = false
+
+[dependencies.quote]
+version = "1"
+default-features = false
+
+[dependencies.syn]
+version = "1"
+features = ["derive", "parsing", "printing", "clone-impls", "visit", "extra-traits"]
+default-features = false
+
+[dependencies.unicode-xid]
+version = "0.2"
+[dev-dependencies.synstructure_test_traits]
+version = "0.1"
+
+[features]
+default = ["proc-macro"]
+proc-macro = ["proc-macro2/proc-macro", "syn/proc-macro", "quote/proc-macro"]
diff --git a/crates/synstructure/LICENSE b/crates/synstructure/LICENSE
new file mode 100644
index 0000000..f78f1c1
--- /dev/null
+++ b/crates/synstructure/LICENSE
@@ -0,0 +1,7 @@
+Copyright 2016 Nika Layzell
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/crates/synstructure/METADATA b/crates/synstructure/METADATA
new file mode 100644
index 0000000..760b301
--- /dev/null
+++ b/crates/synstructure/METADATA
@@ -0,0 +1,19 @@
+name: "synstructure"
+description: "Helper methods and macros for custom derives"
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/synstructure"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/synstructure/synstructure-0.12.6.crate"
+  }
+  version: "0.12.6"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2022
+    month: 9
+    day: 6
+  }
+}
diff --git a/crates/synstructure/MODULE_LICENSE_MIT b/crates/synstructure/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/synstructure/MODULE_LICENSE_MIT
diff --git a/crates/synstructure/README.md b/crates/synstructure/README.md
new file mode 100644
index 0000000..b72b7bd
--- /dev/null
+++ b/crates/synstructure/README.md
@@ -0,0 +1,159 @@
+# synstructure
+
+[![Latest Version](https://img.shields.io/crates/v/synstructure.svg)](https://crates.io/crates/synstructure)
+[![Documentation](https://docs.rs/synstructure/badge.svg)](https://docs.rs/synstructure)
+[![Build Status](https://travis-ci.org/mystor/synstructure.svg?branch=master)](https://travis-ci.org/mystor/synstructure)
+[![Rustc Version 1.31+](https://img.shields.io/badge/rustc-1.31+-lightgray.svg)](https://blog.rust-lang.org/2018/12/06/Rust-1.31-and-rust-2018.html)
+
+> NOTE: What follows is an excerpt from the module-level documentation. For full
+> details read the docs on [docs.rs](https://docs.rs/synstructure/)
+
+This crate provides helper types for matching against enum variants, and
+extracting bindings to each of the fields in the deriving Struct or Enum in
+a generic way.
+
+If you are writing a `#[derive]` which needs to perform some operation on
+every field, then you have come to the right place!
+
+# Example: `WalkFields`
+### Trait Implementation
+```rust
+pub trait WalkFields: std::any::Any {
+    fn walk_fields(&self, walk: &mut FnMut(&WalkFields));
+}
+impl WalkFields for i32 {
+    fn walk_fields(&self, _walk: &mut FnMut(&WalkFields)) {}
+}
+```
+
+### Custom Derive
+```rust
+#[macro_use]
+extern crate synstructure;
+#[macro_use]
+extern crate quote;
+extern crate proc_macro2;
+
+fn walkfields_derive(s: synstructure::Structure) -> proc_macro2::TokenStream {
+    let body = s.each(|bi| quote!{
+        walk(#bi)
+    });
+
+    s.bound_impl(quote!(example_traits::WalkFields), quote!{
+        fn walk_fields(&self, walk: &mut FnMut(&example_traits::WalkFields)) {
+            match *self { #body }
+        }
+    })
+}
+decl_derive!([WalkFields] => walkfields_derive);
+
+/*
+ * Test Case
+ */
+fn main() {
+    test_derive! {
+        walkfields_derive {
+            enum A<T> {
+                B(i32, T),
+                C(i32),
+            }
+        }
+        expands to {
+            #[allow(non_upper_case_globals)]
+            const _DERIVE_example_traits_WalkFields_FOR_A: () = {
+                extern crate example_traits;
+                impl<T> example_traits::WalkFields for A<T>
+                    where T: example_traits::WalkFields
+                {
+                    fn walk_fields(&self, walk: &mut FnMut(&example_traits::WalkFields)) {
+                        match *self {
+                            A::B(ref __binding_0, ref __binding_1,) => {
+                                { walk(__binding_0) }
+                                { walk(__binding_1) }
+                            }
+                            A::C(ref __binding_0,) => {
+                                { walk(__binding_0) }
+                            }
+                        }
+                    }
+                }
+            };
+        }
+    }
+}
+```
+
+# Example: `Interest`
+### Trait Implementation
+```rust
+pub trait Interest {
+    fn interesting(&self) -> bool;
+}
+impl Interest for i32 {
+    fn interesting(&self) -> bool { *self > 0 }
+}
+```
+
+### Custom Derive
+```rust
+#[macro_use]
+extern crate synstructure;
+#[macro_use]
+extern crate quote;
+extern crate proc_macro2;
+
+fn interest_derive(mut s: synstructure::Structure) -> proc_macro2::TokenStream {
+    let body = s.fold(false, |acc, bi| quote!{
+        #acc || example_traits::Interest::interesting(#bi)
+    });
+
+    s.bound_impl(quote!(example_traits::Interest), quote!{
+        fn interesting(&self) -> bool {
+            match *self {
+                #body
+            }
+        }
+    })
+}
+decl_derive!([Interest] => interest_derive);
+
+/*
+ * Test Case
+ */
+fn main() {
+    test_derive!{
+        interest_derive {
+            enum A<T> {
+                B(i32, T),
+                C(i32),
+            }
+        }
+        expands to {
+            #[allow(non_upper_case_globals)]
+            const _DERIVE_example_traits_Interest_FOR_A: () = {
+                extern crate example_traits;
+                impl<T> example_traits::Interest for A<T>
+                    where T: example_traits::Interest
+                {
+                    fn interesting(&self) -> bool {
+                        match *self {
+                            A::B(ref __binding_0, ref __binding_1,) => {
+                                false ||
+                                    example_traits::Interest::interesting(__binding_0) ||
+                                    example_traits::Interest::interesting(__binding_1)
+                            }
+                            A::C(ref __binding_0,) => {
+                                false ||
+                                    example_traits::Interest::interesting(__binding_0)
+                            }
+                        }
+                    }
+                }
+            };
+        }
+    }
+}
+```
+
+For more example usage, consider investigating the `abomonation_derive` crate,
+which makes use of this crate, and is fairly simple.
diff --git a/crates/synstructure/TEST_MAPPING b/crates/synstructure/TEST_MAPPING
new file mode 100644
index 0000000..5f54cfa
--- /dev/null
+++ b/crates/synstructure/TEST_MAPPING
@@ -0,0 +1,17 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/zeroize"
+    },
+    {
+      "path": "external/uwb/src"
+    },
+    {
+      "path": "system/keymint/derive"
+    },
+    {
+      "path": "system/keymint/hal"
+    }
+  ]
+}
diff --git a/crates/synstructure/cargo_embargo.json b/crates/synstructure/cargo_embargo.json
new file mode 100644
index 0000000..e672222
--- /dev/null
+++ b/crates/synstructure/cargo_embargo.json
@@ -0,0 +1,12 @@
+{
+  "features": [
+    "proc-macro"
+  ],
+  "package": {
+    "synstructure": {
+      "device_supported": false,
+      "host_first_multilib": true
+    }
+  },
+  "run_cargo": false
+}
diff --git a/crates/synstructure/src/lib.rs b/crates/synstructure/src/lib.rs
new file mode 100644
index 0000000..2cd2b88
--- /dev/null
+++ b/crates/synstructure/src/lib.rs
@@ -0,0 +1,2488 @@
+//! This crate provides helper types for matching against enum variants, and
+//! extracting bindings to each of the fields in the deriving Struct or Enum in
+//! a generic way.
+//!
+//! If you are writing a `#[derive]` which needs to perform some operation on
+//! every field, then you have come to the right place!
+//!
+//! # Example: `WalkFields`
+//! ### Trait Implementation
+//! ```
+//! pub trait WalkFields: std::any::Any {
+//!     fn walk_fields(&self, walk: &mut FnMut(&WalkFields));
+//! }
+//! impl WalkFields for i32 {
+//!     fn walk_fields(&self, _walk: &mut FnMut(&WalkFields)) {}
+//! }
+//! ```
+//!
+//! ### Custom Derive
+//! ```
+//! # use quote::quote;
+//! fn walkfields_derive(s: synstructure::Structure) -> proc_macro2::TokenStream {
+//!     let body = s.each(|bi| quote!{
+//!         walk(#bi)
+//!     });
+//!
+//!     s.gen_impl(quote! {
+//!         extern crate synstructure_test_traits;
+//!
+//!         gen impl synstructure_test_traits::WalkFields for @Self {
+//!             fn walk_fields(&self, walk: &mut FnMut(&synstructure_test_traits::WalkFields)) {
+//!                 match *self { #body }
+//!             }
+//!         }
+//!     })
+//! }
+//! # const _IGNORE: &'static str = stringify!(
+//! synstructure::decl_derive!([WalkFields] => walkfields_derive);
+//! # );
+//!
+//! /*
+//!  * Test Case
+//!  */
+//! fn main() {
+//!     synstructure::test_derive! {
+//!         walkfields_derive {
+//!             enum A<T> {
+//!                 B(i32, T),
+//!                 C(i32),
+//!             }
+//!         }
+//!         expands to {
+//!             #[allow(non_upper_case_globals)]
+//!             const _DERIVE_synstructure_test_traits_WalkFields_FOR_A: () = {
+//!                 extern crate synstructure_test_traits;
+//!                 impl<T> synstructure_test_traits::WalkFields for A<T>
+//!                     where T: synstructure_test_traits::WalkFields
+//!                 {
+//!                     fn walk_fields(&self, walk: &mut FnMut(&synstructure_test_traits::WalkFields)) {
+//!                         match *self {
+//!                             A::B(ref __binding_0, ref __binding_1,) => {
+//!                                 { walk(__binding_0) }
+//!                                 { walk(__binding_1) }
+//!                             }
+//!                             A::C(ref __binding_0,) => {
+//!                                 { walk(__binding_0) }
+//!                             }
+//!                         }
+//!                     }
+//!                 }
+//!             };
+//!         }
+//!     }
+//! }
+//! ```
+//!
+//! # Example: `Interest`
+//! ### Trait Implementation
+//! ```
+//! pub trait Interest {
+//!     fn interesting(&self) -> bool;
+//! }
+//! impl Interest for i32 {
+//!     fn interesting(&self) -> bool { *self > 0 }
+//! }
+//! ```
+//!
+//! ### Custom Derive
+//! ```
+//! # use quote::quote;
+//! fn interest_derive(mut s: synstructure::Structure) -> proc_macro2::TokenStream {
+//!     let body = s.fold(false, |acc, bi| quote!{
+//!         #acc || synstructure_test_traits::Interest::interesting(#bi)
+//!     });
+//!
+//!     s.gen_impl(quote! {
+//!         extern crate synstructure_test_traits;
+//!         gen impl synstructure_test_traits::Interest for @Self {
+//!             fn interesting(&self) -> bool {
+//!                 match *self {
+//!                     #body
+//!                 }
+//!             }
+//!         }
+//!     })
+//! }
+//! # const _IGNORE: &'static str = stringify!(
+//! synstructure::decl_derive!([Interest] => interest_derive);
+//! # );
+//!
+//! /*
+//!  * Test Case
+//!  */
+//! fn main() {
+//!     synstructure::test_derive!{
+//!         interest_derive {
+//!             enum A<T> {
+//!                 B(i32, T),
+//!                 C(i32),
+//!             }
+//!         }
+//!         expands to {
+//!             #[allow(non_upper_case_globals)]
+//!             const _DERIVE_synstructure_test_traits_Interest_FOR_A: () = {
+//!                 extern crate synstructure_test_traits;
+//!                 impl<T> synstructure_test_traits::Interest for A<T>
+//!                     where T: synstructure_test_traits::Interest
+//!                 {
+//!                     fn interesting(&self) -> bool {
+//!                         match *self {
+//!                             A::B(ref __binding_0, ref __binding_1,) => {
+//!                                 false ||
+//!                                     synstructure_test_traits::Interest::interesting(__binding_0) ||
+//!                                     synstructure_test_traits::Interest::interesting(__binding_1)
+//!                             }
+//!                             A::C(ref __binding_0,) => {
+//!                                 false ||
+//!                                     synstructure_test_traits::Interest::interesting(__binding_0)
+//!                             }
+//!                         }
+//!                     }
+//!                 }
+//!             };
+//!         }
+//!     }
+//! }
+//! ```
+//!
+//! For more example usage, consider investigating the `abomonation_derive` crate,
+//! which makes use of this crate, and is fairly simple.
+
+#[cfg(all(
+    not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))),
+    feature = "proc-macro"
+))]
+extern crate proc_macro;
+
+use std::collections::HashSet;
+
+use syn::parse::{ParseStream, Parser};
+use syn::visit::{self, Visit};
+use syn::{
+    braced, punctuated, token, Attribute, Data, DeriveInput, Error, Expr, Field, Fields,
+    FieldsNamed, FieldsUnnamed, GenericParam, Generics, Ident, PredicateType, Result, Token,
+    TraitBound, Type, TypeMacro, TypeParamBound, TypePath, WhereClause, WherePredicate,
+};
+
+use quote::{format_ident, quote_spanned, ToTokens};
+// re-export the quote! macro so we can depend on it being around in our macro's
+// implementations.
+#[doc(hidden)]
+pub use quote::quote;
+
+use unicode_xid::UnicodeXID;
+
+use proc_macro2::{Span, TokenStream, TokenTree};
+
+// NOTE: This module has documentation hidden, as it only exports macros (which
+// always appear in the root of the crate) and helper methods / re-exports used
+// in the implementation of those macros.
+#[doc(hidden)]
+pub mod macros;
+
+/// Changes how bounds are added
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub enum AddBounds {
+    /// Add for fields and generics
+    Both,
+    /// Fields only
+    Fields,
+    /// Generics only
+    Generics,
+    /// None
+    None,
+    #[doc(hidden)]
+    __Nonexhaustive,
+}
+
+/// The type of binding to use when generating a pattern.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub enum BindStyle {
+    /// `x`
+    Move,
+    /// `mut x`
+    MoveMut,
+    /// `ref x`
+    Ref,
+    /// `ref mut x`
+    RefMut,
+}
+
+impl ToTokens for BindStyle {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        match self {
+            BindStyle::Move => {}
+            BindStyle::MoveMut => quote_spanned!(Span::call_site() => mut).to_tokens(tokens),
+            BindStyle::Ref => quote_spanned!(Span::call_site() => ref).to_tokens(tokens),
+            BindStyle::RefMut => quote_spanned!(Span::call_site() => ref mut).to_tokens(tokens),
+        }
+    }
+}
+
+// Internal method for merging seen_generics arrays together.
+fn generics_fuse(res: &mut Vec<bool>, new: &[bool]) {
+    for (i, &flag) in new.iter().enumerate() {
+        if i == res.len() {
+            res.push(false);
+        }
+        if flag {
+            res[i] = true;
+        }
+    }
+}
+
+// Internal method for extracting the set of generics which have been matched.
+fn fetch_generics<'a>(set: &[bool], generics: &'a Generics) -> Vec<&'a Ident> {
+    let mut tys = vec![];
+    for (&seen, param) in set.iter().zip(generics.params.iter()) {
+        if seen {
+            if let GenericParam::Type(tparam) = param {
+                tys.push(&tparam.ident)
+            }
+        }
+    }
+    tys
+}
+
+// Internal method for sanitizing an identifier for hygiene purposes.
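+// For example, "synstructure_test_traits::WalkFields" is sanitized to
+// "synstructure_test_traits_WalkFields": non-identifier characters become "_"
+// and consecutive "_" characters are collapsed.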
+fn sanitize_ident(s: &str) -> Ident {
+    let mut res = String::with_capacity(s.len());
+    for mut c in s.chars() {
+        if !UnicodeXID::is_xid_continue(c) {
+            c = '_'
+        }
+        // Deduplicate consecutive _ characters.
+        if res.ends_with('_') && c == '_' {
+            continue;
+        }
+        res.push(c);
+    }
+    Ident::new(&res, Span::call_site())
+}
+
+// Internal method to merge two Generics objects together intelligently.
+fn merge_generics(into: &mut Generics, from: &Generics) -> Result<()> {
+    // Try to add each param into `into`, treating identically-named params as a conflict.
+    for p in &from.params {
+        for op in &into.params {
+            match (op, p) {
+                (GenericParam::Type(otp), GenericParam::Type(tp)) => {
+                    // NOTE: This is only OK because syn ignores the span for equality purposes.
+                    if otp.ident == tp.ident {
+                        return Err(Error::new_spanned(
+                            p,
+                            format!(
+                                "Attempted to merge conflicting generic parameters: {} and {}",
+                                quote!(#op),
+                                quote!(#p)
+                            ),
+                        ));
+                    }
+                }
+                (GenericParam::Lifetime(olp), GenericParam::Lifetime(lp)) => {
+                    // NOTE: This is only OK because syn ignores the span for equality purposes.
+                    if olp.lifetime == lp.lifetime {
+                        return Err(Error::new_spanned(
+                            p,
+                            format!(
+                                "Attempted to merge conflicting generic parameters: {} and {}",
+                                quote!(#op),
+                                quote!(#p)
+                            ),
+                        ));
+                    }
+                }
+                // We don't support merging Const parameters, because that wouldn't make much sense.
+                _ => (),
+            }
+        }
+        into.params.push(p.clone());
+    }
+
+    // Add any where clauses from the input generics object.
+    if let Some(from_clause) = &from.where_clause {
+        into.make_where_clause()
+            .predicates
+            .extend(from_clause.predicates.iter().cloned());
+    }
+
+    Ok(())
+}
+
+/// Helper method which does the same thing as rustc 1.20's
+/// `Option::get_or_insert_with`. This method is used to keep backwards
+/// compatibility with rustc 1.15.
+fn get_or_insert_with<T, F>(opt: &mut Option<T>, f: F) -> &mut T
+where
+    F: FnOnce() -> T,
+{
+    if opt.is_none() {
+        *opt = Some(f());
+    }
+
+    match opt {
+        Some(v) => v,
+        None => unreachable!(),
+    }
+}
+
+/// Information about a specific binding. This contains both an `Ident`
+/// reference to the given field, and the syn `&'a Field` descriptor for that
+/// field.
+///
+/// This type supports `quote::ToTokens`, so can be directly used within the
+/// `quote!` macro. It expands to a reference to the matched field.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct BindingInfo<'a> {
+    /// The name which this BindingInfo will bind to.
+    pub binding: Ident,
+
+    /// The type of binding which this BindingInfo will create.
+    pub style: BindStyle,
+
+    field: &'a Field,
+
+    // These are used to determine which type parameters are available.
+    generics: &'a Generics,
+    seen_generics: Vec<bool>,
+    // The original index of the binding
+    // this will not change when .filter() is called
+    index: usize,
+}
+
+impl<'a> ToTokens for BindingInfo<'a> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        self.binding.to_tokens(tokens);
+    }
+}
+
+impl<'a> BindingInfo<'a> {
+    /// Returns a reference to the underlying `syn` AST node which this
+    /// `BindingInfo` references
+    pub fn ast(&self) -> &'a Field {
+        self.field
+    }
+
+    /// Generates the pattern fragment for this field binding.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B{ a: i32, b: i32 },
+    ///         C(u32),
+    ///     }
+    /// };
+    /// let s = Structure::new(&di);
+    ///
+    /// assert_eq!(
+    ///     s.variants()[0].bindings()[0].pat().to_string(),
+    ///     quote! {
+    ///         ref __binding_0
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn pat(&self) -> TokenStream {
+        let BindingInfo { binding, style, .. } = self;
+        quote!(#style #binding)
+    }
+
+    /// Returns a list of the type parameters which are referenced in this
+    /// field's type.
+    ///
+    /// # Caveat
+    ///
+    /// If the field contains any macros in type position, all parameters will
+    /// be considered bound. This is because we cannot determine which type
+    /// parameters are bound by type macros.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     struct A<T, U> {
+    ///         a: Option<T>,
+    ///         b: U,
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// assert_eq!(
+    ///     s.variants()[0].bindings()[0].referenced_ty_params(),
+    ///     &[&quote::format_ident!("T")]
+    /// );
+    /// ```
+    pub fn referenced_ty_params(&self) -> Vec<&'a Ident> {
+        fetch_generics(&self.seen_generics, self.generics)
+    }
+}
+
+/// This type is similar to `syn`'s `Variant` type, however each of the fields
+/// are references rather than owned. When this is used as the AST for a real
+/// variant, this struct simply borrows the fields of the `syn::Variant`,
+/// however this type may also be used as the sole variant for a struct.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub struct VariantAst<'a> {
+    pub attrs: &'a [Attribute],
+    pub ident: &'a Ident,
+    pub fields: &'a Fields,
+    pub discriminant: &'a Option<(token::Eq, Expr)>,
+}
+
+/// A wrapper around a `syn::DeriveInput`'s variant which provides utilities
+/// for destructuring `Variant`s with `match` expressions.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct VariantInfo<'a> {
+    pub prefix: Option<&'a Ident>,
+    bindings: Vec<BindingInfo<'a>>,
+    ast: VariantAst<'a>,
+    generics: &'a Generics,
+    // The original length of `bindings` before any `.filter()` calls
+    original_length: usize,
+}
+
+/// Helper function used by the VariantInfo constructor. Walks all of the types
+/// in `field` and returns, for each type parameter in `generics`, whether that
+/// parameter is referenced in the field's type.
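+/// For example, for a field of type `Option<T>` inside `struct S<T, U>`, the
+/// result is `[true, false]`: `T` is referenced by the field, `U` is not.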
+fn get_ty_params(field: &Field, generics: &Generics) -> Vec<bool> {
+    // Helper type. Discovers all identifiers inside of the visited type,
+    // and calls a callback with them.
+    struct BoundTypeLocator<'a> {
+        result: Vec<bool>,
+        generics: &'a Generics,
+    }
+
+    impl<'a> Visit<'a> for BoundTypeLocator<'a> {
+        // XXX: This also (intentionally) captures paths like T::SomeType. Is
+        // this desirable?
+        fn visit_ident(&mut self, id: &Ident) {
+            for (idx, i) in self.generics.params.iter().enumerate() {
+                if let GenericParam::Type(tparam) = i {
+                    if tparam.ident == *id {
+                        self.result[idx] = true;
+                    }
+                }
+            }
+        }
+
+        fn visit_type_macro(&mut self, x: &'a TypeMacro) {
+            // If we see a type_mac declaration, then we can't know what type parameters
+            // it might be binding, so we presume it binds all of them.
+            for r in &mut self.result {
+                *r = true;
+            }
+            visit::visit_type_macro(self, x)
+        }
+    }
+
+    let mut btl = BoundTypeLocator {
+        result: vec![false; generics.params.len()],
+        generics,
+    };
+
+    btl.visit_type(&field.ty);
+
+    btl.result
+}
+
+impl<'a> VariantInfo<'a> {
+    fn new(ast: VariantAst<'a>, prefix: Option<&'a Ident>, generics: &'a Generics) -> Self {
+        let bindings = match ast.fields {
+            Fields::Unit => vec![],
+            Fields::Unnamed(FieldsUnnamed {
+                unnamed: fields, ..
+            })
+            | Fields::Named(FieldsNamed { named: fields, .. }) => {
+                fields
+                    .into_iter()
+                    .enumerate()
+                    .map(|(i, field)| {
+                        BindingInfo {
+                            // XXX: This has to be call_site to avoid privacy
+                            // when deriving on private fields.
+                            binding: format_ident!("__binding_{}", i),
+                            style: BindStyle::Ref,
+                            field,
+                            generics,
+                            seen_generics: get_ty_params(field, generics),
+                            index: i,
+                        }
+                    })
+                    .collect::<Vec<_>>()
+            }
+        };
+
+        let original_length = bindings.len();
+        VariantInfo {
+            prefix,
+            bindings,
+            ast,
+            generics,
+            original_length,
+        }
+    }
+
+    /// Returns a slice of the bindings in this Variant.
+    pub fn bindings(&self) -> &[BindingInfo<'a>] {
+        &self.bindings
+    }
+
+    /// Returns a mut slice of the bindings in this Variant.
+    pub fn bindings_mut(&mut self) -> &mut [BindingInfo<'a>] {
+        &mut self.bindings
+    }
+
+    /// Returns a `VariantAst` object which contains references to the
+    /// underlying `syn` AST node which this `Variant` was created from.
+    pub fn ast(&self) -> VariantAst<'a> {
+        self.ast
+    }
+
+    /// True if any bindings were omitted due to a `filter` call.
+    pub fn omitted_bindings(&self) -> bool {
+        self.original_length != self.bindings.len()
+    }
+
+    /// Generates the match-arm pattern which could be used to match against this Variant.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(i32, i32),
+    ///         C(u32),
+    ///     }
+    /// };
+    /// let s = Structure::new(&di);
+    ///
+    /// assert_eq!(
+    ///     s.variants()[0].pat().to_string(),
+    ///     quote!{
+    ///         A::B(ref __binding_0, ref __binding_1,)
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn pat(&self) -> TokenStream {
+        let mut t = TokenStream::new();
+        if let Some(prefix) = self.prefix {
+            prefix.to_tokens(&mut t);
+            quote!(::).to_tokens(&mut t);
+        }
+        self.ast.ident.to_tokens(&mut t);
+        match self.ast.fields {
+            Fields::Unit => {
+                assert!(self.bindings.is_empty());
+            }
+            Fields::Unnamed(..) => token::Paren(Span::call_site()).surround(&mut t, |t| {
+                let mut expected_index = 0;
+                for binding in &self.bindings {
+                    while expected_index < binding.index {
+                        quote!(_,).to_tokens(t);
+                        expected_index += 1;
+                    }
+                    binding.pat().to_tokens(t);
+                    quote!(,).to_tokens(t);
+                    expected_index += 1;
+                }
+                if expected_index != self.original_length {
+                    quote!(..).to_tokens(t);
+                }
+            }),
+            Fields::Named(..) => token::Brace(Span::call_site()).surround(&mut t, |t| {
+                for binding in &self.bindings {
+                    binding.field.ident.to_tokens(t);
+                    quote!(:).to_tokens(t);
+                    binding.pat().to_tokens(t);
+                    quote!(,).to_tokens(t);
+                }
+                if self.omitted_bindings() {
+                    quote!(..).to_tokens(t);
+                }
+            }),
+        }
+        t
+    }
+
+    /// Generates the token stream required to construct the current variant.
+    ///
+    /// The init array initializes each of the fields in the order they are
+    /// written in `variant.ast().fields`.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(usize, usize),
+    ///         C{ v: usize },
+    ///     }
+    /// };
+    /// let s = Structure::new(&di);
+    ///
+    /// assert_eq!(
+    ///     s.variants()[0].construct(|_, i| quote!(#i)).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B(0usize, 1usize,)
+    ///     }.to_string()
+    /// );
+    ///
+    /// assert_eq!(
+    ///     s.variants()[1].construct(|_, i| quote!(#i)).to_string(),
+    ///
+    ///     quote!{
+    ///         A::C{ v: 0usize, }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn construct<F, T>(&self, mut func: F) -> TokenStream
+    where
+        F: FnMut(&Field, usize) -> T,
+        T: ToTokens,
+    {
+        let mut t = TokenStream::new();
+        if let Some(prefix) = self.prefix {
+            quote!(#prefix ::).to_tokens(&mut t);
+        }
+        self.ast.ident.to_tokens(&mut t);
+
+        match &self.ast.fields {
+            Fields::Unit => (),
+            Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => {
+                token::Paren::default().surround(&mut t, |t| {
+                    for (i, field) in unnamed.into_iter().enumerate() {
+                        func(field, i).to_tokens(t);
+                        quote!(,).to_tokens(t);
+                    }
+                })
+            }
+            Fields::Named(FieldsNamed { named, .. }) => {
+                token::Brace::default().surround(&mut t, |t| {
+                    for (i, field) in named.into_iter().enumerate() {
+                        field.ident.to_tokens(t);
+                        quote!(:).to_tokens(t);
+                        func(field, i).to_tokens(t);
+                        quote!(,).to_tokens(t);
+                    }
+                })
+            }
+        }
+        t
+    }
+
+    /// Runs the passed-in function once for each bound field, passing in a `BindingInfo`,
+    /// and generating a `match` arm which evaluates the returned tokens.
+    ///
+    /// This method will ignore fields which are ignored through the `filter`
+    /// method.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(i32, i32),
+    ///         C(u32),
+    ///     }
+    /// };
+    /// let s = Structure::new(&di);
+    ///
+    /// assert_eq!(
+    ///     s.variants()[0].each(|bi| quote!(println!("{:?}", #bi))).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B(ref __binding_0, ref __binding_1,) => {
+    ///             { println!("{:?}", __binding_0) }
+    ///             { println!("{:?}", __binding_1) }
+    ///         }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn each<F, R>(&self, mut f: F) -> TokenStream
+    where
+        F: FnMut(&BindingInfo<'_>) -> R,
+        R: ToTokens,
+    {
+        let pat = self.pat();
+        let mut body = TokenStream::new();
+        for binding in &self.bindings {
+            token::Brace::default().surround(&mut body, |body| {
+                f(binding).to_tokens(body);
+            });
+        }
+        quote!(#pat => { #body })
+    }
+
+    /// Runs the passed-in function once for each bound field, passing in the
+    /// result of the previous call and a `BindingInfo`, generating a `match`
+    /// arm which evaluates to the resulting tokens.
+    ///
+    /// This method will ignore fields which are ignored through the `filter`
+    /// method.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(i32, i32),
+    ///         C(u32),
+    ///     }
+    /// };
+    /// let s = Structure::new(&di);
+    ///
+    /// assert_eq!(
+    ///     s.variants()[0].fold(quote!(0), |acc, bi| quote!(#acc + #bi)).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B(ref __binding_0, ref __binding_1,) => {
+    ///             0 + __binding_0 + __binding_1
+    ///         }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn fold<F, I, R>(&self, init: I, mut f: F) -> TokenStream
+    where
+        F: FnMut(TokenStream, &BindingInfo<'_>) -> R,
+        I: ToTokens,
+        R: ToTokens,
+    {
+        let pat = self.pat();
+        let body = self.bindings.iter().fold(quote!(#init), |i, bi| {
+            let r = f(i, bi);
+            quote!(#r)
+        });
+        quote!(#pat => { #body })
+    }
+
+    /// Filter the bindings created by this `Variant` object. This has 2 effects:
+    ///
+    /// * The bindings will no longer appear in match arms generated by methods
+    ///   on this `Variant` or its subobjects.
+    ///
+    /// * Impl blocks created with the `bound_impl` or `unsafe_bound_impl`
+    ///   method only consider type parameters referenced in the types of
+    ///   non-filtered fields.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B{ a: i32, b: i32 },
+    ///         C{ a: u32 },
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.variants_mut()[0].filter(|bi| {
+    ///     bi.ast().ident == Some(quote::format_ident!("b"))
+    /// });
+    ///
+    /// assert_eq!(
+    ///     s.each(|bi| quote!(println!("{:?}", #bi))).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B{ b: ref __binding_1, .. } => {
+    ///             { println!("{:?}", __binding_1) }
+    ///         }
+    ///         A::C{ a: ref __binding_0, } => {
+    ///             { println!("{:?}", __binding_0) }
+    ///         }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn filter<F>(&mut self, f: F) -> &mut Self
+    where
+        F: FnMut(&BindingInfo<'_>) -> bool,
+    {
+        self.bindings.retain(f);
+        self
+    }
+
+    /// Remove the binding at the given index.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the index is out of range.
+    pub fn remove_binding(&mut self, idx: usize) -> &mut Self {
+        self.bindings.remove(idx);
+        self
+    }
+
+    /// Updates the `BindStyle` for each of the passed-in fields by calling the
+    /// passed-in function for each `BindingInfo`.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(i32, i32),
+    ///         C(u32),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.variants_mut()[0].bind_with(|bi| BindStyle::RefMut);
+    ///
+    /// assert_eq!(
+    ///     s.each(|bi| quote!(println!("{:?}", #bi))).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B(ref mut __binding_0, ref mut __binding_1,) => {
+    ///             { println!("{:?}", __binding_0) }
+    ///             { println!("{:?}", __binding_1) }
+    ///         }
+    ///         A::C(ref __binding_0,) => {
+    ///             { println!("{:?}", __binding_0) }
+    ///         }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn bind_with<F>(&mut self, mut f: F) -> &mut Self
+    where
+        F: FnMut(&BindingInfo<'_>) -> BindStyle,
+    {
+        for binding in &mut self.bindings {
+            binding.style = f(&binding);
+        }
+        self
+    }
+
+    /// Updates the binding name for each of the passed-in fields by calling the
+    /// passed-in function for each `BindingInfo`.
+    ///
+    /// The function will be called with the `BindingInfo` and its index in the
+    /// enclosing variant.
+    ///
+    /// The default name is `__binding_{}` where `{}` is replaced with an
+    /// increasing number.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B{ a: i32, b: i32 },
+    ///         C{ a: u32 },
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.variants_mut()[0].binding_name(|bi, i| bi.ident.clone().unwrap());
+    ///
+    /// assert_eq!(
+    ///     s.each(|bi| quote!(println!("{:?}", #bi))).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B{ a: ref a, b: ref b, } => {
+    ///             { println!("{:?}", a) }
+    ///             { println!("{:?}", b) }
+    ///         }
+    ///         A::C{ a: ref __binding_0, } => {
+    ///             { println!("{:?}", __binding_0) }
+    ///         }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn binding_name<F>(&mut self, mut f: F) -> &mut Self
+    where
+        F: FnMut(&Field, usize) -> Ident,
+    {
+        for (it, binding) in self.bindings.iter_mut().enumerate() {
+            binding.binding = f(binding.field, it);
+        }
+        self
+    }
+
+    /// Returns a list of the type parameters which are referenced in the
+    /// types of this variant's fields.
+    ///
+    /// # Caveat
+    ///
+    /// If the field contains any macros in type position, all parameters will
+    /// be considered bound. This is because we cannot determine which type
+    /// parameters are bound by type macros.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     struct A<T, U> {
+    ///         a: Option<T>,
+    ///         b: U,
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// assert_eq!(
+    ///     s.variants()[0].bindings()[0].referenced_ty_params(),
+    ///     &[&quote::format_ident!("T")]
+    /// );
+    /// ```
+    pub fn referenced_ty_params(&self) -> Vec<&'a Ident> {
+        let mut flags = Vec::new();
+        for binding in &self.bindings {
+            generics_fuse(&mut flags, &binding.seen_generics);
+        }
+        fetch_generics(&flags, self.generics)
+    }
+}
+
+/// A wrapper around a `syn::DeriveInput` which provides utilities for creating
+/// custom derive trait implementations.
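+///
+/// # Example
+/// A minimal usage sketch: wrap a parsed `DeriveInput` and inspect its
+/// variants (a struct is modeled as a single variant with its fields bound).
+/// ```
+/// # use synstructure::*;
+/// let di: syn::DeriveInput = syn::parse_quote! {
+///     struct Pair<T>(T, T);
+/// };
+/// let s = Structure::new(&di);
+///
+/// assert_eq!(s.variants().len(), 1);
+/// assert_eq!(s.variants()[0].bindings().len(), 2);
+/// ```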
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct Structure<'a> {
+    variants: Vec<VariantInfo<'a>>,
+    omitted_variants: bool,
+    underscore_const: bool,
+    ast: &'a DeriveInput,
+    extra_impl: Vec<GenericParam>,
+    extra_predicates: Vec<WherePredicate>,
+    add_bounds: AddBounds,
+}
+
+impl<'a> Structure<'a> {
+    /// Create a new `Structure` with the variants and fields from the passed-in
+    /// `DeriveInput`.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if the provided AST node represents an untagged
+    /// union.
+    pub fn new(ast: &'a DeriveInput) -> Self {
+        Self::try_new(ast).expect("Unable to create synstructure::Structure")
+    }
+
+    /// Create a new `Structure` with the variants and fields from the passed-in
+    /// `DeriveInput`.
+    ///
+    /// Unlike `Structure::new`, this method does not panic if the provided AST
+    /// node represents an untagged union.
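+    ///
+    /// # Example
+    /// A small sketch: an untagged union yields an `Err` instead of panicking.
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     union U { a: u32, b: f32 }
+    /// };
+    /// assert!(Structure::try_new(&di).is_err());
+    /// ```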
+    pub fn try_new(ast: &'a DeriveInput) -> Result<Self> {
+        let variants = match &ast.data {
+            Data::Enum(data) => (&data.variants)
+                .into_iter()
+                .map(|v| {
+                    VariantInfo::new(
+                        VariantAst {
+                            attrs: &v.attrs,
+                            ident: &v.ident,
+                            fields: &v.fields,
+                            discriminant: &v.discriminant,
+                        },
+                        Some(&ast.ident),
+                        &ast.generics,
+                    )
+                })
+                .collect::<Vec<_>>(),
+            Data::Struct(data) => {
+                // SAFETY NOTE: Normally putting an `Expr` in static storage
+                // wouldn't be safe, because it could contain `Term` objects
+                // which use thread-local interning. However, this static always
+                // contains the value `None`. Thus, it will never contain any
+                // unsafe values.
+                struct UnsafeMakeSync(Option<(token::Eq, Expr)>);
+                unsafe impl Sync for UnsafeMakeSync {}
+                static NONE_DISCRIMINANT: UnsafeMakeSync = UnsafeMakeSync(None);
+
+                vec![VariantInfo::new(
+                    VariantAst {
+                        attrs: &ast.attrs,
+                        ident: &ast.ident,
+                        fields: &data.fields,
+                        discriminant: &NONE_DISCRIMINANT.0,
+                    },
+                    None,
+                    &ast.generics,
+                )]
+            }
+            Data::Union(_) => {
+                return Err(Error::new_spanned(
+                    ast,
+                    "unexpected unsupported untagged union",
+                ));
+            }
+        };
+
+        Ok(Structure {
+            variants,
+            omitted_variants: false,
+            underscore_const: false,
+            ast,
+            extra_impl: vec![],
+            extra_predicates: vec![],
+            add_bounds: AddBounds::Both,
+        })
+    }
+
+    /// Returns a slice of the variants in this Structure.
+    pub fn variants(&self) -> &[VariantInfo<'a>] {
+        &self.variants
+    }
+
+    /// Returns a mut slice of the variants in this Structure.
+    pub fn variants_mut(&mut self) -> &mut [VariantInfo<'a>] {
+        &mut self.variants
+    }
+
+    /// Returns a reference to the underlying `syn` AST node which this
+    /// `Structure` was created from.
+    pub fn ast(&self) -> &'a DeriveInput {
+        self.ast
+    }
+
+    /// True if any variants were omitted due to a `filter_variants` call.
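+    ///
+    /// # Example
+    /// A short sketch of how this flag flips once a variant is filtered out:
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(i32),
+    ///         C(u32),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    /// assert!(!s.omitted_variants());
+    ///
+    /// s.filter_variants(|v| v.ast().ident != "B");
+    /// assert!(s.omitted_variants());
+    /// ```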
+    pub fn omitted_variants(&self) -> bool {
+        self.omitted_variants
+    }
+
+    /// Runs the passed-in function once for each bound field, passing in a
+    /// `BindingInfo`, and generating `match` arms which evaluate the returned
+    /// tokens.
+    ///
+    /// This method will ignore variants or fields which are ignored through the
+    /// `filter` and `filter_variants` methods.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(i32, i32),
+    ///         C(u32),
+    ///     }
+    /// };
+    /// let s = Structure::new(&di);
+    ///
+    /// assert_eq!(
+    ///     s.each(|bi| quote!(println!("{:?}", #bi))).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B(ref __binding_0, ref __binding_1,) => {
+    ///             { println!("{:?}", __binding_0) }
+    ///             { println!("{:?}", __binding_1) }
+    ///         }
+    ///         A::C(ref __binding_0,) => {
+    ///             { println!("{:?}", __binding_0) }
+    ///         }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn each<F, R>(&self, mut f: F) -> TokenStream
+    where
+        F: FnMut(&BindingInfo<'_>) -> R,
+        R: ToTokens,
+    {
+        let mut t = TokenStream::new();
+        for variant in &self.variants {
+            variant.each(&mut f).to_tokens(&mut t);
+        }
+        if self.omitted_variants {
+            quote!(_ => {}).to_tokens(&mut t);
+        }
+        t
+    }
+
+    /// Runs the passed-in function once for each bound field, passing in the
+    /// result of the previous call and a `BindingInfo`, generating `match`
+    /// arms which evaluate to the resulting tokens.
+    ///
+    /// This method will ignore variants or fields which are ignored through the
+    /// `filter` and `filter_variants` methods.
+    ///
+    /// If a variant has been ignored, it will return the `init` value.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(i32, i32),
+    ///         C(u32),
+    ///     }
+    /// };
+    /// let s = Structure::new(&di);
+    ///
+    /// assert_eq!(
+    ///     s.fold(quote!(0), |acc, bi| quote!(#acc + #bi)).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B(ref __binding_0, ref __binding_1,) => {
+    ///             0 + __binding_0 + __binding_1
+    ///         }
+    ///         A::C(ref __binding_0,) => {
+    ///             0 + __binding_0
+    ///         }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn fold<F, I, R>(&self, init: I, mut f: F) -> TokenStream
+    where
+        F: FnMut(TokenStream, &BindingInfo<'_>) -> R,
+        I: ToTokens,
+        R: ToTokens,
+    {
+        let mut t = TokenStream::new();
+        for variant in &self.variants {
+            variant.fold(&init, &mut f).to_tokens(&mut t);
+        }
+        if self.omitted_variants {
+            quote!(_ => { #init }).to_tokens(&mut t);
+        }
+        t
+    }
+
+    /// Runs the passed-in function once for each variant, passing in a
+    /// `VariantInfo`, and generating `match` arms which evaluate the returned
+    /// tokens.
+    ///
+    /// This method will ignore variants, and not bind fields, which have been
+    /// ignored through the `filter` and `filter_variants` methods.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(i32, i32),
+    ///         C(u32),
+    ///     }
+    /// };
+    /// let s = Structure::new(&di);
+    ///
+    /// assert_eq!(
+    ///     s.each_variant(|v| {
+    ///         let name = &v.ast().ident;
+    ///         quote!(println!(stringify!(#name)))
+    ///     }).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B(ref __binding_0, ref __binding_1,) => {
+    ///             println!(stringify!(B))
+    ///         }
+    ///         A::C(ref __binding_0,) => {
+    ///             println!(stringify!(C))
+    ///         }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn each_variant<F, R>(&self, mut f: F) -> TokenStream
+    where
+        F: FnMut(&VariantInfo<'_>) -> R,
+        R: ToTokens,
+    {
+        let mut t = TokenStream::new();
+        for variant in &self.variants {
+            let pat = variant.pat();
+            let body = f(variant);
+            quote!(#pat => { #body }).to_tokens(&mut t);
+        }
+        if self.omitted_variants {
+            quote!(_ => {}).to_tokens(&mut t);
+        }
+        t
+    }
+
+    /// Filter the bindings created by this `Structure` object. This has 2 effects:
+    ///
+    /// * The bindings will no longer appear in match arms generated by methods
+    ///   on this `Structure` or its subobjects.
+    ///
+    /// * Impl blocks created with the `bound_impl` or `unsafe_bound_impl`
+    ///   method only consider type parameters referenced in the types of
+    ///   non-filtered fields.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B{ a: i32, b: i32 },
+    ///         C{ a: u32 },
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.filter(|bi| {
+    ///     bi.ast().ident == Some(quote::format_ident!("a"))
+    /// });
+    ///
+    /// assert_eq!(
+    ///     s.each(|bi| quote!(println!("{:?}", #bi))).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B{ a: ref __binding_0, .. } => {
+    ///             { println!("{:?}", __binding_0) }
+    ///         }
+    ///         A::C{ a: ref __binding_0, } => {
+    ///             { println!("{:?}", __binding_0) }
+    ///         }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn filter<F>(&mut self, mut f: F) -> &mut Self
+    where
+        F: FnMut(&BindingInfo<'_>) -> bool,
+    {
+        for variant in &mut self.variants {
+            variant.filter(&mut f);
+        }
+        self
+    }
+
+    /// Specify additional where predicate bounds which should be generated by
+    /// impl-generating functions such as `gen_impl`, `bound_impl`, and
+    /// `unsafe_bound_impl`.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A<T, U> {
+    ///         B(T),
+    ///         C(Option<U>),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// // Add an additional where predicate.
+    /// s.add_where_predicate(syn::parse_quote!(T: std::fmt::Display));
+    ///
+    /// assert_eq!(
+    ///     s.bound_impl(quote!(krate::Trait), quote!{
+    ///         fn a() {}
+    ///     }).to_string(),
+    ///     quote!{
+    ///         #[allow(non_upper_case_globals)]
+    ///         #[doc(hidden)]
+    ///         const _DERIVE_krate_Trait_FOR_A: () = {
+    ///             extern crate krate;
+    ///             impl<T, U> krate::Trait for A<T, U>
+    ///                 where T: std::fmt::Display,
+    ///                       T: krate::Trait,
+    ///                       Option<U>: krate::Trait,
+    ///                       U: krate::Trait
+    ///             {
+    ///                 fn a() {}
+    ///             }
+    ///         };
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn add_where_predicate(&mut self, pred: WherePredicate) -> &mut Self {
+        self.extra_predicates.push(pred);
+        self
+    }
+
+    /// Specify which bounds should be generated by impl-generating functions
+    /// such as `gen_impl`, `bound_impl`, and `unsafe_bound_impl`.
+    ///
+    /// The default behaviour is to generate both field and generic bounds from
+    /// type parameters.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A<T, U> {
+    ///         B(T),
+    ///         C(Option<U>),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// // Limit bounds to only generics.
+    /// s.add_bounds(AddBounds::Generics);
+    ///
+    /// assert_eq!(
+    ///     s.bound_impl(quote!(krate::Trait), quote!{
+    ///         fn a() {}
+    ///     }).to_string(),
+    ///     quote!{
+    ///         #[allow(non_upper_case_globals)]
+    ///         #[doc(hidden)]
+    ///         const _DERIVE_krate_Trait_FOR_A: () = {
+    ///             extern crate krate;
+    ///             impl<T, U> krate::Trait for A<T, U>
+    ///                 where T: krate::Trait,
+    ///                       U: krate::Trait
+    ///             {
+    ///                 fn a() {}
+    ///             }
+    ///         };
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn add_bounds(&mut self, mode: AddBounds) -> &mut Self {
+        self.add_bounds = mode;
+        self
+    }
+
+    /// Filter the variants matched by this `Structure` object. This has 2 effects:
+    ///
+    /// * Match arms destructuring these variants will no longer be generated by
+    ///   methods on this `Structure`
+    ///
+    /// * Impl blocks created with the `bound_impl` or `unsafe_bound_impl`
+    ///   method only consider type parameters referenced in the types of
+    ///   fields in non-filtered variants.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(i32, i32),
+    ///         C(u32),
+    ///     }
+    /// };
+    ///
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.filter_variants(|v| v.ast().ident != "B");
+    ///
+    /// assert_eq!(
+    ///     s.each(|bi| quote!(println!("{:?}", #bi))).to_string(),
+    ///
+    ///     quote!{
+    ///         A::C(ref __binding_0,) => {
+    ///             { println!("{:?}", __binding_0) }
+    ///         }
+    ///         _ => {}
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn filter_variants<F>(&mut self, f: F) -> &mut Self
+    where
+        F: FnMut(&VariantInfo<'_>) -> bool,
+    {
+        let before_len = self.variants.len();
+        self.variants.retain(f);
+        if self.variants.len() != before_len {
+            self.omitted_variants = true;
+        }
+        self
+    }
+
+    /// Remove the variant at the given index.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the index is out of range.
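+    ///
+    /// # Example
+    /// A sketch mirroring the `filter_variants` example above: removing the
+    /// first variant leaves a single arm plus a catch-all.
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(i32, i32),
+    ///         C(u32),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.remove_variant(0);
+    ///
+    /// assert_eq!(
+    ///     s.each(|bi| quote!(println!("{:?}", #bi))).to_string(),
+    ///
+    ///     quote!{
+    ///         A::C(ref __binding_0,) => {
+    ///             { println!("{:?}", __binding_0) }
+    ///         }
+    ///         _ => {}
+    ///     }.to_string()
+    /// );
+    /// ```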
+    pub fn remove_variant(&mut self, idx: usize) -> &mut Self {
+        self.variants.remove(idx);
+        self.omitted_variants = true;
+        self
+    }
+
+    /// Updates the `BindStyle` for each of the passed-in fields by calling the
+    /// passed-in function for each `BindingInfo`.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B(i32, i32),
+    ///         C(u32),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.bind_with(|bi| BindStyle::RefMut);
+    ///
+    /// assert_eq!(
+    ///     s.each(|bi| quote!(println!("{:?}", #bi))).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B(ref mut __binding_0, ref mut __binding_1,) => {
+    ///             { println!("{:?}", __binding_0) }
+    ///             { println!("{:?}", __binding_1) }
+    ///         }
+    ///         A::C(ref mut __binding_0,) => {
+    ///             { println!("{:?}", __binding_0) }
+    ///         }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn bind_with<F>(&mut self, mut f: F) -> &mut Self
+    where
+        F: FnMut(&BindingInfo<'_>) -> BindStyle,
+    {
+        for variant in &mut self.variants {
+            variant.bind_with(&mut f);
+        }
+        self
+    }
+
+    /// Updates the binding name for each of the passed-in fields by calling the
+    /// passed-in function for each `BindingInfo`.
+    ///
+    /// The function will be called with the `BindingInfo` and its index in the
+    /// enclosing variant.
+    ///
+    /// The default name is `__binding_{}` where `{}` is replaced with an
+    /// increasing number.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A {
+    ///         B{ a: i32, b: i32 },
+    ///         C{ a: u32 },
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.binding_name(|bi, i| bi.ident.clone().unwrap());
+    ///
+    /// assert_eq!(
+    ///     s.each(|bi| quote!(println!("{:?}", #bi))).to_string(),
+    ///
+    ///     quote!{
+    ///         A::B{ a: ref a, b: ref b, } => {
+    ///             { println!("{:?}", a) }
+    ///             { println!("{:?}", b) }
+    ///         }
+    ///         A::C{ a: ref a, } => {
+    ///             { println!("{:?}", a) }
+    ///         }
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn binding_name<F>(&mut self, mut f: F) -> &mut Self
+    where
+        F: FnMut(&Field, usize) -> Ident,
+    {
+        for variant in &mut self.variants {
+            variant.binding_name(&mut f);
+        }
+        self
+    }
+
+    /// Returns a list of the type parameters which are referenced in the types
+    /// of non-filtered fields / variants.
+    ///
+    /// # Caveat
+    ///
+    /// If the struct contains any macros in type position, all parameters will
+    /// be considered bound. This is because we cannot determine which type
+    /// parameters are bound by type macros.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A<T, U> {
+    ///         B(T, i32),
+    ///         C(Option<U>),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.filter_variants(|v| v.ast().ident != "C");
+    ///
+    /// assert_eq!(
+    ///     s.referenced_ty_params(),
+    ///     &[&quote::format_ident!("T")]
+    /// );
+    /// ```
+    pub fn referenced_ty_params(&self) -> Vec<&'a Ident> {
+        let mut flags = Vec::new();
+        for variant in &self.variants {
+            for binding in &variant.bindings {
+                generics_fuse(&mut flags, &binding.seen_generics);
+            }
+        }
+        fetch_generics(&flags, &self.ast.generics)
+    }
+
+    /// Adds an `impl<>` generic parameter.
+    /// This can be used when the trait to be derived needs some extra generic parameters.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A<T, U> {
+    ///         B(T),
+    ///         C(Option<U>),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    /// let generic: syn::GenericParam = syn::parse_quote!(X: krate::AnotherTrait);
+    ///
+    /// assert_eq!(
+    ///     s.add_impl_generic(generic)
+    ///         .bound_impl(quote!(krate::Trait<X>),
+    ///         quote!{
+    ///                 fn a() {}
+    ///         }
+    ///     ).to_string(),
+    ///     quote!{
+    ///         #[allow(non_upper_case_globals)]
+    ///         #[doc(hidden)]
+    ///         const _DERIVE_krate_Trait_X_FOR_A: () = {
+    ///             extern crate krate;
+    ///             impl<T, U, X: krate::AnotherTrait> krate::Trait<X> for A<T, U>
+    ///                 where T: krate::Trait<X>,
+    ///                       Option<U>: krate::Trait<X>,
+    ///                       U: krate::Trait<X>
+    ///             {
+    ///                 fn a() {}
+    ///             }
+    ///         };
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn add_impl_generic(&mut self, param: GenericParam) -> &mut Self {
+        self.extra_impl.push(param);
+        self
+    }
+
+    /// Add trait bounds for a trait with the given path for each type parameter
+    /// referenced in the types of non-filtered fields.
+    ///
+    /// # Caveat
+    ///
+    /// If any field contains a macro in type position, all parameters will
+    /// be considered bound. This is because we cannot determine which type
+    /// parameters are bound by type macros.
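+    ///
+    /// # Example
+    /// A minimal sketch of calling this directly (`bound_impl` and `gen_impl`
+    /// normally call it for you), showing the predicate added for the single
+    /// generic field:
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     struct A<T>(T);
+    /// };
+    /// let s = Structure::new(&di);
+    ///
+    /// let bound: syn::TraitBound = syn::parse_quote!(krate::Trait);
+    /// let mut where_clause: Option<syn::WhereClause> = None;
+    /// s.add_trait_bounds(&bound, &mut where_clause, AddBounds::Fields);
+    ///
+    /// assert_eq!(
+    ///     quote!(#where_clause).to_string(),
+    ///     quote!(where T: krate::Trait).to_string()
+    /// );
+    /// ```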
+    pub fn add_trait_bounds(
+        &self,
+        bound: &TraitBound,
+        where_clause: &mut Option<WhereClause>,
+        mode: AddBounds,
+    ) {
+        // If we have any explicit where predicates, make sure to add them first.
+        if !self.extra_predicates.is_empty() {
+            let clause = get_or_insert_with(&mut *where_clause, || WhereClause {
+                where_token: Default::default(),
+                predicates: punctuated::Punctuated::new(),
+            });
+            clause
+                .predicates
+                .extend(self.extra_predicates.iter().cloned());
+        }
+
+        let mut seen = HashSet::new();
+        let mut pred = |ty: Type| {
+            if !seen.contains(&ty) {
+                seen.insert(ty.clone());
+
+                // Add a predicate.
+                let clause = get_or_insert_with(&mut *where_clause, || WhereClause {
+                    where_token: Default::default(),
+                    predicates: punctuated::Punctuated::new(),
+                });
+                clause.predicates.push(WherePredicate::Type(PredicateType {
+                    lifetimes: None,
+                    bounded_ty: ty,
+                    colon_token: Default::default(),
+                    bounds: Some(punctuated::Pair::End(TypeParamBound::Trait(bound.clone())))
+                        .into_iter()
+                        .collect(),
+                }));
+            }
+        };
+
+        for variant in &self.variants {
+            for binding in &variant.bindings {
+                match mode {
+                    AddBounds::Both | AddBounds::Fields => {
+                        for &seen in &binding.seen_generics {
+                            if seen {
+                                pred(binding.ast().ty.clone());
+                                break;
+                            }
+                        }
+                    }
+                    _ => {}
+                }
+
+                match mode {
+                    AddBounds::Both | AddBounds::Generics => {
+                        for param in binding.referenced_ty_params() {
+                            pred(Type::Path(TypePath {
+                                qself: None,
+                                path: (*param).clone().into(),
+                            }));
+                        }
+                    }
+                    _ => {}
+                }
+            }
+        }
+    }
+
+    /// Configure whether to use `const _` instead of a generated const name in
+    /// code generated by `gen_impl` and `bound_impl`.
+    ///
+    /// This syntax is only supported by Rust 1.37 and later.
+    ///
+    /// Defaults to `false` for backwards compatibility reasons.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     struct MyStruct;
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// assert_eq!(
+    ///     s.underscore_const(true)
+    ///         .gen_impl(quote! { gen impl Trait for @Self { } })
+    ///         .to_string(),
+    ///     quote! {
+    ///         const _: () = {
+    ///             impl Trait for MyStruct { }
+    ///         };
+    ///     }
+    ///     .to_string()
+    /// );
+    ///
+    /// assert_eq!(
+    ///     s.underscore_const(false)
+    ///         .gen_impl(quote! { gen impl Trait for @Self { } })
+    ///         .to_string(),
+    ///     quote! {
+    ///         #[allow(non_upper_case_globals)]
+    ///         const _DERIVE_Trait_FOR_MyStruct: () = {
+    ///             impl Trait for MyStruct { }
+    ///         };
+    ///     }
+    ///     .to_string()
+    /// );
+    /// ```
+    pub fn underscore_const(&mut self, enabled: bool) -> &mut Self {
+        self.underscore_const = enabled;
+        self
+    }
+
+    /// > NOTE: This method's features are superseded by `Structure::gen_impl`.
+    ///
+    /// Creates an `impl` block with the required generic type fields filled in
+    /// to implement the trait `path`.
+    ///
+    /// This method also adds where clauses to the impl requiring that all
+    /// referenced type parameters implement the trait `path`.
+    ///
+    /// # Hygiene and Paths
+    ///
+    /// This method wraps the impl block inside of a `const` (see the example
+    /// below). In this scope, the first segment of the passed-in path is
+    /// `extern crate`-ed in. If you don't want to generate that `extern crate`
+    /// item, use a global path.
+    ///
+    /// This means that if you are implementing `my_crate::Trait`, you simply
+    /// write `s.bound_impl(quote!(my_crate::Trait), quote!(...))`, and for the
+    /// entirety of the definition, you can refer to your crate as `my_crate`.
+    ///
+    /// # Caveat
+    ///
+    /// If any field contains a macro in type position, all parameters will
+    /// be considered bound. This is because we cannot determine which type
+    /// parameters are bound by type macros.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the path string parameter is not a valid `TraitBound`.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A<T, U> {
+    ///         B(T),
+    ///         C(Option<U>),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.filter_variants(|v| v.ast().ident != "B");
+    ///
+    /// assert_eq!(
+    ///     s.bound_impl(quote!(krate::Trait), quote!{
+    ///         fn a() {}
+    ///     }).to_string(),
+    ///     quote!{
+    ///         #[allow(non_upper_case_globals)]
+    ///         #[doc(hidden)]
+    ///         const _DERIVE_krate_Trait_FOR_A: () = {
+    ///             extern crate krate;
+    ///             impl<T, U> krate::Trait for A<T, U>
+    ///                 where Option<U>: krate::Trait,
+    ///                       U: krate::Trait
+    ///             {
+    ///                 fn a() {}
+    ///             }
+    ///         };
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn bound_impl<P: ToTokens, B: ToTokens>(&self, path: P, body: B) -> TokenStream {
+        self.impl_internal(
+            path.into_token_stream(),
+            body.into_token_stream(),
+            quote!(),
+            None,
+        )
+    }
+
+    /// > NOTE: This method's features are superseded by `Structure::gen_impl`.
+    ///
+    /// Creates an `impl` block with the required generic type fields filled in
+    /// to implement the unsafe trait `path`.
+    ///
+    /// This method also adds where clauses to the impl requiring that all
+    /// referenced type parameters implement the trait `path`.
+    ///
+    /// # Hygiene and Paths
+    ///
+    /// This method wraps the impl block inside of a `const` (see the example
+    /// below). In this scope, the first segment of the passed-in path is
+    /// `extern crate`-ed in. If you don't want to generate that `extern crate`
+    /// item, use a global path.
+    ///
+    /// This means that if you are implementing `my_crate::Trait`, you simply
+    /// write `s.unsafe_bound_impl(quote!(my_crate::Trait), quote!(...))`, and for the
+    /// entirety of the definition, you can refer to your crate as `my_crate`.
+    ///
+    /// # Caveat
+    ///
+    /// If any field contains a macro in type position, all parameters will
+    /// be considered bound. This is because we cannot determine which type
+    /// parameters are bound by type macros.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the path string parameter is not a valid `TraitBound`.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A<T, U> {
+    ///         B(T),
+    ///         C(Option<U>),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.filter_variants(|v| v.ast().ident != "B");
+    ///
+    /// assert_eq!(
+    ///     s.unsafe_bound_impl(quote!(krate::Trait), quote!{
+    ///         fn a() {}
+    ///     }).to_string(),
+    ///     quote!{
+    ///         #[allow(non_upper_case_globals)]
+    ///         #[doc(hidden)]
+    ///         const _DERIVE_krate_Trait_FOR_A: () = {
+    ///             extern crate krate;
+    ///             unsafe impl<T, U> krate::Trait for A<T, U>
+    ///                 where Option<U>: krate::Trait,
+    ///                       U: krate::Trait
+    ///             {
+    ///                 fn a() {}
+    ///             }
+    ///         };
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn unsafe_bound_impl<P: ToTokens, B: ToTokens>(&self, path: P, body: B) -> TokenStream {
+        self.impl_internal(
+            path.into_token_stream(),
+            body.into_token_stream(),
+            quote!(unsafe),
+            None,
+        )
+    }
+
+    /// > NOTE: This method's features are superseded by `Structure::gen_impl`.
+    ///
+    /// Creates an `impl` block with the required generic type fields filled in
+    /// to implement the trait `path`.
+    ///
+    /// This method will not add any where clauses to the impl.
+    ///
+    /// # Hygiene and Paths
+    ///
+    /// This method wraps the impl block inside of a `const` (see the example
+    /// below). In this scope, the first segment of the passed-in path is
+    /// `extern crate`-ed in. If you don't want to generate that `extern crate`
+    /// item, use a global path.
+    ///
+    /// This means that if you are implementing `my_crate::Trait`, you simply
+    /// write `s.unbound_impl(quote!(my_crate::Trait), quote!(...))`, and for the
+    /// entirety of the definition, you can refer to your crate as `my_crate`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the path string parameter is not a valid `TraitBound`.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A<T, U> {
+    ///         B(T),
+    ///         C(Option<U>),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.filter_variants(|v| v.ast().ident != "B");
+    ///
+    /// assert_eq!(
+    ///     s.unbound_impl(quote!(krate::Trait), quote!{
+    ///         fn a() {}
+    ///     }).to_string(),
+    ///     quote!{
+    ///         #[allow(non_upper_case_globals)]
+    ///         #[doc(hidden)]
+    ///         const _DERIVE_krate_Trait_FOR_A: () = {
+    ///             extern crate krate;
+    ///             impl<T, U> krate::Trait for A<T, U> {
+    ///                 fn a() {}
+    ///             }
+    ///         };
+    ///     }.to_string()
+    /// );
+    /// ```
+    pub fn unbound_impl<P: ToTokens, B: ToTokens>(&self, path: P, body: B) -> TokenStream {
+        self.impl_internal(
+            path.into_token_stream(),
+            body.into_token_stream(),
+            quote!(),
+            Some(AddBounds::None),
+        )
+    }
+
+    /// > NOTE: This method's features are superseded by `Structure::gen_impl`.
+    ///
+    /// Creates an `impl` block with the required generic type fields filled in
+    /// to implement the unsafe trait `path`.
+    ///
+    /// This method will not add any where clauses to the impl.
+    ///
+    /// # Hygiene and Paths
+    ///
+    /// This method wraps the impl block inside of a `const` (see the example
+    /// below). In this scope, the first segment of the passed-in path is
+    /// `extern crate`-ed in. If you don't want to generate that `extern crate`
+    /// item, use a global path.
+    ///
+    /// This means that if you are implementing `my_crate::Trait`, you simply
+    /// write `s.unsafe_unbound_impl(quote!(my_crate::Trait), quote!(...))`, and for the
+    /// entirety of the definition, you can refer to your crate as `my_crate`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the path string parameter is not a valid `TraitBound`.
+    ///
+    /// # Example
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A<T, U> {
+    ///         B(T),
+    ///         C(Option<U>),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.filter_variants(|v| v.ast().ident != "B");
+    ///
+    /// assert_eq!(
+    ///     s.unsafe_unbound_impl(quote!(krate::Trait), quote!{
+    ///         fn a() {}
+    ///     }).to_string(),
+    ///     quote!{
+    ///         #[allow(non_upper_case_globals)]
+    ///         #[doc(hidden)]
+    ///         const _DERIVE_krate_Trait_FOR_A: () = {
+    ///             extern crate krate;
+    ///             unsafe impl<T, U> krate::Trait for A<T, U> {
+    ///                 fn a() {}
+    ///             }
+    ///         };
+    ///     }.to_string()
+    /// );
+    /// ```
+    #[deprecated]
+    pub fn unsafe_unbound_impl<P: ToTokens, B: ToTokens>(&self, path: P, body: B) -> TokenStream {
+        self.impl_internal(
+            path.into_token_stream(),
+            body.into_token_stream(),
+            quote!(unsafe),
+            Some(AddBounds::None),
+        )
+    }
+
+    fn impl_internal(
+        &self,
+        path: TokenStream,
+        body: TokenStream,
+        safety: TokenStream,
+        mode: Option<AddBounds>,
+    ) -> TokenStream {
+        let mode = mode.unwrap_or(self.add_bounds);
+        let name = &self.ast.ident;
+        let mut gen_clone = self.ast.generics.clone();
+        gen_clone.params.extend(self.extra_impl.clone().into_iter());
+        let (impl_generics, _, _) = gen_clone.split_for_impl();
+        let (_, ty_generics, where_clause) = self.ast.generics.split_for_impl();
+
+        let bound = syn::parse2::<TraitBound>(path)
+            .expect("`path` argument must be a valid rust trait bound");
+
+        let mut where_clause = where_clause.cloned();
+        self.add_trait_bounds(&bound, &mut where_clause, mode);
+
+        // This function is smart: if a global path is passed, no `extern crate`
+        // statement is generated; a relative path causes the crate named by its
+        // first segment to be imported into the current scope.
+        let mut extern_crate = quote!();
+        if bound.path.leading_colon.is_none() {
+            if let Some(seg) = bound.path.segments.first() {
+                let seg = &seg.ident;
+                extern_crate = quote! { extern crate #seg; };
+            }
+        }
+
+        let generated = quote! {
+            #extern_crate
+            #safety impl #impl_generics #bound for #name #ty_generics #where_clause {
+                #body
+            }
+        };
+
+        if self.underscore_const {
+            quote! {
+                const _: () = { #generated };
+            }
+        } else {
+            let dummy_const: Ident = sanitize_ident(&format!(
+                "_DERIVE_{}_FOR_{}",
+                (&bound).into_token_stream(),
+                name.into_token_stream(),
+            ));
+            quote! {
+                #[allow(non_upper_case_globals)]
+                #[doc(hidden)]
+                const #dummy_const: () = {
+                    #generated
+                };
+            }
+        }
+    }
+
+    /// Generate an impl block for the given struct. This impl block will
+    /// automatically use hygiene tricks to avoid polluting the caller's
+    /// namespace, and will automatically add trait bounds for generic type
+    /// parameters.
+    ///
+    /// # Syntax
+    ///
+    /// This function accepts its arguments as a `TokenStream`. The recommended way
+    /// to call this function is passing the result of invoking the `quote!`
+    /// macro to it.
+    ///
+    /// ```ignore
+    /// s.gen_impl(quote! {
+    ///     // You can write any items which you want to import into scope here.
+    ///     // For example, you may want to include an `extern crate` for the
+    ///     // crate which implements your trait. These items will only be
+    ///     // visible to the code you generate, and won't be exposed to the
+    ///     // consuming crate
+    ///     extern crate krate;
+    ///
+    ///     // You can also add `use` statements here to bring types or traits
+    ///     // into scope.
+    ///     //
+    ///     // WARNING: Try not to use common names here, because the stable
+    ///     // version of syn does not support hygiene and you could accidentally
+    ///     // shadow types from the caller crate.
+    ///     use krate::Trait as MyTrait;
+    ///
+    ///     // The actual impl block is a `gen impl` or `gen unsafe impl` block.
+    ///     // You can use `@Self` to refer to the structure's type.
+    ///     gen impl MyTrait for @Self {
+    ///         fn f(&self) { ... }
+    ///     }
+    /// })
+    /// ```
+    ///
+    /// The most common usage of this method involves loading the crate the
+    /// target trait comes from with `extern crate`, and then invoking a `gen
+    /// impl` block.
+    ///
+    /// # Hygiene
+    ///
+    /// This method tries to handle hygiene intelligently for both stable and
+    /// unstable proc-macro implementations, however there are visible
+    /// differences.
+    ///
+    /// The output of every `gen_impl` function is wrapped in a dummy `const`
+    /// value, to ensure that it is given its own scope, and any values brought
+    /// into scope are not leaked to the calling crate.
+    ///
+    /// By default, the above invocation may generate an output like the
+    /// following:
+    ///
+    /// ```ignore
+    /// const _DERIVE_krate_Trait_FOR_Struct: () = {
+    ///     extern crate krate;
+    ///     use krate::Trait as MyTrait;
+    ///     impl<T> MyTrait for Struct<T> where T: MyTrait {
+    ///         fn f(&self) { ... }
+    ///     }
+    /// };
+    /// ```
+    ///
+    /// The `Structure` may also be configured with the [`underscore_const`] method
+    /// to generate `const _` instead.
+    ///
+    /// ```ignore
+    /// const _: () = {
+    ///     extern crate krate;
+    ///     use krate::Trait as MyTrait;
+    ///     impl<T> MyTrait for Struct<T> where T: MyTrait {
+    ///         fn f(&self) { ... }
+    ///     }
+    /// };
+    /// ```
+    ///
+    /// ### Using the `std` crate
+    ///
+    /// If you are using `quote!()` to implement your trait, with the
+    /// `proc-macro2/nightly` feature, `std` isn't considered to be in scope for
+    /// your macro. This means that if you use types from `std` in your
+    /// procedural macro, you'll want to explicitly load it with an `extern
+    /// crate std;`.
+    ///
+    /// ### Absolute paths
+    ///
+    /// You should generally avoid using absolute paths in your generated code,
+    /// as they will resolve very differently when using the stable and nightly
+    /// versions of `proc-macro2`. Instead, load the crates you need to use
+    /// explicitly with `extern crate` and refer to them through those locally
+    /// imported names.
+    ///
+    /// # Trait Bounds
+    ///
+    /// This method will automatically add trait bounds for any type parameters
+    /// which are referenced within the types of non-ignored fields.
+    ///
+    /// Additional type parameters may be added with the generics syntax after
+    /// the `impl` keyword.
+    ///
+    /// ### Type Macro Caveat
+    ///
+    /// If any field contains a macro in type position, all parameters will
+    /// be considered bound. This is because we cannot determine which type
+    /// parameters are bound by type macros.
+    ///
+    /// # Errors
+    ///
+    /// This function will generate a `compile_error!` if additional type
+    /// parameters added by `impl<..>` conflict with generic type parameters on
+    /// the original struct.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the input `TokenStream` is not well-formed.
+    ///
+    /// # Example Usage
+    ///
+    /// ```
+    /// # use synstructure::*;
+    /// let di: syn::DeriveInput = syn::parse_quote! {
+    ///     enum A<T, U> {
+    ///         B(T),
+    ///         C(Option<U>),
+    ///     }
+    /// };
+    /// let mut s = Structure::new(&di);
+    ///
+    /// s.filter_variants(|v| v.ast().ident != "B");
+    ///
+    /// assert_eq!(
+    ///     s.gen_impl(quote! {
+    ///         extern crate krate;
+    ///         gen impl krate::Trait for @Self {
+    ///             fn a() {}
+    ///         }
+    ///     }).to_string(),
+    ///     quote!{
+    ///         #[allow(non_upper_case_globals)]
+    ///         const _DERIVE_krate_Trait_FOR_A: () = {
+    ///             extern crate krate;
+    ///             impl<T, U> krate::Trait for A<T, U>
+    ///             where
+    ///                 Option<U>: krate::Trait,
+    ///                 U: krate::Trait
+    ///             {
+    ///                 fn a() {}
+    ///             }
+    ///         };
+    ///     }.to_string()
+    /// );
+    ///
+    /// // NOTE: You can also add extra generics after the impl
+    /// assert_eq!(
+    ///     s.gen_impl(quote! {
+    ///         extern crate krate;
+    ///         gen impl<X: krate::OtherTrait> krate::Trait<X> for @Self
+    ///         where
+    ///             X: Send + Sync,
+    ///         {
+    ///             fn a() {}
+    ///         }
+    ///     }).to_string(),
+    ///     quote!{
+    ///         #[allow(non_upper_case_globals)]
+    ///         const _DERIVE_krate_Trait_X_FOR_A: () = {
+    ///             extern crate krate;
+    ///             impl<X: krate::OtherTrait, T, U> krate::Trait<X> for A<T, U>
+    ///             where
+    ///                 X: Send + Sync,
+    ///                 Option<U>: krate::Trait<X>,
+    ///                 U: krate::Trait<X>
+    ///             {
+    ///                 fn a() {}
+    ///             }
+    ///         };
+    ///     }.to_string()
+    /// );
+    ///
+    /// // NOTE: you can generate multiple traits with a single call
+    /// assert_eq!(
+    ///     s.gen_impl(quote! {
+    ///         extern crate krate;
+    ///
+    ///         gen impl krate::Trait for @Self {
+    ///             fn a() {}
+    ///         }
+    ///
+    ///         gen impl krate::OtherTrait for @Self {
+    ///             fn b() {}
+    ///         }
+    ///     }).to_string(),
+    ///     quote!{
+    ///         #[allow(non_upper_case_globals)]
+    ///         const _DERIVE_krate_Trait_FOR_A: () = {
+    ///             extern crate krate;
+    ///             impl<T, U> krate::Trait for A<T, U>
+    ///             where
+    ///                 Option<U>: krate::Trait,
+    ///                 U: krate::Trait
+    ///             {
+    ///                 fn a() {}
+    ///             }
+    ///
+    ///             impl<T, U> krate::OtherTrait for A<T, U>
+    ///             where
+    ///                 Option<U>: krate::OtherTrait,
+    ///                 U: krate::OtherTrait
+    ///             {
+    ///                 fn b() {}
+    ///             }
+    ///         };
+    ///     }.to_string()
+    /// );
+    /// ```
+    ///
+    /// Use `add_bounds` to change which bounds are generated.
+    pub fn gen_impl(&self, cfg: TokenStream) -> TokenStream {
+        Parser::parse2(
+            |input: ParseStream<'_>| -> Result<TokenStream> { self.gen_impl_parse(input, true) },
+            cfg,
+        )
+        .expect("Failed to parse gen_impl")
+    }
+
+    fn gen_impl_parse(&self, input: ParseStream<'_>, wrap: bool) -> Result<TokenStream> {
+        fn parse_prefix(input: ParseStream<'_>) -> Result<Option<Token![unsafe]>> {
+            if input.parse::<Ident>()? != "gen" {
+                return Err(input.error("Expected keyword `gen`"));
+            }
+            let safety = input.parse::<Option<Token![unsafe]>>()?;
+            let _ = input.parse::<Token![impl]>()?;
+            Ok(safety)
+        }
+
+        let mut before = vec![];
+        loop {
+            if parse_prefix(&input.fork()).is_ok() {
+                break;
+            }
+            before.push(input.parse::<TokenTree>()?);
+        }
+
+        // Parse the prefix "for real"
+        let safety = parse_prefix(input)?;
+
+        // optional `<>`
+        let mut generics = input.parse::<Generics>()?;
+
+        // @bound
+        let bound = input.parse::<TraitBound>()?;
+
+        // `for @Self`
+        let _ = input.parse::<Token![for]>()?;
+        let _ = input.parse::<Token![@]>()?;
+        let _ = input.parse::<Token![Self]>()?;
+
+        // optional `where ...`
+        generics.where_clause = input.parse()?;
+
+        // Body of the impl
+        let body;
+        braced!(body in input);
+        let body = body.parse::<TokenStream>()?;
+
+        // Try to parse the next entry in sequence. If this fails, we'll fall
+        // back to just parsing the entire rest of the TokenStream.
+        let maybe_next_impl = self.gen_impl_parse(&input.fork(), false);
+
+        // Eat tokens to the end. Whether or not our speculative nested parse
+        // succeeded, we're going to want to consume the rest of our input.
+        let mut after = input.parse::<TokenStream>()?;
+        if let Ok(stream) = maybe_next_impl {
+            after = stream;
+        }
+        assert!(input.is_empty(), "Should've consumed the rest of our input");
+
+        /* Codegen Logic */
+        let name = &self.ast.ident;
+
+        // Add the generics from the original struct in, and then add any
+        // additional trait bounds which we need on the type.
+        if let Err(err) = merge_generics(&mut generics, &self.ast.generics) {
+            // Report the merge error as a `compile_error!`, as it may be
+            // triggerable by an end-user.
+            return Ok(err.to_compile_error());
+        }
+
+        self.add_trait_bounds(&bound, &mut generics.where_clause, self.add_bounds);
+        let (impl_generics, _, where_clause) = generics.split_for_impl();
+        let (_, ty_generics, _) = self.ast.generics.split_for_impl();
+
+        let generated = quote! {
+            #(#before)*
+            #safety impl #impl_generics #bound for #name #ty_generics #where_clause {
+                #body
+            }
+            #after
+        };
+
+        if wrap {
+            if self.underscore_const {
+                Ok(quote! {
+                    const _: () = { #generated };
+                })
+            } else {
+                let dummy_const: Ident = sanitize_ident(&format!(
+                    "_DERIVE_{}_FOR_{}",
+                    (&bound).into_token_stream(),
+                    name.into_token_stream(),
+                ));
+                Ok(quote! {
+                    #[allow(non_upper_case_globals)]
+                    const #dummy_const: () = {
+                        #generated
+                    };
+                })
+            }
+        } else {
+            Ok(generated)
+        }
+    }
+}
+
+/// Dumps an unpretty version of a tokenstream. Takes any type which implements
+/// `Display`.
+///
+/// This is mostly useful for visualizing the output of a procedural macro, as
+/// it makes it marginally more readable. It is used in the implementation of
+/// `test_derive!` to unprettily print the output.
+///
+/// # Stability
+///
+/// The stability of the output of this function is not guaranteed. Do not
+/// assert that the output of this function does not change between minor
+/// versions.
+///
+/// # Example
+///
+/// ```
+/// # use quote::quote;
+/// assert_eq!(
+///     synstructure::unpretty_print(quote! {
+///         #[allow(non_upper_case_globals)]
+///         const _DERIVE_krate_Trait_FOR_A: () = {
+///             extern crate krate;
+///             impl<T, U> krate::Trait for A<T, U>
+///             where
+///                 Option<U>: krate::Trait,
+///                 U: krate::Trait
+///             {
+///                 fn a() {}
+///             }
+///         };
+///     }),
+///     "# [
+///     allow (
+///         non_upper_case_globals )
+///     ]
+/// const _DERIVE_krate_Trait_FOR_A : (
+///     )
+/// = {
+///     extern crate krate ;
+///     impl < T , U > krate :: Trait for A < T , U > where Option < U > : krate :: Trait , U : krate :: Trait {
+///         fn a (
+///             )
+///         {
+///             }
+///         }
+///     }
+/// ;
+/// "
+/// )
+/// ```
+pub fn unpretty_print<T: std::fmt::Display>(ts: T) -> String {
+    let mut res = String::new();
+
+    let raw_s = ts.to_string();
+    let mut s = &raw_s[..];
+    let mut indent = 0;
+    while let Some(i) = s.find(&['(', '{', '[', ')', '}', ']', ';'][..]) {
+        match &s[i..=i] {
+            "(" | "{" | "[" => indent += 1,
+            ")" | "}" | "]" => indent -= 1,
+            _ => {}
+        }
+        res.push_str(&s[..=i]);
+        res.push('\n');
+        for _ in 0..indent {
+            res.push_str("    ");
+        }
+        s = trim_start_matches(&s[i + 1..], ' ');
+    }
+    res.push_str(s);
+    res
+}
+
+/// `trim_left_matches` has been deprecated in favor of `trim_start_matches`.
+/// This helper silences the warning, as we need to continue using
+/// `trim_left_matches` for rust 1.15 support.
+#[allow(deprecated)]
+fn trim_start_matches(s: &str, c: char) -> &str {
+    s.trim_left_matches(c)
+}
+
+/// Helper trait describing values which may be returned by macro implementation
+/// methods used by this crate's macros.
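+///
+/// For example (sketch), a bare `proc_macro2::TokenStream` is already treated
+/// as a successful result:
+///
+/// ```
+/// # use synstructure::MacroResult;
+/// let ts: proc_macro2::TokenStream = quote::quote!(struct X;);
+/// assert!(ts.into_result().is_ok());
+/// ```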
+pub trait MacroResult {
+    /// Convert this result into a `Result` for further processing / validation.
+    fn into_result(self) -> Result<TokenStream>;
+
+    /// Convert this result into a `proc_macro::TokenStream`, ready to return
+    /// from a native `proc_macro` implementation.
+    ///
+    /// If `into_result()` would return an `Err`, this method should instead
+    /// generate a `compile_error!` invocation to nicely report the error.
+    ///
+    /// *This method is available if `synstructure` is built with the
+    /// `"proc-macro"` feature.*
+    #[cfg(all(
+        not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))),
+        feature = "proc-macro"
+    ))]
+    fn into_stream(self) -> proc_macro::TokenStream
+    where
+        Self: Sized,
+    {
+        match self.into_result() {
+            Ok(ts) => ts.into(),
+            Err(err) => err.to_compile_error().into(),
+        }
+    }
+}
+
+#[cfg(all(
+    not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))),
+    feature = "proc-macro"
+))]
+impl MacroResult for proc_macro::TokenStream {
+    fn into_result(self) -> Result<TokenStream> {
+        Ok(self.into())
+    }
+
+    fn into_stream(self) -> proc_macro::TokenStream {
+        self
+    }
+}
+
+impl MacroResult for TokenStream {
+    fn into_result(self) -> Result<TokenStream> {
+        Ok(self)
+    }
+}
+
+impl<T: MacroResult> MacroResult for Result<T> {
+    fn into_result(self) -> Result<TokenStream> {
+        match self {
+            Ok(v) => v.into_result(),
+            Err(err) => Err(err),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Regression test for #48
+    #[test]
+    fn test_each_enum() {
+        let di: syn::DeriveInput = syn::parse_quote! {
+         enum A {
+             Foo(usize, bool),
+             Bar(bool, usize),
+             Baz(usize, bool, usize),
+             Quux(bool, usize, bool)
+         }
+        };
+        let mut s = Structure::new(&di);
+
+        s.filter(|bi| bi.ast().ty.to_token_stream().to_string() == "bool");
+
+        assert_eq!(
+            s.each(|bi| quote!(do_something(#bi))).to_string(),
+            quote! {
+                A::Foo(_, ref __binding_1,) => { { do_something(__binding_1) } }
+                A::Bar(ref __binding_0, ..) => { { do_something(__binding_0) } }
+                A::Baz(_, ref __binding_1, ..) => { { do_something(__binding_1) } }
+                A::Quux(ref __binding_0, _, ref __binding_2,) => {
+                    {
+                        do_something(__binding_0)
+                    }
+                    {
+                        do_something(__binding_2)
+                    }
+                }
+            }
+            .to_string()
+        );
+    }
+}
diff --git a/crates/synstructure/src/macros.rs b/crates/synstructure/src/macros.rs
new file mode 100644
index 0000000..5b6557b
--- /dev/null
+++ b/crates/synstructure/src/macros.rs
@@ -0,0 +1,250 @@
+//! This module provides utility macros for declaring and testing custom
+//! derives. Used together, they eliminate some of the boilerplate required to
+//! declare and test custom derive implementations.
+
+// Re-exports used by the decl_derive!, decl_attribute!, and test_derive! macros.
+pub use proc_macro2::TokenStream as TokenStream2;
+pub use syn::{parse_str, DeriveInput};
+
+#[cfg(all(
+    not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))),
+    feature = "proc-macro"
+))]
+pub use proc_macro::TokenStream;
+#[cfg(all(
+    not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))),
+    feature = "proc-macro"
+))]
+pub use syn::parse;
+
+/// The `decl_derive!` macro declares a custom derive wrapper. It will parse the
+/// incoming `TokenStream` into a `synstructure::Structure` object, and pass it
+/// into the inner function.
+///
+/// Your inner function should take a `synstructure::Structure` by value, and
+/// return a type implementing `synstructure::MacroResult`, for example:
+///
+/// ```
+/// fn derive_simple(input: synstructure::Structure) -> proc_macro2::TokenStream {
+///     unimplemented!()
+/// }
+///
+/// fn derive_result(input: synstructure::Structure)
+///     -> syn::Result<proc_macro2::TokenStream>
+/// {
+///     unimplemented!()
+/// }
+/// ```
+///
+/// # Usage
+///
+/// ### Without Attributes
+/// ```
+/// fn derive_interesting(_input: synstructure::Structure) -> proc_macro2::TokenStream {
+///     quote::quote! { ... }
+/// }
+///
+/// # const _IGNORE: &'static str = stringify! {
+/// decl_derive!([Interesting] => derive_interesting);
+/// # };
+/// ```
+///
+/// ### With Attributes
+/// ```
+/// # fn main() {}
+/// fn derive_interesting(_input: synstructure::Structure) -> proc_macro2::TokenStream {
+///     quote::quote! { ... }
+/// }
+///
+/// # const _IGNORE: &'static str = stringify! {
+/// decl_derive!([Interesting, attributes(interesting_ignore)] => derive_interesting);
+/// # };
+/// ```
+///
+/// ### Decl Attributes & Doc Comments
+/// ```
+/// # fn main() {}
+/// fn derive_interesting(_input: synstructure::Structure) -> proc_macro2::TokenStream {
+///     quote::quote! { ... }
+/// }
+///
+/// # const _IGNORE: &'static str = stringify! {
+/// decl_derive! {
+///     [Interesting] =>
+///     #[allow(some_lint)]
+///     /// Documentation Comments
+///     derive_interesting
+/// }
+/// # };
+/// ```
+///
+/// *This macro is available if `synstructure` is built with the `"proc-macro"`
+/// feature.*
+#[cfg(all(
+    not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))),
+    feature = "proc-macro"
+))]
+#[macro_export]
+macro_rules! decl_derive {
+    // XXX: Switch to using this variant everywhere?
+    ([$derives:ident $($derive_t:tt)*] => $(#[$($attrs:tt)*])* $inner:path) => {
+        #[proc_macro_derive($derives $($derive_t)*)]
+        #[allow(non_snake_case)]
+        $(#[$($attrs)*])*
+        pub fn $derives(
+            i: $crate::macros::TokenStream
+        ) -> $crate::macros::TokenStream {
+            match $crate::macros::parse::<$crate::macros::DeriveInput>(i) {
+                Ok(p) => {
+                    match $crate::Structure::try_new(&p) {
+                        Ok(s) => $crate::MacroResult::into_stream($inner(s)),
+                        Err(e) => e.to_compile_error().into(),
+                    }
+                }
+                Err(e) => e.to_compile_error().into(),
+            }
+        }
+    };
+}
+
+/// The `decl_attribute!` macro declares a custom attribute wrapper. It will
+/// parse the incoming `TokenStream` into a `synstructure::Structure` object,
+/// and pass it into the inner function.
+///
+/// Your inner function should have the following type:
+///
+/// ```
+/// fn attribute(
+///     attr: proc_macro2::TokenStream,
+///     structure: synstructure::Structure,
+/// ) -> proc_macro2::TokenStream {
+///     unimplemented!()
+/// }
+/// ```
+///
+/// # Usage
+///
+/// ```
+/// fn attribute_interesting(
+///     _attr: proc_macro2::TokenStream,
+///     _structure: synstructure::Structure,
+/// ) -> proc_macro2::TokenStream {
+///     quote::quote! { ... }
+/// }
+///
+/// # const _IGNORE: &'static str = stringify! {
+/// decl_attribute!([interesting] => attribute_interesting);
+/// # };
+/// ```
+///
+/// *This macro is available if `synstructure` is built with the `"proc-macro"`
+/// feature.*
+#[cfg(all(
+    not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))),
+    feature = "proc-macro"
+))]
+#[macro_export]
+macro_rules! decl_attribute {
+    ([$attribute:ident] => $(#[$($attrs:tt)*])* $inner:path) => {
+        #[proc_macro_attribute]
+        $(#[$($attrs)*])*
+        pub fn $attribute(
+            attr: $crate::macros::TokenStream,
+            i: $crate::macros::TokenStream,
+        ) -> $crate::macros::TokenStream {
+            match $crate::macros::parse::<$crate::macros::DeriveInput>(i) {
+                Ok(p) => match $crate::Structure::try_new(&p) {
+                    Ok(s) => $crate::MacroResult::into_stream($inner(attr.into(), s)),
+                    Err(e) => e.to_compile_error().into(),
+                },
+                Err(e) => e.to_compile_error().into(),
+            }
+        }
+    };
+}
+
+/// Run a test on a custom derive. This macro expands both the original struct
+/// and the expansion to ensure that they compile correctly, and confirms that
+/// feeding the original struct into the named derive will produce the written
+/// output.
+///
+/// You can add `no_build` to the end of the macro invocation to disable
+/// checking that the written code compiles. This is useful in contexts where
+/// the procedural macro cannot depend on the crate where it is used during
+/// tests.
+///
+/// # Usage
+///
+/// ```
+/// fn test_derive_example(_s: synstructure::Structure)
+///     -> Result<proc_macro2::TokenStream, syn::Error>
+/// {
+///     Ok(quote::quote! { const YOUR_OUTPUT: &'static str = "here"; })
+/// }
+///
+/// fn main() {
+///     synstructure::test_derive!{
+///         test_derive_example {
+///             struct A;
+///         }
+///         expands to {
+///             const YOUR_OUTPUT: &'static str = "here";
+///         }
+///     }
+/// }
+/// ```
+#[macro_export]
+macro_rules! test_derive {
+    ($name:path { $($i:tt)* } expands to { $($o:tt)* }) => {
+        {
+            #[allow(dead_code)]
+            fn ensure_compiles() {
+                $($i)*
+                $($o)*
+            }
+
+            $crate::test_derive!($name { $($i)* } expands to { $($o)* } no_build);
+        }
+    };
+
+    ($name:path { $($i:tt)* } expands to { $($o:tt)* } no_build) => {
+        {
+            let i = stringify!( $($i)* );
+            let parsed = $crate::macros::parse_str::<$crate::macros::DeriveInput>(i)
+                .expect(concat!(
+                    "Failed to parse input to `#[derive(",
+                    stringify!($name),
+                    ")]`",
+                ));
+
+            let raw_res = $name($crate::Structure::new(&parsed));
+            let res = $crate::MacroResult::into_result(raw_res)
+                .expect(concat!(
+                    "Procedural macro failed for `#[derive(",
+                    stringify!($name),
+                    ")]`",
+                ));
+
+            let expected = stringify!( $($o)* )
+                .parse::<$crate::macros::TokenStream2>()
+                .expect("output should be a valid TokenStream");
+            let expected_toks = $crate::macros::TokenStream2::from(expected);
+            if res.to_string() != expected_toks.to_string() {
+                panic!("\
+test_derive failed:
+expected:
+```
+{}
+```
+
+got:
+```
+{}
+```\n",
+                    $crate::unpretty_print(&expected_toks),
+                    $crate::unpretty_print(&res),
+                );
+            }
+        }
+    };
+}
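
For orientation, a minimal sketch of how a proc-macro crate might combine `decl_derive!` with synstructure's `Structure::each` and `Structure::gen_impl` (this mirrors synstructure's documented `HeapSize` example; the `heapsize` crate and its `heap_size_of_children` method are illustrative assumptions, not part of this change):

```rust
// src/lib.rs of a crate built with `proc-macro = true` (sketch only).
use quote::quote;
use synstructure::{decl_derive, Structure};

// Inner function: receives the parsed `Structure` and returns tokens.
fn heap_size_derive(s: Structure) -> proc_macro2::TokenStream {
    // Visit every field of every variant and sum a (hypothetical)
    // `heap_size_of_children` over them.
    let body = s.each(|bi| {
        quote! { sum += heapsize::HeapSize::heap_size_of_children(#bi); }
    });

    s.gen_impl(quote! {
        extern crate heapsize;

        gen impl heapsize::HeapSize for @Self {
            fn heap_size_of_children(&self) -> usize {
                let mut sum = 0;
                match *self { #body }
                sum
            }
        }
    })
}

// Expands to a `#[proc_macro_derive(HeapSize)]` wrapper that performs the
// TokenStream parsing and error handling shown in `decl_derive!` above.
decl_derive!([HeapSize] => heap_size_derive);
```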
diff --git a/crates/tempfile/.cargo-checksum.json b/crates/tempfile/.cargo-checksum.json
new file mode 100644
index 0000000..26f8560
--- /dev/null
+++ b/crates/tempfile/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"685243e302f6e014de9c8e9b95596e5f63c7bf7fde42e8e66a41a6bc7fd5e803","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"8b427f5bc501764575e52ba4f9d95673cf8f6d80a86d0d06599852e1a9a20a36","NEWS":"4255c86ac140a4d08423cd05cbd0aa42ff796bb4b38579dd19cde289ee3baecd","README.md":"db6717cbd0b3cbbce5f3cdb8a80d8f2d90b1be251b4c1c647557ae0f78ec9748","src/dir.rs":"4499ff439b740f8d2f01458664e2bf72bbfdd1206226780c6a91fb309ef15707","src/error.rs":"cc7d8eace0fff11cb342158d2885d5637bfb14b24ef30755e808554772039c5f","src/file/imp/mod.rs":"f6da9fcd93f11889670a251fdd8231b5f4614e5a971b7b183f52b44af68568d5","src/file/imp/other.rs":"99c8f9f3251199fc31e7b88810134712e5725fb6fa14648696ed5cbea980fc5b","src/file/imp/unix.rs":"cf8eeceecfddc37c9eaf95a1ebe088314dc468f07fe357961d80817eef619ca4","src/file/imp/windows.rs":"03d81d71c404f0d448e1162825d6fbd57a78b4af8d4dc5287ec2e7c5a873d7cc","src/file/mod.rs":"bda4ee3998106089a4c0ccbc8e46dc22b7d3aec427487fd4e414fb132b378736","src/lib.rs":"e2b0df7e17cc6680a5bb0829d0433f069c6bf9eede2007d21e3b01a595df41a8","src/spooled.rs":"51fa1d7639027234e257d343a5d3c95f2e47899ba6a24f0abec8d4d729eba6d6","src/util.rs":"2bd80ee69009e7e36b596d0105bb00184cff04e899e9fcce2e4cc21f23dda073","tests/namedtempfile.rs":"0031cb33ae6faf45be103869b4d98af63bef4040dc489b323212eb7a7ef72a9a","tests/spooled.rs":"29e797d486d867cb6ac46d4cf126eb5868a069a4070c3f50ffa02fbb0b887934","tests/tempdir.rs":"771d555d4eaa410207d212eb3744e016e0b5a22f1f1b7199636a4fac5daaf952","tests/tempfile.rs":"92078a1e20a39af77c1daa9a422345d20c41584dd2010b4829911c8741d1c628"},"package":"5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"}
\ No newline at end of file
diff --git a/crates/tempfile/Android.bp b/crates/tempfile/Android.bp
new file mode 100644
index 0000000..9f141a9
--- /dev/null
+++ b/crates/tempfile/Android.bp
@@ -0,0 +1,37 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_tempfile_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_tempfile_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libtempfile",
+    host_supported: true,
+    crate_name: "tempfile",
+    cargo_env_compat: true,
+    cargo_pkg_version: "3.3.0",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    rustlibs: [
+        "libcfg_if",
+        "libfastrand",
+        "liblibc",
+        "libremove_dir_all",
+    ],
+    apex_available: [
+        "//apex_available:anyapex",
+        "//apex_available:platform",
+    ],
+    product_available: true,
+    vendor_available: true,
+    min_sdk_version: "29",
+}
diff --git a/crates/tempfile/Cargo.lock b/crates/tempfile/Cargo.lock
new file mode 100644
index 0000000..e328823
--- /dev/null
+++ b/crates/tempfile/Cargo.lock
@@ -0,0 +1,98 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "doc-comment"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
+
+[[package]]
+name = "fastrand"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be"
+dependencies = [
+ "instant",
+]
+
+[[package]]
+name = "instant"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+
+[[package]]
+name = "redox_syscall"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "remove_dir_all"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.3.0"
+dependencies = [
+ "cfg-if",
+ "doc-comment",
+ "fastrand",
+ "libc",
+ "redox_syscall",
+ "remove_dir_all",
+ "winapi",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/crates/tempfile/Cargo.toml b/crates/tempfile/Cargo.toml
new file mode 100644
index 0000000..253f866
--- /dev/null
+++ b/crates/tempfile/Cargo.toml
@@ -0,0 +1,43 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "tempfile"
+version = "3.3.0"
+authors = ["Steven Allen <steven@stebalien.com>", "The Rust Project Developers", "Ashley Mannix <ashleymannix@live.com.au>", "Jason White <jasonaw0@gmail.com>"]
+exclude = ["/.travis.yml", "/appveyor.yml"]
+description = "A library for managing temporary files and directories."
+homepage = "http://stebalien.com/projects/tempfile-rs"
+documentation = "https://docs.rs/tempfile"
+keywords = ["tempfile", "tmpfile", "filesystem"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/Stebalien/tempfile"
+[dependencies.cfg-if]
+version = "1"
+
+[dependencies.fastrand]
+version = "1.6.0"
+
+[dependencies.remove_dir_all]
+version = "0.5"
+[dev-dependencies.doc-comment]
+version = "0.3"
+
+[features]
+nightly = []
+[target."cfg(any(unix, target_os = \"wasi\"))".dependencies.libc]
+version = "0.2.27"
+[target."cfg(target_os = \"redox\")".dependencies.redox_syscall]
+version = "0.2.9"
+[target."cfg(windows)".dependencies.winapi]
+version = "0.3"
+features = ["fileapi", "handleapi", "winbase"]
diff --git a/crates/tempfile/LICENSE b/crates/tempfile/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/tempfile/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/tempfile/LICENSE-APACHE b/crates/tempfile/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/crates/tempfile/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/crates/tempfile/LICENSE-MIT b/crates/tempfile/LICENSE-MIT
new file mode 100644
index 0000000..0c3270f
--- /dev/null
+++ b/crates/tempfile/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2015 Steven Allen
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/tempfile/METADATA b/crates/tempfile/METADATA
new file mode 100644
index 0000000..92ec9f7
--- /dev/null
+++ b/crates/tempfile/METADATA
@@ -0,0 +1,23 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/tempfile
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
+name: "tempfile"
+description: "A library for managing temporary files and directories."
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/tempfile"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/tempfile/tempfile-3.3.0.crate"
+  }
+  version: "3.3.0"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2023
+    month: 2
+    day: 6
+  }
+}
diff --git a/crates/tempfile/MODULE_LICENSE_APACHE2 b/crates/tempfile/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/tempfile/MODULE_LICENSE_APACHE2
diff --git a/crates/tempfile/NEWS b/crates/tempfile/NEWS
new file mode 100644
index 0000000..c284424
--- /dev/null
+++ b/crates/tempfile/NEWS
@@ -0,0 +1,206 @@
+3.3.0
+=====
+
+Features:
+
+* Replace rand with fastrand for a significantly smaller dependency tree. Cryptographic randomness
+  isn't necessary for temporary file names, and isn't all that helpful either.
+* Add limited WASI support.
+* Add a function to extract the inner data from a `SpooledTempFile`.
+
+Bug Fixes:
+
+* Make it possible to persist unnamed temporary files on linux by removing the `O_EXCL` flag.
+* Fix redox minimum crate version.
+
+3.2.0
+=====
+
+Features:
+
+* Bump rand dependency to `0.8`.
+* Bump cfg-if dependency to `1.0`
+
+Other than that, this release mostly includes small cleanups and simplifications.
+
+Breaking: The minimum rust version is now `1.40.0`.
+
+3.1.0
+=====
+
+Features:
+
+* Bump rand dependency to `0.7`.
+
+Breaking: The minimum rust version is now `1.32.0`.
+
+3.0.9
+=====
+
+Documentation:
+
+* Add an example for reopening a named temporary file.
+* Flesh out the security documentation.
+
+Features:
+
+* Introduce an `append` option to the builder.
+* Errors:
+  * No longer implement the soft-deprecated `description`.
+  * Implement `source` instead of `cause`.
+
+Breaking: The minimum rust version is now 1.30.
+
+3.0.8
+=====
+
+This is a bugfix release.
+
+Fixes:
+
+* Export `PathPersistError`.
+* Fix a bug where flushing a `SpooledTempFile` to disk could fail to write part
+  of the file in some rare, yet-to-reproduced cases.
+
+3.0.7
+=====
+
+Breaking:
+
+* `Builder::prefix` and `Builder::suffix` now accept a generic `&AsRef<OsStr>`.
+  This could affect type inference.
+* Temporary files (except unnamed temporary files on Windows and Linux >= 3.11)
+  now use absolute path names. This will break programs that create temporary
+  files relative to their current working directory when they don't have the
+  search permission (x) on some ancestor directory. This is only likely to
+  affect programs with strange chroot-less filesystem sandboxes. If you believe
+  you're affected by this issue, please comment on #40.
+
+Features:
+
+* Accept anything implementing `&AsRef<OsStr>` in the builder: &OsStr, &OsString, &Path, etc.
+
+Fixes:
+
+* Fix LFS support.
+* Use absolute paths for named temporary files to guard against changes in the
+  current directory.
+* Use absolute paths when creating unnamed temporary files on platforms that
+  can't create unlinked or auto-deleted temporary files. This fixes a very
+  unlikely race where the current directory could change while the temporary
+  file is being created.
+
+Misc:
+
+* Use modern stdlib features to avoid custom unsafe code. This reduces the
+  number of unsafe blocks from 12 to 4.
+
+3.0.6
+=====
+
+* Don't hide temporary files on windows, fixing #66 and #69.
+
+3.0.5
+=====
+
+Features:
+
+* Added a spooled temporary file implementation. This temporary file variant
+  starts out as an in-memory temporary file but "rolls-over" onto disk when it
+  grows over a specified size (#68).
+* Errors are now annotated with paths to make debugging easier (#73).
+
+Misc:
+
+* The rand version has been bumped to 0.6 (#74).
+
+Bugs:
+
+* Tempfile compiles again on Redox (#75).
+
+3.0.4
+=====
+
+* Now compiles on unsupported platforms.
+
+3.0.3
+=====
+
+* update rand to 0.5
+
+3.0.2
+=====
+
+* Actually *delete* temporary files on non-Linux unix systems (thanks to
+@oliverhenshaw for the fix and a test case).
+
+3.0.1
+=====
+
+* Restore NamedTempFile::new_in
+
+3.0.0
+=====
+
+* Adds temporary directory support (@KodrAus)
+* Allow closing named temporary files without deleting them (@jasonwhite)
+
+2.2.0
+=====
+
+* Redox Support
+
+2.1.6
+=====
+
+* Remove build script and bump minimum rustc version to 1.9.0
+
+2.1.5
+=====
+
+* Don't build platform-specific dependencies on all platforms.
+* Cleanup some documentation.
+
+2.1.4
+=====
+
+* Fix crates.io tags. No interesting changes.
+
+2.1.3
+=====
+
+Export `PersistError`.
+
+2.1.2
+=====
+
+Add `Read`/`Write`/`Seek` impls on `&NamedTempFile`. This mirrors the
+implementations on `&File`. One can currently just deref to a `&File` but these
+implementations are more discoverable.
+
+2.1.1
+=====
+
+Add LFS Support.
+
+2.1.0
+=====
+
+* Implement `AsRef<File>` for `NamedTempFile` allowing named temporary files to
+  be borrowed as `File`s.
+* Add a method to convert a `NamedTempFile` to an unnamed temporary `File`.
+
+2.0.1
+=====
+
+* Arm bugfix
+
+2.0.0
+=====
+
+This release replaces `TempFile` with a `tempfile()` function that returns
+`std::fs::File` objects. These are significantly more useful because most rust
+libraries expect normal `File` objects.
+
+To continue supporting shared temporary files, this new version adds a
+`reopen()` method to `NamedTempFile`.
diff --git a/crates/tempfile/README.md b/crates/tempfile/README.md
new file mode 100644
index 0000000..1dba3a0
--- /dev/null
+++ b/crates/tempfile/README.md
@@ -0,0 +1,45 @@
+tempfile
+========
+
+[![Crate](https://img.shields.io/crates/v/tempfile.svg)](https://crates.io/crates/tempfile)
+[![Build Status](https://github.com/Stebalien/tempfile/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/Stebalien/tempfile/actions/workflows/ci.yml?query=branch%3Amaster)
+
+A secure, cross-platform, temporary file library for Rust. In addition to creating
+temporary files, this library also allows users to securely open multiple
+independent references to the same temporary file (useful for consumer/producer
+patterns and surprisingly difficult to implement securely).
+
+[Documentation](https://docs.rs/tempfile/)
+
+Usage
+-----
+
+Minimum required Rust version: 1.40.0
+
+Add this to your `Cargo.toml`:
+```toml
+[dependencies]
+tempfile = "3"
+```
+
+Example
+-------
+
+```rust
+use std::fs::File;
+use std::io::{Write, Read, Seek, SeekFrom};
+
+fn main() {
+    // Write
+    let mut tmpfile: File = tempfile::tempfile().unwrap();
+    write!(tmpfile, "Hello World!").unwrap();
+
+    // Seek to start
+    tmpfile.seek(SeekFrom::Start(0)).unwrap();
+
+    // Read
+    let mut buf = String::new();
+    tmpfile.read_to_string(&mut buf).unwrap();
+    assert_eq!("Hello World!", buf);
+}
+```
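
The README example uses an unnamed temporary file; a complementary sketch using the crate's `NamedTempFile` type (file contents here are illustrative) could look like this:

```rust
use std::io::{Read, Write};
use tempfile::NamedTempFile;

fn main() -> std::io::Result<()> {
    // Create a named temporary file; it is deleted when `file` is dropped.
    let mut file = NamedTempFile::new()?;
    write!(file, "Hello World!")?;

    // Because the file has a real path, it can be reopened as an
    // independent handle with its own cursor.
    let mut reopened = file.reopen()?;
    let mut buf = String::new();
    reopened.read_to_string(&mut buf)?;
    assert_eq!(buf, "Hello World!");

    Ok(())
}
```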
diff --git a/crates/tempfile/TEST_MAPPING b/crates/tempfile/TEST_MAPPING
new file mode 100644
index 0000000..a31ddb1
--- /dev/null
+++ b/crates/tempfile/TEST_MAPPING
@@ -0,0 +1,29 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/uwb/src"
+    },
+    {
+      "path": "packages/modules/Virtualization/apkdmverity"
+    },
+    {
+      "path": "packages/modules/Virtualization/authfs"
+    },
+    {
+      "path": "packages/modules/Virtualization/avmd"
+    },
+    {
+      "path": "packages/modules/Virtualization/libs/devicemapper"
+    },
+    {
+      "path": "packages/modules/Virtualization/microdroid_manager"
+    },
+    {
+      "path": "packages/modules/Virtualization/virtualizationmanager"
+    },
+    {
+      "path": "packages/modules/Virtualization/zipfuse"
+    }
+  ]
+}
diff --git a/crates/tempfile/cargo_embargo.json b/crates/tempfile/cargo_embargo.json
new file mode 100644
index 0000000..3e8023e
--- /dev/null
+++ b/crates/tempfile/cargo_embargo.json
@@ -0,0 +1,8 @@
+{
+  "apex_available": [
+    "//apex_available:anyapex",
+    "//apex_available:platform"
+  ],
+  "min_sdk_version": "29",
+  "run_cargo": false
+}
diff --git a/crates/tempfile/src/dir.rs b/crates/tempfile/src/dir.rs
new file mode 100644
index 0000000..d5a944b
--- /dev/null
+++ b/crates/tempfile/src/dir.rs
@@ -0,0 +1,415 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use remove_dir_all::remove_dir_all;
+use std::mem;
+use std::path::{self, Path, PathBuf};
+use std::{fmt, fs, io};
+
+use crate::error::IoResultExt;
+use crate::Builder;
+
+/// Create a new temporary directory.
+///
+/// The `tempdir` function creates a directory in the file system
+/// and returns a [`TempDir`].
+/// The directory will be automatically deleted when the `TempDir`s
+/// destructor is run.
+///
+/// # Resource Leaking
+///
+/// See [the resource leaking][resource-leaking] docs on `TempDir`.
+///
+/// # Errors
+///
+/// If the directory can not be created, `Err` is returned.
+///
+/// # Examples
+///
+/// ```
+/// use tempfile::tempdir;
+/// use std::fs::File;
+/// use std::io::{self, Write};
+///
+/// # fn main() {
+/// #     if let Err(_) = run() {
+/// #         ::std::process::exit(1);
+/// #     }
+/// # }
+/// # fn run() -> Result<(), io::Error> {
+/// // Create a directory inside of `std::env::temp_dir()`
+/// let dir = tempdir()?;
+///
+/// let file_path = dir.path().join("my-temporary-note.txt");
+/// let mut file = File::create(file_path)?;
+/// writeln!(file, "Brian was here. Briefly.")?;
+///
+/// // `tmp_dir` goes out of scope, the directory as well as
+/// // `tmp_file` will be deleted here.
+/// drop(file);
+/// dir.close()?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// [`TempDir`]: struct.TempDir.html
+/// [resource-leaking]: struct.TempDir.html#resource-leaking
+pub fn tempdir() -> io::Result<TempDir> {
+    TempDir::new()
+}
+
+/// Create a new temporary directory in a specific directory.
+///
+/// The `tempdir_in` function creates a directory in the specified directory
+/// and returns a [`TempDir`].
+/// The directory will be automatically deleted when the `TempDir`'s
+/// destructor is run.
+///
+/// # Resource Leaking
+///
+/// See [the resource leaking][resource-leaking] docs on `TempDir`.
+///
+/// # Errors
+///
+/// If the directory can not be created, `Err` is returned.
+///
+/// # Examples
+///
+/// ```
+/// use tempfile::tempdir;
+/// use std::fs::File;
+/// use std::io::{self, Write};
+///
+/// # fn main() {
+/// #     if let Err(_) = run() {
+/// #         ::std::process::exit(1);
+/// #     }
+/// # }
+/// # fn run() -> Result<(), io::Error> {
+/// // Create a directory inside of `std::env::temp_dir()`,
+/// let dir = tempdir()?;
+///
+/// let file_path = dir.path().join("my-temporary-note.txt");
+/// let mut file = File::create(file_path)?;
+/// writeln!(file, "Brian was here. Briefly.")?;
+///
+/// // `tmp_dir` goes out of scope, the directory as well as
+/// // `tmp_file` will be deleted here.
+/// drop(file);
+/// dir.close()?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// [`TempDir`]: struct.TempDir.html
+/// [resource-leaking]: struct.TempDir.html#resource-leaking
+pub fn tempdir_in<P: AsRef<Path>>(dir: P) -> io::Result<TempDir> {
+    TempDir::new_in(dir)
+}
+
+/// A directory in the filesystem that is automatically deleted when
+/// it goes out of scope.
+///
+/// The [`TempDir`] type creates a directory on the file system that
+/// is deleted once it goes out of scope. At construction, the
+/// `TempDir` creates a new directory with a randomly generated name.
+///
+/// The default constructor, [`TempDir::new()`], creates directories in
+/// the location returned by [`std::env::temp_dir()`], but `TempDir`
+/// can be configured to manage a temporary directory in any location
+/// by constructing with a [`Builder`].
+///
+/// After creating a `TempDir`, work with the file system by doing
+/// standard [`std::fs`] file system operations on its [`Path`],
+/// which can be retrieved with [`TempDir::path()`]. Once the `TempDir`
+/// value is dropped, the directory at the path will be deleted, along
+/// with any files and directories it contains. It is your responsibility
+/// to ensure that no further file system operations are attempted
+/// inside the temporary directory once it has been deleted.
+///
+/// # Resource Leaking
+///
+/// Various platform-specific conditions may cause `TempDir` to fail
+/// to delete the underlying directory. It's important to ensure that
+/// handles (like [`File`] and [`ReadDir`]) to files inside the
+/// directory are dropped before the `TempDir` goes out of scope. The
+/// `TempDir` destructor will silently ignore any errors in deleting
+/// the directory; to instead handle errors call [`TempDir::close()`].
+///
+/// Note that if the program exits before the `TempDir` destructor is
+/// run, such as via [`std::process::exit()`], by segfaulting, or by
+/// receiving a signal like `SIGINT`, then the temporary directory
+/// will not be deleted.
+///
+/// # Examples
+///
+/// Create a temporary directory with a generated name:
+///
+/// ```
+/// use std::fs::File;
+/// use std::io::Write;
+/// use tempfile::TempDir;
+///
+/// # use std::io;
+/// # fn run() -> Result<(), io::Error> {
+/// // Create a directory inside of `std::env::temp_dir()`
+/// let tmp_dir = TempDir::new()?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Create a temporary directory with a prefix in its name:
+///
+/// ```
+/// use std::fs::File;
+/// use std::io::Write;
+/// use tempfile::Builder;
+///
+/// # use std::io;
+/// # fn run() -> Result<(), io::Error> {
+/// // Create a directory inside of `std::env::temp_dir()`,
+/// // whose name will begin with 'example'.
+/// let tmp_dir = Builder::new().prefix("example").tempdir()?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// [`File`]: http://doc.rust-lang.org/std/fs/struct.File.html
+/// [`Path`]: http://doc.rust-lang.org/std/path/struct.Path.html
+/// [`ReadDir`]: http://doc.rust-lang.org/std/fs/struct.ReadDir.html
+/// [`Builder`]: struct.Builder.html
+/// [`TempDir::close()`]: struct.TempDir.html#method.close
+/// [`TempDir::new()`]: struct.TempDir.html#method.new
+/// [`TempDir::path()`]: struct.TempDir.html#method.path
+/// [`TempDir`]: struct.TempDir.html
+/// [`std::env::temp_dir()`]: https://doc.rust-lang.org/std/env/fn.temp_dir.html
+/// [`std::fs`]: http://doc.rust-lang.org/std/fs/index.html
+/// [`std::process::exit()`]: http://doc.rust-lang.org/std/process/fn.exit.html
+pub struct TempDir {
+    path: Box<Path>,
+}
+
+impl TempDir {
+    /// Attempts to make a temporary directory inside of `env::temp_dir()`.
+    ///
+    /// See [`Builder`] for more configuration.
+    ///
+    /// The directory and everything inside it will be automatically deleted
+    /// once the returned `TempDir` is destroyed.
+    ///
+    /// # Errors
+    ///
+    /// If the directory can not be created, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::fs::File;
+    /// use std::io::Write;
+    /// use tempfile::TempDir;
+    ///
+    /// # use std::io;
+    /// # fn run() -> Result<(), io::Error> {
+    /// // Create a directory inside of `std::env::temp_dir()`
+    /// let tmp_dir = TempDir::new()?;
+    ///
+    /// let file_path = tmp_dir.path().join("my-temporary-note.txt");
+    /// let mut tmp_file = File::create(file_path)?;
+    /// writeln!(tmp_file, "Brian was here. Briefly.")?;
+    ///
+    /// // `tmp_dir` goes out of scope, the directory as well as
+    /// // `tmp_file` will be deleted here.
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// [`Builder`]: struct.Builder.html
+    pub fn new() -> io::Result<TempDir> {
+        Builder::new().tempdir()
+    }
+
+    /// Attempts to make a temporary directory inside of `dir`.
+    /// The directory and everything inside it will be automatically
+    /// deleted once the returned `TempDir` is destroyed.
+    ///
+    /// # Errors
+    ///
+    /// If the directory can not be created, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::fs::{self, File};
+    /// use std::io::Write;
+    /// use tempfile::TempDir;
+    ///
+    /// # use std::io;
+    /// # fn run() -> Result<(), io::Error> {
+    /// // Create a directory inside of the current directory
+    /// let tmp_dir = TempDir::new_in(".")?;
+    /// let file_path = tmp_dir.path().join("my-temporary-note.txt");
+    /// let mut tmp_file = File::create(file_path)?;
+    /// writeln!(tmp_file, "Brian was here. Briefly.")?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn new_in<P: AsRef<Path>>(dir: P) -> io::Result<TempDir> {
+        Builder::new().tempdir_in(dir)
+    }
+
+    /// Accesses the [`Path`] to the temporary directory.
+    ///
+    /// [`Path`]: http://doc.rust-lang.org/std/path/struct.Path.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tempfile::TempDir;
+    ///
+    /// # use std::io;
+    /// # fn run() -> Result<(), io::Error> {
+    /// let tmp_path;
+    ///
+    /// {
+    ///    let tmp_dir = TempDir::new()?;
+    ///    tmp_path = tmp_dir.path().to_owned();
+    ///
+    ///    // Check that the temp directory actually exists.
+    ///    assert!(tmp_path.exists());
+    ///
+    ///    // End of `tmp_dir` scope, directory will be deleted
+    /// }
+    ///
+    /// // Temp directory should be deleted by now
+    /// assert_eq!(tmp_path.exists(), false);
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn path(&self) -> &path::Path {
+        self.path.as_ref()
+    }
+
+    /// Persist the temporary directory to disk, returning the [`PathBuf`] where it is located.
+    ///
+    /// This consumes the [`TempDir`] without deleting the directory on the filesystem, meaning that
+    /// the directory will no longer be automatically deleted.
+    ///
+    /// [`TempDir`]: struct.TempDir.html
+    /// [`PathBuf`]: http://doc.rust-lang.org/std/path/struct.PathBuf.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::fs;
+    /// use tempfile::TempDir;
+    ///
+    /// # use std::io;
+    /// # fn run() -> Result<(), io::Error> {
+    /// let tmp_dir = TempDir::new()?;
+    ///
+    /// // Persist the temporary directory to disk,
+    /// // getting the path where it is.
+    /// let tmp_path = tmp_dir.into_path();
+    ///
+    /// // Delete the temporary directory ourselves.
+    /// fs::remove_dir_all(tmp_path)?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn into_path(self) -> PathBuf {
+        // Prevent the Drop impl from being called.
+        let mut this = mem::ManuallyDrop::new(self);
+
+        // replace this.path with an empty Box, since an empty Box does not
+        // allocate any heap memory.
+        mem::replace(&mut this.path, PathBuf::new().into_boxed_path()).into()
+    }
+
+    /// Closes and removes the temporary directory, returning a `Result`.
+    ///
+    /// Although `TempDir` removes the directory on drop, in the destructor
+    /// any errors are ignored. To detect errors cleaning up the temporary
+    /// directory, call `close` instead.
+    ///
+    /// # Errors
+    ///
+    /// This function may return a variety of [`std::io::Error`]s that result from deleting
+    /// the files and directories contained within the temporary directory,
+    /// as well as from deleting the temporary directory itself. These errors
+    /// may be platform specific.
+    ///
+    /// [`std::io::Error`]: http://doc.rust-lang.org/std/io/struct.Error.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::fs::File;
+    /// use std::io::Write;
+    /// use tempfile::TempDir;
+    ///
+    /// # use std::io;
+    /// # fn run() -> Result<(), io::Error> {
+    /// // Create a directory inside of `std::env::temp_dir()`.
+    /// let tmp_dir = TempDir::new()?;
+    /// let file_path = tmp_dir.path().join("my-temporary-note.txt");
+    /// let mut tmp_file = File::create(file_path)?;
+    /// writeln!(tmp_file, "Brian was here. Briefly.")?;
+    ///
+    /// // By closing the `TempDir` explicitly we can check that it has
+    /// // been deleted successfully. If we don't close it explicitly,
+    /// // the directory will still be deleted when `tmp_dir` goes out
+    /// // of scope, but we won't know whether deleting the directory
+    /// // succeeded.
+    /// drop(tmp_file);
+    /// tmp_dir.close()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn close(mut self) -> io::Result<()> {
+        let result = remove_dir_all(self.path()).with_err_path(|| self.path());
+
+        // Set self.path to empty Box to release the memory, since an empty
+        // Box does not allocate any heap memory.
+        self.path = PathBuf::new().into_boxed_path();
+
+        // Prevent the Drop impl from being called.
+        mem::forget(self);
+
+        result
+    }
+}
+
+impl AsRef<Path> for TempDir {
+    fn as_ref(&self) -> &Path {
+        self.path()
+    }
+}
+
+impl fmt::Debug for TempDir {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("TempDir")
+            .field("path", &self.path())
+            .finish()
+    }
+}
+
+impl Drop for TempDir {
+    fn drop(&mut self) {
+        let _ = remove_dir_all(self.path());
+    }
+}
+
+pub(crate) fn create(path: PathBuf) -> io::Result<TempDir> {
+    fs::create_dir(&path)
+        .with_err_path(|| &path)
+        .map(|_| TempDir {
+            path: path.into_boxed_path(),
+        })
+}
diff --git a/crates/tempfile/src/error.rs b/crates/tempfile/src/error.rs
new file mode 100644
index 0000000..ed6b6cc
--- /dev/null
+++ b/crates/tempfile/src/error.rs
@@ -0,0 +1,45 @@
+use std::path::PathBuf;
+use std::{error, fmt, io};
+
+#[derive(Debug)]
+struct PathError {
+    path: PathBuf,
+    err: io::Error,
+}
+
+impl fmt::Display for PathError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{} at path {:?}", self.err, self.path)
+    }
+}
+
+impl error::Error for PathError {
+    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+        self.err.source()
+    }
+}
+
+pub(crate) trait IoResultExt<T> {
+    fn with_err_path<F, P>(self, path: F) -> Self
+    where
+        F: FnOnce() -> P,
+        P: Into<PathBuf>;
+}
+
+impl<T> IoResultExt<T> for Result<T, io::Error> {
+    fn with_err_path<F, P>(self, path: F) -> Self
+    where
+        F: FnOnce() -> P,
+        P: Into<PathBuf>,
+    {
+        self.map_err(|e| {
+            io::Error::new(
+                e.kind(),
+                PathError {
+                    path: path().into(),
+                    err: e,
+                },
+            )
+        })
+    }
+}
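
A standalone sketch of the same idea (not this crate's internal API): wrap an `io::Error` with the offending path while preserving its `ErrorKind`, which is what `with_err_path` does for callers elsewhere in the crate.

```rust
use std::io;
use std::path::Path;

// Attach the path to the error message but keep the original ErrorKind,
// mirroring the PathError/IoResultExt pattern above.
fn with_path(err: io::Error, path: &Path) -> io::Error {
    io::Error::new(err.kind(), format!("{} at path {:?}", err, path))
}

fn main() {
    let path = Path::new("/no/such/dir/tmp-file");
    let err = std::fs::remove_file(path)
        .map_err(|e| with_path(e, path))
        .unwrap_err();

    // The kind survives, and the message now names the offending path.
    assert_eq!(err.kind(), io::ErrorKind::NotFound);
    println!("{}", err);
}
```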
diff --git a/crates/tempfile/src/file/imp/mod.rs b/crates/tempfile/src/file/imp/mod.rs
new file mode 100644
index 0000000..fbb2bbf
--- /dev/null
+++ b/crates/tempfile/src/file/imp/mod.rs
@@ -0,0 +1,12 @@
+cfg_if::cfg_if! {
+    if #[cfg(any(unix, target_os = "redox", target_os = "wasi"))] {
+        mod unix;
+        pub use self::unix::*;
+    } else if #[cfg(windows)] {
+        mod windows;
+        pub use self::windows::*;
+    } else {
+        mod other;
+        pub use self::other::*;
+    }
+}
diff --git a/crates/tempfile/src/file/imp/other.rs b/crates/tempfile/src/file/imp/other.rs
new file mode 100644
index 0000000..d8a55a7
--- /dev/null
+++ b/crates/tempfile/src/file/imp/other.rs
@@ -0,0 +1,30 @@
+use std::fs::{File, OpenOptions};
+use std::io;
+use std::path::Path;
+
+fn not_supported<T>() -> io::Result<T> {
+    Err(io::Error::new(
+        io::ErrorKind::Other,
+        "operation not supported on this platform",
+    ))
+}
+
+pub fn create_named(_path: &Path, open_options: &mut OpenOptions) -> io::Result<File> {
+    not_supported()
+}
+
+pub fn create(_dir: &Path) -> io::Result<File> {
+    not_supported()
+}
+
+pub fn reopen(_file: &File, _path: &Path) -> io::Result<File> {
+    not_supported()
+}
+
+pub fn persist(_old_path: &Path, _new_path: &Path, _overwrite: bool) -> io::Result<()> {
+    not_supported()
+}
+
+pub fn keep(_path: &Path) -> io::Result<()> {
+    not_supported()
+}
diff --git a/crates/tempfile/src/file/imp/unix.rs b/crates/tempfile/src/file/imp/unix.rs
new file mode 100644
index 0000000..480743c
--- /dev/null
+++ b/crates/tempfile/src/file/imp/unix.rs
@@ -0,0 +1,156 @@
+use std::env;
+use std::ffi::{CString, OsStr};
+use std::fs::{self, File, OpenOptions};
+use std::io;
+cfg_if::cfg_if! {
+    if #[cfg(not(target_os = "wasi"))] {
+        use std::os::unix::ffi::OsStrExt;
+        use std::os::unix::fs::{MetadataExt, OpenOptionsExt};
+    } else {
+        use std::os::wasi::ffi::OsStrExt;
+        #[cfg(feature = "nightly")]
+        use std::os::wasi::fs::MetadataExt;
+    }
+}
+use crate::util;
+use std::path::Path;
+
+#[cfg(not(target_os = "redox"))]
+use libc::{c_char, c_int, link, rename, unlink};
+
+#[cfg(not(target_os = "redox"))]
+#[inline(always)]
+pub fn cvt_err(result: c_int) -> io::Result<c_int> {
+    if result == -1 {
+        Err(io::Error::last_os_error())
+    } else {
+        Ok(result)
+    }
+}
+
+#[cfg(target_os = "redox")]
+#[inline(always)]
+pub fn cvt_err(result: Result<usize, syscall::Error>) -> io::Result<usize> {
+    result.map_err(|err| io::Error::from_raw_os_error(err.errno))
+}
+
+// Stolen from std.
+pub fn cstr(path: &Path) -> io::Result<CString> {
+    CString::new(path.as_os_str().as_bytes())
+        .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "path contained a null"))
+}
+
+pub fn create_named(path: &Path, open_options: &mut OpenOptions) -> io::Result<File> {
+    open_options.read(true).write(true).create_new(true);
+
+    #[cfg(not(target_os = "wasi"))]
+    {
+        open_options.mode(0o600);
+    }
+
+    open_options.open(path)
+}
+
+fn create_unlinked(path: &Path) -> io::Result<File> {
+    let tmp;
+    // shadow this to decrease the lifetime. It can't live longer than `tmp`.
+    let mut path = path;
+    if !path.is_absolute() {
+        let cur_dir = env::current_dir()?;
+        tmp = cur_dir.join(path);
+        path = &tmp;
+    }
+
+    let f = create_named(path, &mut OpenOptions::new())?;
+    // don't care whether the path has already been unlinked,
+    // but perhaps there are some IO error conditions we should send up?
+    let _ = fs::remove_file(path);
+    Ok(f)
+}
+
+#[cfg(target_os = "linux")]
+pub fn create(dir: &Path) -> io::Result<File> {
+    use libc::{EISDIR, ENOENT, EOPNOTSUPP, O_TMPFILE};
+    OpenOptions::new()
+        .read(true)
+        .write(true)
+        .custom_flags(O_TMPFILE) // do not mix with `create_new(true)`
+        .open(dir)
+        .or_else(|e| {
+            match e.raw_os_error() {
+                // These are the three "not supported" error codes for O_TMPFILE.
+                Some(EOPNOTSUPP) | Some(EISDIR) | Some(ENOENT) => create_unix(dir),
+                _ => Err(e),
+            }
+        })
+}
+
+#[cfg(not(target_os = "linux"))]
+pub fn create(dir: &Path) -> io::Result<File> {
+    create_unix(dir)
+}
+
+fn create_unix(dir: &Path) -> io::Result<File> {
+    util::create_helper(
+        dir,
+        OsStr::new(".tmp"),
+        OsStr::new(""),
+        crate::NUM_RAND_CHARS,
+        |path| create_unlinked(&path),
+    )
+}
+
+#[cfg(any(not(target_os = "wasi"), feature = "nightly"))]
+pub fn reopen(file: &File, path: &Path) -> io::Result<File> {
+    let new_file = OpenOptions::new().read(true).write(true).open(path)?;
+    let old_meta = file.metadata()?;
+    let new_meta = new_file.metadata()?;
+    if old_meta.dev() != new_meta.dev() || old_meta.ino() != new_meta.ino() {
+        return Err(io::Error::new(
+            io::ErrorKind::NotFound,
+            "original tempfile has been replaced",
+        ));
+    }
+    Ok(new_file)
+}
+
+#[cfg(all(target_os = "wasi", not(feature = "nightly")))]
+pub fn reopen(_file: &File, _path: &Path) -> io::Result<File> {
+    return Err(io::Error::new(
+        io::ErrorKind::Other,
+        "this operation is supported on WASI only on nightly Rust (with `nightly` feature enabled)",
+    ));
+}
+
+#[cfg(not(target_os = "redox"))]
+pub fn persist(old_path: &Path, new_path: &Path, overwrite: bool) -> io::Result<()> {
+    unsafe {
+        let old_path = cstr(old_path)?;
+        let new_path = cstr(new_path)?;
+        if overwrite {
+            cvt_err(rename(
+                old_path.as_ptr() as *const c_char,
+                new_path.as_ptr() as *const c_char,
+            ))?;
+        } else {
+            cvt_err(link(
+                old_path.as_ptr() as *const c_char,
+                new_path.as_ptr() as *const c_char,
+            ))?;
+            // Ignore unlink errors. Can we do better?
+            // On recent linux, we can use renameat2 to do this atomically.
+            let _ = unlink(old_path.as_ptr() as *const c_char);
+        }
+        Ok(())
+    }
+}
+
+#[cfg(target_os = "redox")]
+pub fn persist(old_path: &Path, new_path: &Path, overwrite: bool) -> io::Result<()> {
+    // XXX implement when possible
+    Err(io::Error::from_raw_os_error(syscall::ENOSYS))
+}
+
+pub fn keep(_: &Path) -> io::Result<()> {
+    Ok(())
+}
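
At the public API level, the `overwrite` flag above is what separates `NamedTempFile::persist` (the `rename()` branch) from `NamedTempFile::persist_noclobber` (the `link()` + `unlink()` branch). A brief sketch, with illustrative file names:

```rust
use std::io::Write;
use tempfile::{tempdir, NamedTempFile};

fn main() -> std::io::Result<()> {
    // Keep the temp file and its destination on the same filesystem so the
    // underlying rename()/link() calls can succeed.
    let dir = tempdir()?;
    let mut tmp = NamedTempFile::new_in(dir.path())?;
    writeln!(tmp, "some data")?;

    // `persist` replaces an existing target (overwrite = true above), while
    // `persist_noclobber` would fail if `output.txt` already existed.
    let target = dir.path().join("output.txt");
    let _kept: std::fs::File = tmp.persist(&target).map_err(|e| e.error)?;

    Ok(())
}
```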
diff --git a/crates/tempfile/src/file/imp/windows.rs b/crates/tempfile/src/file/imp/windows.rs
new file mode 100644
index 0000000..71b4748
--- /dev/null
+++ b/crates/tempfile/src/file/imp/windows.rs
@@ -0,0 +1,108 @@
+use std::ffi::OsStr;
+use std::fs::{File, OpenOptions};
+use std::os::windows::ffi::OsStrExt;
+use std::os::windows::fs::OpenOptionsExt;
+use std::os::windows::io::{AsRawHandle, FromRawHandle, RawHandle};
+use std::path::Path;
+use std::{io, iter};
+
+use winapi::um::fileapi::SetFileAttributesW;
+use winapi::um::handleapi::INVALID_HANDLE_VALUE;
+use winapi::um::winbase::{MoveFileExW, ReOpenFile};
+use winapi::um::winbase::{FILE_FLAG_DELETE_ON_CLOSE, MOVEFILE_REPLACE_EXISTING};
+use winapi::um::winnt::{FILE_ATTRIBUTE_NORMAL, FILE_ATTRIBUTE_TEMPORARY};
+use winapi::um::winnt::{FILE_GENERIC_READ, FILE_GENERIC_WRITE, HANDLE};
+use winapi::um::winnt::{FILE_SHARE_DELETE, FILE_SHARE_READ, FILE_SHARE_WRITE};
+
+use crate::util;
+
+fn to_utf16(s: &Path) -> Vec<u16> {
+    s.as_os_str().encode_wide().chain(iter::once(0)).collect()
+}
+
+pub fn create_named(path: &Path, open_options: &mut OpenOptions) -> io::Result<File> {
+    open_options
+        .create_new(true)
+        .read(true)
+        .write(true)
+        .custom_flags(FILE_ATTRIBUTE_TEMPORARY)
+        .open(path)
+}
+
+pub fn create(dir: &Path) -> io::Result<File> {
+    util::create_helper(
+        dir,
+        OsStr::new(".tmp"),
+        OsStr::new(""),
+        crate::NUM_RAND_CHARS,
+        |path| {
+            OpenOptions::new()
+                .create_new(true)
+                .read(true)
+                .write(true)
+                .share_mode(0)
+                .custom_flags(FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE)
+                .open(path)
+        },
+    )
+}
+
+pub fn reopen(file: &File, _path: &Path) -> io::Result<File> {
+    let handle = file.as_raw_handle();
+    unsafe {
+        let handle = ReOpenFile(
+            handle as HANDLE,
+            FILE_GENERIC_READ | FILE_GENERIC_WRITE,
+            FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,
+            0,
+        );
+        if handle == INVALID_HANDLE_VALUE {
+            Err(io::Error::last_os_error())
+        } else {
+            Ok(FromRawHandle::from_raw_handle(handle as RawHandle))
+        }
+    }
+}
+
+pub fn keep(path: &Path) -> io::Result<()> {
+    unsafe {
+        let path_w = to_utf16(path);
+        if SetFileAttributesW(path_w.as_ptr(), FILE_ATTRIBUTE_NORMAL) == 0 {
+            Err(io::Error::last_os_error())
+        } else {
+            Ok(())
+        }
+    }
+}
+
+pub fn persist(old_path: &Path, new_path: &Path, overwrite: bool) -> io::Result<()> {
+    // TODO: We should probably do this in one shot using
+    // SetFileInformationByHandle, but that API is painful to use.
+
+    unsafe {
+        let old_path_w = to_utf16(old_path);
+        let new_path_w = to_utf16(new_path);
+
+        // If this fails, bail out rather than claim success: a file still marked as
+        // temporary doesn't have the same consistency guarantees, so we must not
+        // report it as persisted.
+        if SetFileAttributesW(old_path_w.as_ptr(), FILE_ATTRIBUTE_NORMAL) == 0 {
+            return Err(io::Error::last_os_error());
+        }
+
+        let mut flags = 0;
+
+        if overwrite {
+            flags |= MOVEFILE_REPLACE_EXISTING;
+        }
+
+        if MoveFileExW(old_path_w.as_ptr(), new_path_w.as_ptr(), flags) == 0 {
+            let e = io::Error::last_os_error();
+            // If this fails, the temporary file is now un-hidden and no longer marked temporary
+            // (slightly less efficient) but it will still work.
+            let _ = SetFileAttributesW(old_path_w.as_ptr(), FILE_ATTRIBUTE_TEMPORARY);
+            Err(e)
+        } else {
+            Ok(())
+        }
+    }
+}
diff --git a/crates/tempfile/src/file/mod.rs b/crates/tempfile/src/file/mod.rs
new file mode 100644
index 0000000..b859ced
--- /dev/null
+++ b/crates/tempfile/src/file/mod.rs
@@ -0,0 +1,973 @@
+use std::env;
+use std::error;
+use std::ffi::OsStr;
+use std::fmt;
+use std::fs::{self, File, OpenOptions};
+use std::io::{self, Read, Seek, SeekFrom, Write};
+use std::mem;
+use std::ops::Deref;
+use std::path::{Path, PathBuf};
+
+use crate::error::IoResultExt;
+use crate::Builder;
+
+mod imp;
+
+/// Create a new temporary file.
+///
+/// The file will be created in the location returned by [`std::env::temp_dir()`].
+///
+/// # Security
+///
+/// This variant is secure/reliable in the presence of a pathological temporary file cleaner.
+///
+/// # Resource Leaking
+///
+/// The temporary file will be automatically removed by the OS when the last handle to it is closed.
+/// This doesn't rely on Rust destructors being run, so will (almost) never fail to clean up the temporary file.
+///
+/// # Errors
+///
+/// If the file can not be created, `Err` is returned.
+///
+/// # Examples
+///
+/// ```
+/// use tempfile::tempfile;
+/// use std::io::{self, Write};
+///
+/// # fn main() {
+/// #     if let Err(_) = run() {
+/// #         ::std::process::exit(1);
+/// #     }
+/// # }
+/// # fn run() -> Result<(), io::Error> {
+/// // Create a file inside of `std::env::temp_dir()`.
+/// let mut file = tempfile()?;
+///
+/// writeln!(file, "Brian was here. Briefly.")?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// [`std::env::temp_dir()`]: https://doc.rust-lang.org/std/env/fn.temp_dir.html
+pub fn tempfile() -> io::Result<File> {
+    tempfile_in(&env::temp_dir())
+}
+
+/// Create a new temporary file in the specified directory.
+///
+/// # Security
+///
+/// This variant is secure/reliable in the presence of a pathological temporary file cleaner.
+/// If the temporary file isn't created in [`std::env::temp_dir()`] then temporary file cleaners aren't an issue.
+///
+/// # Resource Leaking
+///
+/// The temporary file will be automatically removed by the OS when the last handle to it is closed.
+/// This doesn't rely on Rust destructors being run, so will (almost) never fail to clean up the temporary file.
+///
+/// # Errors
+///
+/// If the file can not be created, `Err` is returned.
+///
+/// # Examples
+///
+/// ```
+/// use tempfile::tempfile_in;
+/// use std::io::{self, Write};
+///
+/// # fn main() {
+/// #     if let Err(_) = run() {
+/// #         ::std::process::exit(1);
+/// #     }
+/// # }
+/// # fn run() -> Result<(), io::Error> {
+/// // Create a file inside of the current working directory
+/// let mut file = tempfile_in("./")?;
+///
+/// writeln!(file, "Brian was here. Briefly.")?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// [`std::env::temp_dir()`]: https://doc.rust-lang.org/std/env/fn.temp_dir.html
+pub fn tempfile_in<P: AsRef<Path>>(dir: P) -> io::Result<File> {
+    imp::create(dir.as_ref())
+}
+
+/// Error returned when persisting a temporary file path fails.
+#[derive(Debug)]
+pub struct PathPersistError {
+    /// The underlying IO error.
+    pub error: io::Error,
+    /// The temporary file path that couldn't be persisted.
+    pub path: TempPath,
+}
+
+impl From<PathPersistError> for io::Error {
+    #[inline]
+    fn from(error: PathPersistError) -> io::Error {
+        error.error
+    }
+}
+
+impl From<PathPersistError> for TempPath {
+    #[inline]
+    fn from(error: PathPersistError) -> TempPath {
+        error.path
+    }
+}
+
+impl fmt::Display for PathPersistError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "failed to persist temporary file path: {}", self.error)
+    }
+}
+
+impl error::Error for PathPersistError {
+    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+        Some(&self.error)
+    }
+}
+
+/// A path to a named temporary file without an open file handle.
+///
+/// This is useful when the temporary file needs to be used by a child process,
+/// for example.
+///
+/// When dropped, the temporary file is deleted.
+pub struct TempPath {
+    path: Box<Path>,
+}
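As the doc comment above notes, `TempPath` exists mainly so the file can be handed to something else by name. A small illustrative sketch (editorial, not part of the upstream sources; `cat` is just a stand-in consumer):

```rust
use std::io::Write;
use std::process::Command;
use tempfile::NamedTempFile;

fn hand_file_to_child() -> std::io::Result<()> {
    let mut file = NamedTempFile::new()?;
    writeln!(file, "hello from the parent")?;

    // Drop the handle but keep the path; the file stays on disk until `path`
    // itself is dropped.
    let path = file.into_temp_path();

    // `TempPath` derefs to `Path`, so it can be passed wherever a path is
    // expected, e.g. as an argument to a child process.
    Command::new("cat").arg(&path).status()?;
    Ok(())
}
```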
+
+impl TempPath {
+    /// Close and remove the temporary file.
+    ///
+    /// Use this if you want to detect errors in deleting the file.
+    ///
+    /// # Errors
+    ///
+    /// If the file cannot be deleted, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # use std::io;
+    /// use tempfile::NamedTempFile;
+    ///
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// let file = NamedTempFile::new()?;
+    ///
+    /// // Close the file, but keep the path to it around.
+    /// let path = file.into_temp_path();
+    ///
+    /// // By closing the `TempPath` explicitly, we can check that it has
+    /// // been deleted successfully. If we don't close it explicitly, the
+    /// // file will still be deleted when `file` goes out of scope, but we
+    /// // won't know whether deleting the file succeeded.
+    /// path.close()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn close(mut self) -> io::Result<()> {
+        let result = fs::remove_file(&self.path).with_err_path(|| &*self.path);
+        self.path = PathBuf::new().into_boxed_path();
+        mem::forget(self);
+        result
+    }
+
+    /// Persist the temporary file at the target path.
+    ///
+    /// If a file exists at the target path, persist will atomically replace it.
+    /// If this method fails, it will return `self` in the resulting
+    /// [`PathPersistError`].
+    ///
+    /// Note: Temporary files cannot be persisted across filesystems. Also,
+    /// neither the file contents nor the containing directory is
+    /// synchronized, so the update may not yet have reached the disk when
+    /// `persist` returns.
+    ///
+    /// # Security
+    ///
+    /// Only use this method if you're positive that a temporary file cleaner
+    /// won't have deleted your file. Otherwise, you might end up persisting an
+    /// attacker-controlled file.
+    ///
+    /// # Errors
+    ///
+    /// If the file cannot be moved to the new location, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # use std::io::{self, Write};
+    /// use tempfile::NamedTempFile;
+    ///
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// let mut file = NamedTempFile::new()?;
+    /// writeln!(file, "Brian was here. Briefly.")?;
+    ///
+    /// let path = file.into_temp_path();
+    /// path.persist("./saved_file.txt")?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// [`PathPersistError`]: struct.PathPersistError.html
+    pub fn persist<P: AsRef<Path>>(mut self, new_path: P) -> Result<(), PathPersistError> {
+        match imp::persist(&self.path, new_path.as_ref(), true) {
+            Ok(_) => {
+                // Don't drop `self`. We don't want to try deleting the old
+                // temporary file path. (It'll fail, but the failure is never
+                // seen.)
+                self.path = PathBuf::new().into_boxed_path();
+                mem::forget(self);
+                Ok(())
+            }
+            Err(e) => Err(PathPersistError {
+                error: e,
+                path: self,
+            }),
+        }
+    }
+
+    /// Persist the temporary file at the target path if and only if no file exists there.
+    ///
+    /// If a file exists at the target path, fail. If this method fails, it will
+    /// return `self` in the resulting [`PathPersistError`].
+    ///
+    /// Note: Temporary files cannot be persisted across filesystems. Also note
+    /// that this method is not atomic: it can leave the original link to the
+    /// temporary file behind.
+    ///
+    /// # Security
+    ///
+    /// Only use this method if you're positive that a temporary file cleaner
+    /// won't have deleted your file. Otherwise, you might end up persisting an
+    /// attacker-controlled file.
+    ///
+    /// # Errors
+    ///
+    /// If the file cannot be moved to the new location or a file already exists
+    /// there, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # use std::io::{self, Write};
+    /// use tempfile::NamedTempFile;
+    ///
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// let mut file = NamedTempFile::new()?;
+    /// writeln!(file, "Brian was here. Briefly.")?;
+    ///
+    /// let path = file.into_temp_path();
+    /// path.persist_noclobber("./saved_file.txt")?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// [`PathPersistError`]: struct.PathPersistError.html
+    pub fn persist_noclobber<P: AsRef<Path>>(
+        mut self,
+        new_path: P,
+    ) -> Result<(), PathPersistError> {
+        match imp::persist(&self.path, new_path.as_ref(), false) {
+            Ok(_) => {
+                // Don't drop `self`. We don't want to try deleting the old
+                // temporary file path. (It'll fail, but the failure is never
+                // seen.)
+                self.path = PathBuf::new().into_boxed_path();
+                mem::forget(self);
+                Ok(())
+            }
+            Err(e) => Err(PathPersistError {
+                error: e,
+                path: self,
+            }),
+        }
+    }
+
+    /// Keep the temporary file from being deleted. This function will turn the
+    /// temporary file into a non-temporary file without moving it.
+    ///
+    /// # Errors
+    ///
+    /// On some platforms (e.g., Windows), we need to mark the file as
+    /// non-temporary. This operation could fail.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # use std::io::{self, Write};
+    /// use tempfile::NamedTempFile;
+    ///
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// let mut file = NamedTempFile::new()?;
+    /// writeln!(file, "Brian was here. Briefly.")?;
+    ///
+    /// let path = file.into_temp_path();
+    /// let path = path.keep()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// [`PathPersistError`]: struct.PathPersistError.html
+    pub fn keep(mut self) -> Result<PathBuf, PathPersistError> {
+        match imp::keep(&self.path) {
+            Ok(_) => {
+                // Don't drop `self`. We don't want to try deleting the old
+                // temporary file path. (It'll fail, but the failure is never
+                // seen.)
+                let path = mem::replace(&mut self.path, PathBuf::new().into_boxed_path());
+                mem::forget(self);
+                Ok(path.into())
+            }
+            Err(e) => Err(PathPersistError {
+                error: e,
+                path: self,
+            }),
+        }
+    }
+
+    /// Create a new TempPath from an existing path. This can be done even if no
+    /// file exists at the given path.
+    ///
+    /// This is mostly useful for interacting with libraries and external
+    /// components that provide files to be consumed or expect a path with no
+    /// existing file to be given.
+    pub fn from_path(path: impl Into<PathBuf>) -> Self {
+        Self {
+            path: path.into().into_boxed_path(),
+        }
+    }
+}
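One detail worth calling out from the `persist*`/`keep` methods above: on failure the `TempPath` is returned inside the error, so the temporary file is neither leaked nor silently lost. A hedged sketch (the destination path is a made-up placeholder):

```rust
use tempfile::{NamedTempFile, TempPath};

fn persist_or_recover() -> std::io::Result<()> {
    let path = NamedTempFile::new()?.into_temp_path();

    // "/read-only/target.txt" is a placeholder destination chosen to fail.
    match path.persist_noclobber("/read-only/target.txt") {
        Ok(()) => println!("persisted"),
        Err(e) => {
            eprintln!("could not persist: {}", e.error);
            // The original TempPath comes back out of the error and will still
            // be cleaned up when it is dropped.
            let _still_temporary: TempPath = e.path;
        }
    }
    Ok(())
}
```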
+
+impl fmt::Debug for TempPath {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.path.fmt(f)
+    }
+}
+
+impl Drop for TempPath {
+    fn drop(&mut self) {
+        let _ = fs::remove_file(&self.path);
+    }
+}
+
+impl Deref for TempPath {
+    type Target = Path;
+
+    fn deref(&self) -> &Path {
+        &self.path
+    }
+}
+
+impl AsRef<Path> for TempPath {
+    fn as_ref(&self) -> &Path {
+        &self.path
+    }
+}
+
+impl AsRef<OsStr> for TempPath {
+    fn as_ref(&self) -> &OsStr {
+        self.path.as_os_str()
+    }
+}
+
+/// A named temporary file.
+///
+/// The default constructor, [`NamedTempFile::new()`], creates files in
+/// the location returned by [`std::env::temp_dir()`], but `NamedTempFile`
+/// can be configured to manage a temporary file in any location
+/// by constructing with [`NamedTempFile::new_in()`].
+///
+/// # Security
+///
+/// Most operating systems employ temporary file cleaners to delete old
+/// temporary files. Unfortunately these temporary file cleaners don't always
+/// reliably _detect_ whether the temporary file is still being used.
+///
+/// Specifically, the following sequence of events can happen:
+///
+/// 1. A user creates a temporary file with `NamedTempFile::new()`.
+/// 2. Time passes.
+/// 3. The temporary file cleaner deletes (unlinks) the temporary file from the
+///    filesystem.
+/// 4. Some other program creates a new file to replace this deleted temporary
+///    file.
+/// 5. The user tries to re-open the temporary file (in the same program or in a
+///    different program) by path. Unfortunately, they'll end up opening the
+///    file created by the other program, not the original file.
+///
+/// ## Operating System Specific Concerns
+///
+/// The behavior of temporary files and temporary file cleaners differ by
+/// operating system.
+///
+/// ### Windows
+///
+/// On Windows, open files _can't_ be deleted. This removes most of the concerns
+/// around temporary file cleaners.
+///
+/// Furthermore, temporary files are, by default, created in per-user temporary
+/// file directories so only an application running as the same user would be
+/// able to interfere (which they could do anyway). However, an application
+/// running as the same user can still _accidentally_ re-create deleted
+/// temporary files if the number of random bytes in the temporary file name is
+/// too small.
+///
+/// So, the only real concern on Windows is:
+///
+/// 1. Opening a named temporary file in a world-writable directory.
+/// 2. Using the `into_temp_path()` and/or `into_parts()` APIs to close the file
+///    handle without deleting the underlying file.
+/// 3. Continuing to use the file by path.
+///
+/// ### UNIX
+///
+/// Unlike on Windows, UNIX (and UNIX-like) systems allow open files to be
+/// "unlinked" (deleted).
+///
+/// #### MacOS
+///
+/// Like on Windows, temporary files are created in per-user temporary file
+/// directories by default so calling `NamedTempFile::new()` should be
+/// relatively safe.
+///
+/// #### Linux
+///
+/// Unfortunately, most _Linux_ distributions don't create per-user temporary
+/// file directories. Worse, systemd's tmpfiles daemon (a common temporary file
+/// cleaner) will happily remove open temporary files if they haven't been
+/// modified within the last 10 days.
+///
+/// # Resource Leaking
+///
+/// If the program exits before the `NamedTempFile` destructor is
+/// run, such as via [`std::process::exit()`], by segfaulting, or by
+/// receiving a signal like `SIGINT`, then the temporary file
+/// will not be deleted.
+///
+/// Use the [`tempfile()`] function unless you absolutely need a named file.
+///
+/// [`tempfile()`]: fn.tempfile.html
+/// [`NamedTempFile::new()`]: #method.new
+/// [`NamedTempFile::new_in()`]: #method.new_in
+/// [`std::env::temp_dir()`]: https://doc.rust-lang.org/std/env/fn.temp_dir.html
+/// [`std::process::exit()`]: http://doc.rust-lang.org/std/process/fn.exit.html
+pub struct NamedTempFile {
+    path: TempPath,
+    file: File,
+}
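The security section above boils down to "prefer the open handle over the path". A minimal editorial sketch contrasting the two access patterns (nothing here is prescribed by the crate; it just restates the docs):

```rust
use std::fs::File;
use std::io::Write;
use tempfile::NamedTempFile;

fn by_path_vs_by_handle() -> std::io::Result<()> {
    let mut tmp = NamedTempFile::new()?;
    write!(tmp, "data")?;

    // Racy on systems with temporary file cleaners: between creation and this
    // call, the path may have been unlinked and re-created by someone else.
    let _by_path = File::open(tmp.path())?;

    // Not racy: `reopen` goes through the already-open handle, so it is
    // guaranteed to refer to the same underlying file.
    let _by_handle = tmp.reopen()?;
    Ok(())
}
```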
+
+impl fmt::Debug for NamedTempFile {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "NamedTempFile({:?})", self.path)
+    }
+}
+
+impl AsRef<Path> for NamedTempFile {
+    #[inline]
+    fn as_ref(&self) -> &Path {
+        self.path()
+    }
+}
+
+/// Error returned when persisting a temporary file fails.
+#[derive(Debug)]
+pub struct PersistError {
+    /// The underlying IO error.
+    pub error: io::Error,
+    /// The temporary file that couldn't be persisted.
+    pub file: NamedTempFile,
+}
+
+impl From<PersistError> for io::Error {
+    #[inline]
+    fn from(error: PersistError) -> io::Error {
+        error.error
+    }
+}
+
+impl From<PersistError> for NamedTempFile {
+    #[inline]
+    fn from(error: PersistError) -> NamedTempFile {
+        error.file
+    }
+}
+
+impl fmt::Display for PersistError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "failed to persist temporary file: {}", self.error)
+    }
+}
+
+impl error::Error for PersistError {
+    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+        Some(&self.error)
+    }
+}
+
+impl NamedTempFile {
+    /// Create a new named temporary file.
+    ///
+    /// See [`Builder`] for more configuration.
+    ///
+    /// # Security
+    ///
+    /// This will create a temporary file in the default temporary file
+    /// directory (platform dependent). This has security implications on many
+    /// platforms so please read the security section of this type's
+    /// documentation.
+    ///
+    /// Reasons to use this method:
+    ///
+    ///   1. The file has a short lifetime and your temporary file cleaner is
+    ///      sane (doesn't delete recently accessed files).
+    ///
+    ///   2. You trust every user on your system (i.e. you are the only user).
+    ///
+    ///   3. You have disabled your system's temporary file cleaner or verified
+    ///      that your system doesn't have a temporary file cleaner.
+    ///
+    /// Reasons not to use this method:
+    ///
+    ///   1. You'll fix it later. No you won't.
+    ///
+    ///   2. You don't care about the security of the temporary file. If none of
+    ///      the "reasons to use this method" apply, referring to a temporary
+    ///      file by name may allow an attacker to create/overwrite your
+    ///      non-temporary files. There are exceptions but if you don't already
+    ///      know them, don't use this method.
+    ///
+    /// # Errors
+    ///
+    /// If the file can not be created, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// Create a named temporary file and write some data to it:
+    ///
+    /// ```no_run
+    /// # use std::io::{self, Write};
+    /// use tempfile::NamedTempFile;
+    ///
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), ::std::io::Error> {
+    /// let mut file = NamedTempFile::new()?;
+    ///
+    /// writeln!(file, "Brian was here. Briefly.")?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// [`Builder`]: struct.Builder.html
+    pub fn new() -> io::Result<NamedTempFile> {
+        Builder::new().tempfile()
+    }
+
+    /// Create a new named temporary file in the specified directory.
+    ///
+    /// See [`NamedTempFile::new()`] for details.
+    ///
+    /// [`NamedTempFile::new()`]: #method.new
+    pub fn new_in<P: AsRef<Path>>(dir: P) -> io::Result<NamedTempFile> {
+        Builder::new().tempfile_in(dir)
+    }
+
+    /// Get the temporary file's path.
+    ///
+    /// # Security
+    ///
+    /// Referring to a temporary file's path may not be secure in all cases.
+    /// Please read the security section in the top-level documentation of this
+    /// type for details.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # use std::io::{self, Write};
+    /// use tempfile::NamedTempFile;
+    ///
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), ::std::io::Error> {
+    /// let file = NamedTempFile::new()?;
+    ///
+    /// println!("{:?}", file.path());
+    /// # Ok(())
+    /// # }
+    /// ```
+    #[inline]
+    pub fn path(&self) -> &Path {
+        &self.path
+    }
+
+    /// Close and remove the temporary file.
+    ///
+    /// Use this if you want to detect errors in deleting the file.
+    ///
+    /// # Errors
+    ///
+    /// If the file cannot be deleted, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # use std::io;
+    /// use tempfile::NamedTempFile;
+    ///
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// let file = NamedTempFile::new()?;
+    ///
+    /// // By closing the `NamedTempFile` explicitly, we can check that it has
+    /// // been deleted successfully. If we don't close it explicitly,
+    /// // the file will still be deleted when `file` goes out
+    /// // of scope, but we won't know whether deleting the file
+    /// // succeeded.
+    /// file.close()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn close(self) -> io::Result<()> {
+        let NamedTempFile { path, .. } = self;
+        path.close()
+    }
+
+    /// Persist the temporary file at the target path.
+    ///
+    /// If a file exists at the target path, persist will atomically replace it.
+    /// If this method fails, it will return `self` in the resulting
+    /// [`PersistError`].
+    ///
+    /// Note: Temporary files cannot be persisted across filesystems. Also,
+    /// neither the file contents nor the containing directory is
+    /// synchronized, so the update may not yet have reached the disk when
+    /// `persist` returns.
+    ///
+    /// # Security
+    ///
+    /// This method persists the temporary file using its path and may not be
+    /// secure in all cases. Please read the security section in the top-level
+    /// documentation of this type for details.
+    ///
+    /// # Errors
+    ///
+    /// If the file cannot be moved to the new location, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # use std::io::{self, Write};
+    /// use tempfile::NamedTempFile;
+    ///
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// let file = NamedTempFile::new()?;
+    ///
+    /// let mut persisted_file = file.persist("./saved_file.txt")?;
+    /// writeln!(persisted_file, "Brian was here. Briefly.")?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// [`PersistError`]: struct.PersistError.html
+    pub fn persist<P: AsRef<Path>>(self, new_path: P) -> Result<File, PersistError> {
+        let NamedTempFile { path, file } = self;
+        match path.persist(new_path) {
+            Ok(_) => Ok(file),
+            Err(err) => {
+                let PathPersistError { error, path } = err;
+                Err(PersistError {
+                    file: NamedTempFile { path, file },
+                    error,
+                })
+            }
+        }
+    }
+
+    /// Persist the temporary file at the target path if and only if no file exists there.
+    ///
+    /// If a file exists at the target path, fail. If this method fails, it will
+    /// return `self` in the resulting `PersistError`.
+    ///
+    /// Note: Temporary files cannot be persisted across filesystems. Also note
+    /// that this method is not atomic: it can leave the original link to the
+    /// temporary file behind.
+    ///
+    /// # Security
+    ///
+    /// This method persists the temporary file using its path and may not be
+    /// secure in all cases. Please read the security section in the top-level
+    /// documentation of this type for details.
+    ///
+    /// # Errors
+    ///
+    /// If the file cannot be moved to the new location or a file already exists there,
+    /// `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # use std::io::{self, Write};
+    /// use tempfile::NamedTempFile;
+    ///
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// let file = NamedTempFile::new()?;
+    ///
+    /// let mut persisted_file = file.persist_noclobber("./saved_file.txt")?;
+    /// writeln!(persisted_file, "Brian was here. Briefly.")?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn persist_noclobber<P: AsRef<Path>>(self, new_path: P) -> Result<File, PersistError> {
+        let NamedTempFile { path, file } = self;
+        match path.persist_noclobber(new_path) {
+            Ok(_) => Ok(file),
+            Err(err) => {
+                let PathPersistError { error, path } = err;
+                Err(PersistError {
+                    file: NamedTempFile { path, file },
+                    error,
+                })
+            }
+        }
+    }
+
+    /// Keep the temporary file from being deleted. This function will turn the
+    /// temporary file into a non-temporary file without moving it.
+    ///
+    /// # Errors
+    ///
+    /// On some platforms (e.g., Windows), we need to mark the file as
+    /// non-temporary. This operation could fail.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # use std::io::{self, Write};
+    /// use tempfile::NamedTempFile;
+    ///
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// let mut file = NamedTempFile::new()?;
+    /// writeln!(file, "Brian was here. Briefly.")?;
+    ///
+    /// let (file, path) = file.keep()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// [`PathPersistError`]: struct.PathPersistError.html
+    pub fn keep(self) -> Result<(File, PathBuf), PersistError> {
+        let (file, path) = (self.file, self.path);
+        match path.keep() {
+            Ok(path) => Ok((file, path)),
+            Err(PathPersistError { error, path }) => Err(PersistError {
+                file: NamedTempFile { path, file },
+                error,
+            }),
+        }
+    }
+
+    /// Securely reopen the temporary file.
+    ///
+    /// This function is useful when you need multiple independent handles to
+    /// the same file. It's perfectly fine to drop the original `NamedTempFile`
+    /// while holding on to `File`s returned by this function; the `File`s will
+    /// remain usable. However, they may not be nameable.
+    ///
+    /// # Errors
+    ///
+    /// If the file cannot be reopened, `Err` is returned.
+    ///
+    /// # Security
+    ///
+    /// Unlike `File::open(my_temp_file.path())`, `NamedTempFile::reopen()`
+    /// guarantees that the re-opened file is the _same_ file, even in the
+    /// presence of pathological temporary file cleaners.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # use std::io;
+    /// use tempfile::NamedTempFile;
+    ///
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// let file = NamedTempFile::new()?;
+    ///
+    /// let another_handle = file.reopen()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn reopen(&self) -> io::Result<File> {
+        imp::reopen(self.as_file(), NamedTempFile::path(self))
+            .with_err_path(|| NamedTempFile::path(self))
+    }
+
+    /// Get a reference to the underlying file.
+    pub fn as_file(&self) -> &File {
+        &self.file
+    }
+
+    /// Get a mutable reference to the underlying file.
+    pub fn as_file_mut(&mut self) -> &mut File {
+        &mut self.file
+    }
+
+    /// Convert the temporary file into a `std::fs::File`.
+    ///
+    /// The file is removed from the filesystem, but the returned `File`
+    /// handle stays usable until it is dropped.
+    pub fn into_file(self) -> File {
+        self.file
+    }
+
+    /// Closes the file, leaving only the temporary file path.
+    ///
+    /// This is useful when another process must be able to open the temporary
+    /// file.
+    pub fn into_temp_path(self) -> TempPath {
+        self.path
+    }
+
+    /// Converts the named temporary file into its constituent parts.
+    ///
+    /// Note: When the path is dropped, the file is deleted but the file handle
+    /// is still usable.
+    pub fn into_parts(self) -> (File, TempPath) {
+        (self.file, self.path)
+    }
+}
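An illustrative sketch (editorial) of the `into_parts` note above: dropping the `TempPath` removes the file by name, while the detached handle keeps working.

```rust
use std::io::{Read, Seek, SeekFrom, Write};
use tempfile::NamedTempFile;

fn split_handle_and_path() -> std::io::Result<()> {
    let mut file = NamedTempFile::new()?;
    write!(file, "abc")?;

    let (mut handle, path) = file.into_parts();
    drop(path); // deletes the file by name; the open handle is unaffected

    handle.seek(SeekFrom::Start(0))?;
    let mut buf = String::new();
    handle.read_to_string(&mut buf)?;
    assert_eq!(buf, "abc");
    Ok(())
}
```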
+
+impl Read for NamedTempFile {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        self.as_file_mut().read(buf).with_err_path(|| self.path())
+    }
+}
+
+impl<'a> Read for &'a NamedTempFile {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        self.as_file().read(buf).with_err_path(|| self.path())
+    }
+}
+
+impl Write for NamedTempFile {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.as_file_mut().write(buf).with_err_path(|| self.path())
+    }
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> {
+        self.as_file_mut().flush().with_err_path(|| self.path())
+    }
+}
+
+impl<'a> Write for &'a NamedTempFile {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.as_file().write(buf).with_err_path(|| self.path())
+    }
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> {
+        self.as_file().flush().with_err_path(|| self.path())
+    }
+}
+
+impl Seek for NamedTempFile {
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        self.as_file_mut().seek(pos).with_err_path(|| self.path())
+    }
+}
+
+impl<'a> Seek for &'a NamedTempFile {
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        self.as_file().seek(pos).with_err_path(|| self.path())
+    }
+}
+
+#[cfg(unix)]
+impl std::os::unix::io::AsRawFd for NamedTempFile {
+    #[inline]
+    fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
+        self.as_file().as_raw_fd()
+    }
+}
+
+#[cfg(windows)]
+impl std::os::windows::io::AsRawHandle for NamedTempFile {
+    #[inline]
+    fn as_raw_handle(&self) -> std::os::windows::io::RawHandle {
+        self.as_file().as_raw_handle()
+    }
+}
+
+pub(crate) fn create_named(
+    mut path: PathBuf,
+    open_options: &mut OpenOptions,
+) -> io::Result<NamedTempFile> {
+    // Make the path absolute. Otherwise, changing directories could cause us to
+    // delete the wrong file.
+    if !path.is_absolute() {
+        path = env::current_dir()?.join(path)
+    }
+    imp::create_named(&path, open_options)
+        .with_err_path(|| path.clone())
+        .map(|file| NamedTempFile {
+            path: TempPath {
+                path: path.into_boxed_path(),
+            },
+            file,
+        })
+}
diff --git a/crates/tempfile/src/lib.rs b/crates/tempfile/src/lib.rs
new file mode 100644
index 0000000..c38ca7b
--- /dev/null
+++ b/crates/tempfile/src/lib.rs
@@ -0,0 +1,537 @@
+//! Temporary files and directories.
+//!
+//! - Use the [`tempfile()`] function for temporary files
+//! - Use the [`tempdir()`] function for temporary directories.
+//!
+//! # Design
+//!
+//! This crate provides several approaches to creating temporary files and directories.
+//! [`tempfile()`] relies on the OS to remove the temporary file once the last handle is closed.
+//! [`TempDir`] and [`NamedTempFile`] both rely on Rust destructors for cleanup.
+//!
+//! When choosing between the temporary file variants, prefer `tempfile`
+//! unless you either need to know the file's path or to be able to persist it.
+//!
+//! ## Resource Leaking
+//!
+//! `tempfile` will (almost) never fail to clean up temporary resources, but `TempDir` and `NamedTempFile` will if
+//! their destructors don't run. This is because `tempfile` relies on the OS to clean up the
+//! underlying file, while `TempDir` and `NamedTempFile` rely on their destructors to do so.
+//!
+//! ## Security
+//!
+//! In the presence of a pathological temporary file cleaner, relying on file paths is unsafe because
+//! a temporary file cleaner could delete the temporary file, which an attacker could then replace.
+//!
+//! `tempfile` doesn't rely on file paths so this isn't an issue. However, `NamedTempFile` does
+//! rely on file paths for _some_ operations. See the security documentation on
+//! the `NamedTempFile` type for more information.
+//!
+//! ## Early drop pitfall
+//!
+//! Because `TempDir` and `NamedTempFile` rely on their destructors for cleanup, this can lead
+//! to an unexpected early removal of the directory/file, usually when working with APIs which are
+//! generic over `AsRef<Path>`. Consider the following example:
+//!
+//! ```no_run
+//! # use tempfile::tempdir;
+//! # use std::io;
+//! # use std::process::Command;
+//! # fn main() {
+//! #     if let Err(_) = run() {
+//! #         ::std::process::exit(1);
+//! #     }
+//! # }
+//! # fn run() -> Result<(), io::Error> {
+//! // Create a directory inside of `std::env::temp_dir()`.
+//! let temp_dir = tempdir()?;
+//!
+//! // Spawn the `touch` command inside the temporary directory and collect the exit status
+//! // Note that `temp_dir` is **not** moved into `current_dir`, but passed as a reference
+//! let exit_status = Command::new("touch").arg("tmp").current_dir(&temp_dir).status()?;
+//! assert!(exit_status.success());
+//!
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! This works because a reference to `temp_dir` is passed to `current_dir`, resulting in the
+//! destructor of `temp_dir` being run after the `Command` has finished execution. Moving the
+//! `TempDir` into the `current_dir` call would result in the `TempDir` being converted into
+//! an internal representation, with the original value being dropped and the directory thus
+//! being deleted, before the command can be executed.
+//!
+//! The `touch` command would fail with a `No such file or directory` error.
+//!
+//! ## Examples
+//!
+//! Create a temporary file and write some data into it:
+//!
+//! ```
+//! use tempfile::tempfile;
+//! use std::io::{self, Write};
+//!
+//! # fn main() {
+//! #     if let Err(_) = run() {
+//! #         ::std::process::exit(1);
+//! #     }
+//! # }
+//! # fn run() -> Result<(), io::Error> {
+//! // Create a file inside of `std::env::temp_dir()`.
+//! let mut file = tempfile()?;
+//!
+//! writeln!(file, "Brian was here. Briefly.")?;
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! Create a named temporary file and open an independent file handle:
+//!
+//! ```
+//! use tempfile::NamedTempFile;
+//! use std::io::{self, Write, Read};
+//!
+//! # fn main() {
+//! #     if let Err(_) = run() {
+//! #         ::std::process::exit(1);
+//! #     }
+//! # }
+//! # fn run() -> Result<(), io::Error> {
+//! let text = "Brian was here. Briefly.";
+//!
+//! // Create a file inside of `std::env::temp_dir()`.
+//! let mut file1 = NamedTempFile::new()?;
+//!
+//! // Re-open it.
+//! let mut file2 = file1.reopen()?;
+//!
+//! // Write some test data to the first handle.
+//! file1.write_all(text.as_bytes())?;
+//!
+//! // Read the test data using the second handle.
+//! let mut buf = String::new();
+//! file2.read_to_string(&mut buf)?;
+//! assert_eq!(buf, text);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! Create a temporary directory and add a file to it:
+//!
+//! ```
+//! use tempfile::tempdir;
+//! use std::fs::File;
+//! use std::io::{self, Write};
+//!
+//! # fn main() {
+//! #     if let Err(_) = run() {
+//! #         ::std::process::exit(1);
+//! #     }
+//! # }
+//! # fn run() -> Result<(), io::Error> {
+//! // Create a directory inside of `std::env::temp_dir()`.
+//! let dir = tempdir()?;
+//!
+//! let file_path = dir.path().join("my-temporary-note.txt");
+//! let mut file = File::create(file_path)?;
+//! writeln!(file, "Brian was here. Briefly.")?;
+//!
+//! // By closing the `TempDir` explicitly, we can check that it has
+//! // been deleted successfully. If we don't close it explicitly,
+//! // the directory will still be deleted when `dir` goes out
+//! // of scope, but we won't know whether deleting the directory
+//! // succeeded.
+//! drop(file);
+//! dir.close()?;
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! [`tempfile()`]: fn.tempfile.html
+//! [`tempdir()`]: fn.tempdir.html
+//! [`TempDir`]: struct.TempDir.html
+//! [`NamedTempFile`]: struct.NamedTempFile.html
+//! [`std::env::temp_dir()`]: https://doc.rust-lang.org/std/env/fn.temp_dir.html
+
+#![doc(
+    html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
+    html_favicon_url = "https://www.rust-lang.org/favicon.ico",
+    html_root_url = "https://docs.rs/tempfile/3.1.0"
+)]
+#![cfg_attr(test, deny(warnings))]
+#![deny(rust_2018_idioms)]
+#![allow(clippy::redundant_field_names)]
+#![cfg_attr(feature = "nightly", feature(wasi_ext))]
+
+#[cfg(doctest)]
+doc_comment::doctest!("../README.md");
+
+const NUM_RETRIES: u32 = 1 << 31;
+const NUM_RAND_CHARS: usize = 6;
+
+use std::ffi::OsStr;
+use std::fs::OpenOptions;
+use std::path::Path;
+use std::{env, io};
+
+mod dir;
+mod error;
+mod file;
+mod spooled;
+mod util;
+
+pub use crate::dir::{tempdir, tempdir_in, TempDir};
+pub use crate::file::{
+    tempfile, tempfile_in, NamedTempFile, PathPersistError, PersistError, TempPath,
+};
+pub use crate::spooled::{spooled_tempfile, SpooledTempFile};
+
+/// Create a new temporary file or directory with custom parameters.
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct Builder<'a, 'b> {
+    random_len: usize,
+    prefix: &'a OsStr,
+    suffix: &'b OsStr,
+    append: bool,
+}
+
+impl<'a, 'b> Default for Builder<'a, 'b> {
+    fn default() -> Self {
+        Builder {
+            random_len: crate::NUM_RAND_CHARS,
+            prefix: OsStr::new(".tmp"),
+            suffix: OsStr::new(""),
+            append: false,
+        }
+    }
+}
+
+impl<'a, 'b> Builder<'a, 'b> {
+    /// Create a new `Builder`.
+    ///
+    /// # Examples
+    ///
+    /// Create a named temporary file and write some data into it:
+    ///
+    /// ```
+    /// # use std::io;
+    /// # use std::ffi::OsStr;
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// use tempfile::Builder;
+    ///
+    /// let named_tempfile = Builder::new()
+    ///     .prefix("my-temporary-note")
+    ///     .suffix(".txt")
+    ///     .rand_bytes(5)
+    ///     .tempfile()?;
+    ///
+    /// let name = named_tempfile
+    ///     .path()
+    ///     .file_name().and_then(OsStr::to_str);
+    ///
+    /// if let Some(name) = name {
+    ///     assert!(name.starts_with("my-temporary-note"));
+    ///     assert!(name.ends_with(".txt"));
+    ///     assert_eq!(name.len(), "my-temporary-note.txt".len() + 5);
+    /// }
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// Create a temporary directory and add a file to it:
+    ///
+    /// ```
+    /// # use std::io::{self, Write};
+    /// # use std::fs::File;
+    /// # use std::ffi::OsStr;
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// use tempfile::Builder;
+    ///
+    /// let dir = Builder::new()
+    ///     .prefix("my-temporary-dir")
+    ///     .rand_bytes(5)
+    ///     .tempdir()?;
+    ///
+    /// let file_path = dir.path().join("my-temporary-note.txt");
+    /// let mut file = File::create(file_path)?;
+    /// writeln!(file, "Brian was here. Briefly.")?;
+    ///
+    /// // By closing the `TempDir` explicitly, we can check that it has
+    /// // been deleted successfully. If we don't close it explicitly,
+    /// // the directory will still be deleted when `dir` goes out
+    /// // of scope, but we won't know whether deleting the directory
+    /// // succeeded.
+    /// drop(file);
+    /// dir.close()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Set a custom filename prefix.
+    ///
+    /// Path separators are legal but not advisable.
+    /// Default: `.tmp`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::io;
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// # use tempfile::Builder;
+    /// let named_tempfile = Builder::new()
+    ///     .prefix("my-temporary-note")
+    ///     .tempfile()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn prefix<S: AsRef<OsStr> + ?Sized>(&mut self, prefix: &'a S) -> &mut Self {
+        self.prefix = prefix.as_ref();
+        self
+    }
+
+    /// Set a custom filename suffix.
+    ///
+    /// Path separators are legal but not advisable.
+    /// Default: empty.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::io;
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// # use tempfile::Builder;
+    /// let named_tempfile = Builder::new()
+    ///     .suffix(".txt")
+    ///     .tempfile()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn suffix<S: AsRef<OsStr> + ?Sized>(&mut self, suffix: &'b S) -> &mut Self {
+        self.suffix = suffix.as_ref();
+        self
+    }
+
+    /// Set the number of random bytes.
+    ///
+    /// Default: `6`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::io;
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// # use tempfile::Builder;
+    /// let named_tempfile = Builder::new()
+    ///     .rand_bytes(5)
+    ///     .tempfile()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn rand_bytes(&mut self, rand: usize) -> &mut Self {
+        self.random_len = rand;
+        self
+    }
+
+    /// Set the file to be opened in append mode.
+    ///
+    /// Default: `false`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::io;
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// # use tempfile::Builder;
+    /// let named_tempfile = Builder::new()
+    ///     .append(true)
+    ///     .tempfile()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn append(&mut self, append: bool) -> &mut Self {
+        self.append = append;
+        self
+    }
+
+    /// Create the named temporary file.
+    ///
+    /// # Security
+    ///
+    /// See [the security][security] docs on `NamedTempFile`.
+    ///
+    /// # Resource leaking
+    ///
+    /// See [the resource leaking][resource-leaking] docs on `NamedTempFile`.
+    ///
+    /// # Errors
+    ///
+    /// If the file cannot be created, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::io;
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// # use tempfile::Builder;
+    /// let tempfile = Builder::new().tempfile()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// [security]: struct.NamedTempFile.html#security
+    /// [resource-leaking]: struct.NamedTempFile.html#resource-leaking
+    pub fn tempfile(&self) -> io::Result<NamedTempFile> {
+        self.tempfile_in(&env::temp_dir())
+    }
+
+    /// Create the named temporary file in the specified directory.
+    ///
+    /// # Security
+    ///
+    /// See [the security][security] docs on `NamedTempFile`.
+    ///
+    /// # Resource leaking
+    ///
+    /// See [the resource leaking][resource-leaking] docs on `NamedTempFile`.
+    ///
+    /// # Errors
+    ///
+    /// If the file cannot be created, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use std::io;
+    /// # fn main() {
+    /// #     if let Err(_) = run() {
+    /// #         ::std::process::exit(1);
+    /// #     }
+    /// # }
+    /// # fn run() -> Result<(), io::Error> {
+    /// # use tempfile::Builder;
+    /// let tempfile = Builder::new().tempfile_in("./")?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// [security]: struct.NamedTempFile.html#security
+    /// [resource-leaking]: struct.NamedTempFile.html#resource-leaking
+    pub fn tempfile_in<P: AsRef<Path>>(&self, dir: P) -> io::Result<NamedTempFile> {
+        util::create_helper(
+            dir.as_ref(),
+            self.prefix,
+            self.suffix,
+            self.random_len,
+            |path| file::create_named(path, OpenOptions::new().append(self.append)),
+        )
+    }
+
+    /// Attempts to make a temporary directory inside of `env::temp_dir()` whose
+    /// name will have the configured prefix. The directory and
+    /// everything inside it will be automatically deleted once the
+    /// returned `TempDir` is destroyed.
+    ///
+    /// # Resource leaking
+    ///
+    /// See [the resource leaking][resource-leaking] docs on `TempDir`.
+    ///
+    /// # Errors
+    ///
+    /// If the directory can not be created, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::fs::File;
+    /// use std::io::Write;
+    /// use tempfile::Builder;
+    ///
+    /// # use std::io;
+    /// # fn run() -> Result<(), io::Error> {
+    /// let tmp_dir = Builder::new().tempdir()?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// [resource-leaking]: struct.TempDir.html#resource-leaking
+    pub fn tempdir(&self) -> io::Result<TempDir> {
+        self.tempdir_in(&env::temp_dir())
+    }
+
+    /// Attempts to make a temporary directory inside of `dir`.
+    /// The directory and everything inside it will be automatically
+    /// deleted once the returned `TempDir` is destroyed.
+    ///
+    /// # Resource leaking
+    ///
+    /// See [the resource leaking][resource-leaking] docs on `TempDir`.
+    ///
+    /// # Errors
+    ///
+    /// If the directory can not be created, `Err` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::fs::{self, File};
+    /// use std::io::Write;
+    /// use tempfile::Builder;
+    ///
+    /// # use std::io;
+    /// # fn run() -> Result<(), io::Error> {
+    /// let tmp_dir = Builder::new().tempdir_in("./")?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    ///
+    /// [resource-leaking]: struct.TempDir.html#resource-leaking
+    pub fn tempdir_in<P: AsRef<Path>>(&self, dir: P) -> io::Result<TempDir> {
+        let storage;
+        let mut dir = dir.as_ref();
+        if !dir.is_absolute() {
+            let cur_dir = env::current_dir()?;
+            storage = cur_dir.join(dir);
+            dir = &storage;
+        }
+
+        util::create_helper(dir, self.prefix, self.suffix, self.random_len, dir::create)
+    }
+}
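For contrast with the "Early drop pitfall" section in the module docs above, this editorial sketch shows the broken variant: moving the `TempDir` (rather than borrowing it) into `current_dir` drops it, and the directory is gone before the command runs. Exact error behaviour may differ by platform.

```rust
use std::process::Command;
use tempfile::tempdir;

fn early_drop_pitfall() -> std::io::Result<()> {
    let temp_dir = tempdir()?;

    // `temp_dir` is moved into `current_dir`, so the `TempDir` is dropped (and
    // the directory removed) before `status()` spawns the process.
    let result = Command::new("touch")
        .arg("tmp")
        .current_dir(temp_dir)
        .status();

    // On most platforms the spawn itself fails: the working directory is gone.
    assert!(result.is_err());
    Ok(())
}
```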
diff --git a/crates/tempfile/src/spooled.rs b/crates/tempfile/src/spooled.rs
new file mode 100644
index 0000000..ed6c16f
--- /dev/null
+++ b/crates/tempfile/src/spooled.rs
@@ -0,0 +1,158 @@
+use crate::file::tempfile;
+use std::fs::File;
+use std::io::{self, Cursor, Read, Seek, SeekFrom, Write};
+
+/// A wrapper for the two states of a `SpooledTempFile`.
+#[derive(Debug)]
+pub enum SpooledData {
+    InMemory(Cursor<Vec<u8>>),
+    OnDisk(File),
+}
+
+/// An object that behaves like a regular temporary file, but keeps data in
+/// memory until it reaches a configured size, at which point the data is
+/// written to a temporary file on disk, and further operations use the file
+/// on disk.
+#[derive(Debug)]
+pub struct SpooledTempFile {
+    max_size: usize,
+    inner: SpooledData,
+}
+
+/// Create a new spooled temporary file.
+///
+/// # Security
+///
+/// This variant is secure/reliable in the presence of a pathological temporary
+/// file cleaner.
+///
+/// # Resource Leaking
+///
+/// The temporary file will be automatically removed by the OS when the last
+/// handle to it is closed. This doesn't rely on Rust destructors being run, so
+/// will (almost) never fail to clean up the temporary file.
+///
+/// # Examples
+///
+/// ```
+/// use tempfile::spooled_tempfile;
+/// use std::io::{self, Write};
+///
+/// # fn main() {
+/// #     if let Err(_) = run() {
+/// #         ::std::process::exit(1);
+/// #     }
+/// # }
+/// # fn run() -> Result<(), io::Error> {
+/// let mut file = spooled_tempfile(15);
+///
+/// writeln!(file, "short line")?;
+/// assert!(!file.is_rolled());
+///
+/// // as a result of this write call, the size of the data will exceed
+/// // `max_size` (15), so it will be written to a temporary file on disk,
+/// // and the in-memory buffer will be dropped
+/// writeln!(file, "marvin gardens")?;
+/// assert!(file.is_rolled());
+///
+/// # Ok(())
+/// # }
+/// ```
+#[inline]
+pub fn spooled_tempfile(max_size: usize) -> SpooledTempFile {
+    SpooledTempFile::new(max_size)
+}
+
+impl SpooledTempFile {
+    pub fn new(max_size: usize) -> SpooledTempFile {
+        SpooledTempFile {
+            max_size: max_size,
+            inner: SpooledData::InMemory(Cursor::new(Vec::new())),
+        }
+    }
+
+    /// Returns true if the file has been rolled over to disk.
+    pub fn is_rolled(&self) -> bool {
+        match self.inner {
+            SpooledData::InMemory(_) => false,
+            SpooledData::OnDisk(_) => true,
+        }
+    }
+
+    /// Rolls over to a file on disk, regardless of current size. Does nothing
+    /// if already rolled over.
+    pub fn roll(&mut self) -> io::Result<()> {
+        if !self.is_rolled() {
+            let mut file = tempfile()?;
+            if let SpooledData::InMemory(ref mut cursor) = self.inner {
+                file.write_all(cursor.get_ref())?;
+                file.seek(SeekFrom::Start(cursor.position()))?;
+            }
+            self.inner = SpooledData::OnDisk(file);
+        }
+        Ok(())
+    }
+
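+    /// Truncates or extends the underlying data to `size` bytes, rolling over
+    /// to a file on disk first if `size` exceeds `max_size`.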
+    pub fn set_len(&mut self, size: u64) -> Result<(), io::Error> {
+        if size as usize > self.max_size {
+            self.roll()?; // does nothing if already rolled over
+        }
+        match self.inner {
+            SpooledData::InMemory(ref mut cursor) => {
+                cursor.get_mut().resize(size as usize, 0);
+                Ok(())
+            }
+            SpooledData::OnDisk(ref mut file) => file.set_len(size),
+        }
+    }
+
+    /// Consumes and returns the inner `SpooledData` type.
+    pub fn into_inner(self) -> SpooledData {
+        self.inner
+    }
+}
+
+impl Read for SpooledTempFile {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        match self.inner {
+            SpooledData::InMemory(ref mut cursor) => cursor.read(buf),
+            SpooledData::OnDisk(ref mut file) => file.read(buf),
+        }
+    }
+}
+
+impl Write for SpooledTempFile {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        // roll over to file if necessary
+        let mut rolling = false;
+        if let SpooledData::InMemory(ref mut cursor) = self.inner {
+            rolling = cursor.position() as usize + buf.len() > self.max_size;
+        }
+        if rolling {
+            self.roll()?;
+        }
+
+        // write the bytes
+        match self.inner {
+            SpooledData::InMemory(ref mut cursor) => cursor.write(buf),
+            SpooledData::OnDisk(ref mut file) => file.write(buf),
+        }
+    }
+
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> {
+        match self.inner {
+            SpooledData::InMemory(ref mut cursor) => cursor.flush(),
+            SpooledData::OnDisk(ref mut file) => file.flush(),
+        }
+    }
+}
+
+impl Seek for SpooledTempFile {
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        match self.inner {
+            SpooledData::InMemory(ref mut cursor) => cursor.seek(pos),
+            SpooledData::OnDisk(ref mut file) => file.seek(pos),
+        }
+    }
+}
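An editorial sketch of the rollover behaviour implemented in `Write::write` above: once a write would push the in-memory cursor past `max_size`, the buffered bytes are copied to a real temporary file and all later I/O goes through it.

```rust
use std::io::{Read, Seek, SeekFrom, Write};
use tempfile::spooled_tempfile;

fn rollover_demo() -> std::io::Result<()> {
    let mut file = spooled_tempfile(8);

    file.write_all(b"1234")?; // 4 bytes: still buffered in memory
    assert!(!file.is_rolled());

    file.write_all(b"56789")?; // would exceed 8 bytes: spills to disk
    assert!(file.is_rolled());

    file.seek(SeekFrom::Start(0))?;
    let mut contents = String::new();
    file.read_to_string(&mut contents)?;
    assert_eq!(contents, "123456789");
    Ok(())
}
```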
diff --git a/crates/tempfile/src/util.rs b/crates/tempfile/src/util.rs
new file mode 100644
index 0000000..8c91b9c
--- /dev/null
+++ b/crates/tempfile/src/util.rs
@@ -0,0 +1,48 @@
+use fastrand;
+use std::ffi::{OsStr, OsString};
+use std::path::{Path, PathBuf};
+use std::{io, iter::repeat_with};
+
+use crate::error::IoResultExt;
+
+fn tmpname(prefix: &OsStr, suffix: &OsStr, rand_len: usize) -> OsString {
+    let mut buf = OsString::with_capacity(prefix.len() + suffix.len() + rand_len);
+    buf.push(prefix);
+    let mut char_buf = [0u8; 4];
+    for c in repeat_with(fastrand::alphanumeric).take(rand_len) {
+        buf.push(c.encode_utf8(&mut char_buf));
+    }
+    buf.push(suffix);
+    buf
+}
+
+pub fn create_helper<F, R>(
+    base: &Path,
+    prefix: &OsStr,
+    suffix: &OsStr,
+    random_len: usize,
+    f: F,
+) -> io::Result<R>
+where
+    F: Fn(PathBuf) -> io::Result<R>,
+{
+    let num_retries = if random_len != 0 {
+        crate::NUM_RETRIES
+    } else {
+        1
+    };
+
+    for _ in 0..num_retries {
+        let path = base.join(tmpname(prefix, suffix, random_len));
+        return match f(path) {
+            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => continue,
+            res => res,
+        };
+    }
+
+    Err(io::Error::new(
+        io::ErrorKind::AlreadyExists,
+        "too many temporary files exist",
+    ))
+    .with_err_path(|| base)
+}
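`create_helper` above only retries when the name is randomized; with `rand_bytes(0)` it gets a single attempt, so a name collision surfaces as `AlreadyExists`. A hedged sketch (the `fixed-name` prefix is arbitrary):

```rust
use std::io::ErrorKind;
use tempfile::{tempdir, Builder};

fn collision_with_fixed_name() -> std::io::Result<()> {
    let dir = tempdir()?;

    // Zero random bytes -> a fully deterministic file name -> one attempt.
    let _first = Builder::new()
        .prefix("fixed-name")
        .rand_bytes(0)
        .tempfile_in(dir.path())?;

    // The second call collides with the first and reports `AlreadyExists`.
    let err = Builder::new()
        .prefix("fixed-name")
        .rand_bytes(0)
        .tempfile_in(dir.path())
        .unwrap_err();
    assert_eq!(err.kind(), ErrorKind::AlreadyExists);
    Ok(())
}
```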
diff --git a/crates/tempfile/tests/namedtempfile.rs b/crates/tempfile/tests/namedtempfile.rs
new file mode 100644
index 0000000..d2c7da2
--- /dev/null
+++ b/crates/tempfile/tests/namedtempfile.rs
@@ -0,0 +1,328 @@
+#![deny(rust_2018_idioms)]
+
+use std::env;
+use std::ffi::{OsStr, OsString};
+use std::fs::File;
+use std::io::{Read, Seek, SeekFrom, Write};
+use std::path::{Path, PathBuf};
+use tempfile::{tempdir, Builder, NamedTempFile, TempPath};
+
+fn exists<P: AsRef<Path>>(path: P) -> bool {
+    std::fs::metadata(path.as_ref()).is_ok()
+}
+
+#[test]
+fn test_basic() {
+    let mut tmpfile = NamedTempFile::new().unwrap();
+    write!(tmpfile, "abcde").unwrap();
+    tmpfile.seek(SeekFrom::Start(0)).unwrap();
+    let mut buf = String::new();
+    tmpfile.read_to_string(&mut buf).unwrap();
+    assert_eq!("abcde", buf);
+}
+
+#[test]
+fn test_deleted() {
+    let tmpfile = NamedTempFile::new().unwrap();
+    let path = tmpfile.path().to_path_buf();
+    assert!(exists(&path));
+    drop(tmpfile);
+    assert!(!exists(&path));
+}
+
+#[test]
+fn test_persist() {
+    let mut tmpfile = NamedTempFile::new().unwrap();
+    let old_path = tmpfile.path().to_path_buf();
+    let persist_path = env::temp_dir().join("persisted_temporary_file");
+    write!(tmpfile, "abcde").unwrap();
+    {
+        assert!(exists(&old_path));
+        let mut f = tmpfile.persist(&persist_path).unwrap();
+        assert!(!exists(&old_path));
+
+        // Check original file
+        f.seek(SeekFrom::Start(0)).unwrap();
+        let mut buf = String::new();
+        f.read_to_string(&mut buf).unwrap();
+        assert_eq!("abcde", buf);
+    }
+
+    {
+        // Try opening it at the new path.
+        let mut f = File::open(&persist_path).unwrap();
+        f.seek(SeekFrom::Start(0)).unwrap();
+        let mut buf = String::new();
+        f.read_to_string(&mut buf).unwrap();
+        assert_eq!("abcde", buf);
+    }
+    std::fs::remove_file(&persist_path).unwrap();
+}
+
+#[test]
+fn test_persist_noclobber() {
+    let mut tmpfile = NamedTempFile::new().unwrap();
+    let old_path = tmpfile.path().to_path_buf();
+    let persist_target = NamedTempFile::new().unwrap();
+    let persist_path = persist_target.path().to_path_buf();
+    write!(tmpfile, "abcde").unwrap();
+    assert!(exists(&old_path));
+    {
+        tmpfile = tmpfile.persist_noclobber(&persist_path).unwrap_err().into();
+        assert!(exists(&old_path));
+        std::fs::remove_file(&persist_path).unwrap();
+        drop(persist_target);
+    }
+    tmpfile.persist_noclobber(&persist_path).unwrap();
+    // Try opening it at the new path.
+    let mut f = File::open(&persist_path).unwrap();
+    f.seek(SeekFrom::Start(0)).unwrap();
+    let mut buf = String::new();
+    f.read_to_string(&mut buf).unwrap();
+    assert_eq!("abcde", buf);
+    std::fs::remove_file(&persist_path).unwrap();
+}
+
+#[test]
+fn test_customnamed() {
+    let tmpfile = Builder::new()
+        .prefix("tmp")
+        .suffix(&".rs".to_string())
+        .rand_bytes(12)
+        .tempfile()
+        .unwrap();
+    let name = tmpfile.path().file_name().unwrap().to_str().unwrap();
+    assert!(name.starts_with("tmp"));
+    assert!(name.ends_with(".rs"));
+    assert_eq!(name.len(), 18);
+}
+
+#[test]
+fn test_append() {
+    let mut tmpfile = Builder::new().append(true).tempfile().unwrap();
+    tmpfile.write(b"a").unwrap();
+    tmpfile.seek(SeekFrom::Start(0)).unwrap();
+    tmpfile.write(b"b").unwrap();
+
+    tmpfile.seek(SeekFrom::Start(0)).unwrap();
+    let mut buf = vec![0u8; 1];
+    tmpfile.read_exact(&mut buf).unwrap();
+    assert_eq!(buf, b"a");
+}
+
+#[test]
+fn test_reopen() {
+    let source = NamedTempFile::new().unwrap();
+    let mut first = source.reopen().unwrap();
+    let mut second = source.reopen().unwrap();
+    drop(source);
+
+    write!(first, "abcde").expect("write failed");
+    let mut buf = String::new();
+    second.read_to_string(&mut buf).unwrap();
+    assert_eq!("abcde", buf);
+}
+
+#[test]
+fn test_into_file() {
+    let mut file = NamedTempFile::new().unwrap();
+    let path = file.path().to_owned();
+    write!(file, "abcde").expect("write failed");
+
+    assert!(path.exists());
+    let mut file = file.into_file();
+    assert!(!path.exists());
+
+    file.seek(SeekFrom::Start(0)).unwrap();
+    let mut buf = String::new();
+    file.read_to_string(&mut buf).unwrap();
+    assert_eq!("abcde", buf);
+}
+
+#[test]
+fn test_immut() {
+    let tmpfile = NamedTempFile::new().unwrap();
+    (&tmpfile).write_all(b"abcde").unwrap();
+    (&tmpfile).seek(SeekFrom::Start(0)).unwrap();
+    let mut buf = String::new();
+    (&tmpfile).read_to_string(&mut buf).unwrap();
+    assert_eq!("abcde", buf);
+}
+
+#[test]
+fn test_temppath() {
+    let mut tmpfile = NamedTempFile::new().unwrap();
+    write!(tmpfile, "abcde").unwrap();
+
+    let path = tmpfile.into_temp_path();
+    assert!(path.is_file());
+}
+
+#[test]
+fn test_temppath_persist() {
+    let mut tmpfile = NamedTempFile::new().unwrap();
+    write!(tmpfile, "abcde").unwrap();
+
+    let tmppath = tmpfile.into_temp_path();
+
+    let old_path = tmppath.to_path_buf();
+    let persist_path = env::temp_dir().join("persisted_temppath_file");
+
+    {
+        assert!(exists(&old_path));
+        tmppath.persist(&persist_path).unwrap();
+        assert!(!exists(&old_path));
+    }
+
+    {
+        // Try opening it at the new path.
+        let mut f = File::open(&persist_path).unwrap();
+        f.seek(SeekFrom::Start(0)).unwrap();
+        let mut buf = String::new();
+        f.read_to_string(&mut buf).unwrap();
+        assert_eq!("abcde", buf);
+    }
+
+    std::fs::remove_file(&persist_path).unwrap();
+}
+
+#[test]
+fn test_temppath_persist_noclobber() {
+    let mut tmpfile = NamedTempFile::new().unwrap();
+    write!(tmpfile, "abcde").unwrap();
+
+    let mut tmppath = tmpfile.into_temp_path();
+
+    let old_path = tmppath.to_path_buf();
+    let persist_target = NamedTempFile::new().unwrap();
+    let persist_path = persist_target.path().to_path_buf();
+
+    assert!(exists(&old_path));
+
+    {
+        tmppath = tmppath.persist_noclobber(&persist_path).unwrap_err().into();
+        assert!(exists(&old_path));
+        std::fs::remove_file(&persist_path).unwrap();
+        drop(persist_target);
+    }
+
+    tmppath.persist_noclobber(&persist_path).unwrap();
+
+    // Try opening it at the new path.
+    let mut f = File::open(&persist_path).unwrap();
+    f.seek(SeekFrom::Start(0)).unwrap();
+    let mut buf = String::new();
+    f.read_to_string(&mut buf).unwrap();
+    assert_eq!("abcde", buf);
+    std::fs::remove_file(&persist_path).unwrap();
+}
+
+#[test]
+fn temp_path_from_existing() {
+    let tmp_dir = tempdir().unwrap();
+    let tmp_file_path_1 = tmp_dir.path().join("testfile1");
+    let tmp_file_path_2 = tmp_dir.path().join("testfile2");
+
+    File::create(&tmp_file_path_1).unwrap();
+    assert!(tmp_file_path_1.exists(), "Test file 1 hasn't been created");
+
+    File::create(&tmp_file_path_2).unwrap();
+    assert!(tmp_file_path_2.exists(), "Test file 2 hasn't been created");
+
+    let tmp_path = TempPath::from_path(&tmp_file_path_1);
+    assert!(
+        tmp_file_path_1.exists(),
+        "Test file has been deleted before dropping TempPath"
+    );
+
+    drop(tmp_path);
+    assert!(
+        !tmp_file_path_1.exists(),
+        "Test file exists after dropping TempPath"
+    );
+    assert!(
+        tmp_file_path_2.exists(),
+        "Test file 2 has been deleted before dropping TempDir"
+    );
+}
+
+#[test]
+#[allow(unreachable_code)]
+fn temp_path_from_argument_types() {
+    // This just has to compile
+    return;
+
+    TempPath::from_path("");
+    TempPath::from_path(String::new());
+    TempPath::from_path(OsStr::new(""));
+    TempPath::from_path(OsString::new());
+    TempPath::from_path(Path::new(""));
+    TempPath::from_path(PathBuf::new());
+    TempPath::from_path(PathBuf::new().into_boxed_path());
+}
+
+#[test]
+fn test_write_after_close() {
+    let path = NamedTempFile::new().unwrap().into_temp_path();
+    File::create(path).unwrap().write_all(b"test").unwrap();
+}
+
+#[test]
+fn test_change_dir() {
+    env::set_current_dir(env::temp_dir()).unwrap();
+    let tmpfile = NamedTempFile::new_in(".").unwrap();
+    let path = env::current_dir().unwrap().join(tmpfile.path());
+    env::set_current_dir("/").unwrap();
+    drop(tmpfile);
+    assert!(!exists(path))
+}
+
+#[test]
+fn test_into_parts() {
+    let mut file = NamedTempFile::new().unwrap();
+    write!(file, "abcd").expect("write failed");
+
+    let (mut file, temp_path) = file.into_parts();
+
+    let path = temp_path.to_path_buf();
+
+    assert!(path.exists());
+    drop(temp_path);
+    assert!(!path.exists());
+
+    write!(file, "efgh").expect("write failed");
+
+    file.seek(SeekFrom::Start(0)).unwrap();
+    let mut buf = String::new();
+    file.read_to_string(&mut buf).unwrap();
+    assert_eq!("abcdefgh", buf);
+}
+
+#[test]
+fn test_keep() {
+    let mut tmpfile = NamedTempFile::new().unwrap();
+    write!(tmpfile, "abcde").unwrap();
+    let (mut f, temp_path) = tmpfile.into_parts();
+    let path;
+    {
+        assert!(exists(&temp_path));
+        path = temp_path.keep().unwrap();
+        assert!(exists(&path));
+
+        // Check original file
+        f.seek(SeekFrom::Start(0)).unwrap();
+        let mut buf = String::new();
+        f.read_to_string(&mut buf).unwrap();
+        assert_eq!("abcde", buf);
+    }
+
+    {
+        // Try opening it again.
+        let mut f = File::open(&path).unwrap();
+        f.seek(SeekFrom::Start(0)).unwrap();
+        let mut buf = String::new();
+        f.read_to_string(&mut buf).unwrap();
+        assert_eq!("abcde", buf);
+    }
+    std::fs::remove_file(&path).unwrap();
+}
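
A condensed view of the lifecycle these tests exercise: a `NamedTempFile` is deleted on drop unless it is persisted or its path is kept. This is an illustrative sketch only, using the same public API the tests above rely on:

```rust
use std::io::Write;
use tempfile::NamedTempFile;

fn main() -> std::io::Result<()> {
    let mut tmp = NamedTempFile::new()?;
    writeln!(tmp, "scratch data")?;

    // Detach the open handle from the path. Dropping the TempPath would
    // still delete the file, so keep() is what disables the cleanup.
    let temp_path = tmp.into_temp_path();
    let final_path = temp_path.keep().expect("failed to keep temporary file");
    println!("kept at {}", final_path.display());

    std::fs::remove_file(final_path)?;
    Ok(())
}
```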
diff --git a/crates/tempfile/tests/spooled.rs b/crates/tempfile/tests/spooled.rs
new file mode 100644
index 0000000..288d1e6
--- /dev/null
+++ b/crates/tempfile/tests/spooled.rs
@@ -0,0 +1,307 @@
+#![deny(rust_2018_idioms)]
+
+use std::io::{Read, Seek, SeekFrom, Write};
+
+use tempfile::{spooled_tempfile, SpooledTempFile};
+
+#[test]
+fn test_automatic_rollover() {
+    let mut t = spooled_tempfile(10);
+    let mut buf = Vec::new();
+
+    assert!(!t.is_rolled());
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 0);
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 0);
+    assert_eq!(buf.as_slice(), b"");
+    buf.clear();
+
+    assert_eq!(t.write(b"abcde").unwrap(), 5);
+
+    assert!(!t.is_rolled());
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 5);
+    assert_eq!(buf.as_slice(), b"abcde");
+
+    assert_eq!(t.write(b"fghijklmno").unwrap(), 10);
+
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 15);
+    assert!(t.is_rolled());
+}
+
+#[test]
+fn test_explicit_rollover() {
+    let mut t = SpooledTempFile::new(100);
+    assert_eq!(t.write(b"abcdefghijklmnopqrstuvwxyz").unwrap(), 26);
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 26);
+    assert!(!t.is_rolled());
+
+    // roll over explicitly
+    assert!(t.roll().is_ok());
+    assert!(t.is_rolled());
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 26);
+
+    let mut buf = Vec::new();
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 0);
+    assert_eq!(buf.as_slice(), b"");
+    buf.clear();
+
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 26);
+    assert_eq!(buf.as_slice(), b"abcdefghijklmnopqrstuvwxyz");
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 26);
+}
+
+// called by test_seek_{buffer, file}
+// assumes t is empty and offset is 0 to start
+fn test_seek(t: &mut SpooledTempFile) {
+    assert_eq!(t.write(b"abcdefghijklmnopqrstuvwxyz").unwrap(), 26);
+
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 26); // tell()
+    assert_eq!(t.seek(SeekFrom::Current(-1)).unwrap(), 25);
+    assert_eq!(t.seek(SeekFrom::Current(1)).unwrap(), 26);
+    assert_eq!(t.seek(SeekFrom::Current(1)).unwrap(), 27);
+    assert_eq!(t.seek(SeekFrom::Current(-27)).unwrap(), 0);
+    assert!(t.seek(SeekFrom::Current(-1)).is_err());
+    assert!(t.seek(SeekFrom::Current(-1245)).is_err());
+
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    assert_eq!(t.seek(SeekFrom::Start(1)).unwrap(), 1);
+    assert_eq!(t.seek(SeekFrom::Start(26)).unwrap(), 26);
+    assert_eq!(t.seek(SeekFrom::Start(27)).unwrap(), 27);
+    // // these are build errors
+    // assert!(t.seek(SeekFrom::Start(-1)).is_err());
+    // assert!(t.seek(SeekFrom::Start(-1000)).is_err());
+
+    assert_eq!(t.seek(SeekFrom::End(0)).unwrap(), 26);
+    assert_eq!(t.seek(SeekFrom::End(-1)).unwrap(), 25);
+    assert_eq!(t.seek(SeekFrom::End(-26)).unwrap(), 0);
+    assert!(t.seek(SeekFrom::End(-27)).is_err());
+    assert!(t.seek(SeekFrom::End(-99)).is_err());
+    assert_eq!(t.seek(SeekFrom::End(1)).unwrap(), 27);
+    assert_eq!(t.seek(SeekFrom::End(1)).unwrap(), 27);
+}
+
+#[test]
+fn test_seek_buffer() {
+    let mut t = spooled_tempfile(100);
+    test_seek(&mut t);
+}
+
+#[test]
+fn test_seek_file() {
+    let mut t = SpooledTempFile::new(10);
+    test_seek(&mut t);
+}
+
+fn test_seek_read(t: &mut SpooledTempFile) {
+    assert_eq!(t.write(b"abcdefghijklmnopqrstuvwxyz").unwrap(), 26);
+
+    let mut buf = Vec::new();
+
+    // we're at the end
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 0);
+    assert_eq!(buf.as_slice(), b"");
+    buf.clear();
+
+    // seek to start, read whole thing
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 26);
+    assert_eq!(buf.as_slice(), b"abcdefghijklmnopqrstuvwxyz");
+    buf.clear();
+
+    // now we're at the end again
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 26); // tell()
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 0);
+    assert_eq!(buf.as_slice(), b"");
+    buf.clear();
+
+    // seek to somewhere in the middle, read a bit
+    assert_eq!(t.seek(SeekFrom::Start(5)).unwrap(), 5);
+    let mut buf = [0; 5];
+    assert!(t.read_exact(&mut buf).is_ok());
+    assert_eq!(buf, *b"fghij");
+
+    // read again from current spot
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 10); // tell()
+    assert!(t.read_exact(&mut buf).is_ok());
+    assert_eq!(buf, *b"klmno");
+
+    let mut buf = [0; 15];
+    // partial read
+    assert_eq!(t.read(&mut buf).unwrap(), 11);
+    assert_eq!(buf[0..11], *b"pqrstuvwxyz");
+
+    // try to read off the end: UnexpectedEof
+    assert!(t.read_exact(&mut buf).is_err());
+}
+
+#[test]
+fn test_seek_read_buffer() {
+    let mut t = spooled_tempfile(100);
+    test_seek_read(&mut t);
+}
+
+#[test]
+fn test_seek_read_file() {
+    let mut t = SpooledTempFile::new(10);
+    test_seek_read(&mut t);
+}
+
+fn test_overwrite_middle(t: &mut SpooledTempFile) {
+    assert_eq!(t.write(b"abcdefghijklmnopqrstuvwxyz").unwrap(), 26);
+
+    assert_eq!(t.seek(SeekFrom::Start(10)).unwrap(), 10);
+    assert_eq!(t.write(b"0123456789").unwrap(), 10);
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+
+    let mut buf = Vec::new();
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 26);
+    assert_eq!(buf.as_slice(), b"abcdefghij0123456789uvwxyz");
+}
+
+#[test]
+fn test_overwrite_middle_of_buffer() {
+    let mut t = spooled_tempfile(100);
+    test_overwrite_middle(&mut t);
+}
+
+#[test]
+fn test_overwrite_middle_of_file() {
+    let mut t = SpooledTempFile::new(10);
+    test_overwrite_middle(&mut t);
+}
+
+#[test]
+fn test_overwrite_and_extend_buffer() {
+    let mut t = spooled_tempfile(100);
+    assert_eq!(t.write(b"abcdefghijklmnopqrstuvwxyz").unwrap(), 26);
+    assert_eq!(t.seek(SeekFrom::End(-5)).unwrap(), 21);
+    assert_eq!(t.write(b"0123456789").unwrap(), 10);
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    let mut buf = Vec::new();
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 31);
+    assert_eq!(buf.as_slice(), b"abcdefghijklmnopqrstu0123456789");
+    assert!(!t.is_rolled());
+}
+
+#[test]
+fn test_overwrite_and_extend_rollover() {
+    let mut t = SpooledTempFile::new(20);
+    assert_eq!(t.write(b"abcdefghijklmno").unwrap(), 15);
+    assert!(!t.is_rolled());
+    assert_eq!(t.seek(SeekFrom::End(-5)).unwrap(), 10);
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 10); // tell()
+    assert!(!t.is_rolled());
+    assert_eq!(t.write(b"0123456789)!@#$%^&*(").unwrap(), 20);
+    assert!(t.is_rolled());
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 30); // tell()
+    let mut buf = Vec::new();
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 30);
+    assert_eq!(buf.as_slice(), b"abcdefghij0123456789)!@#$%^&*(");
+}
+
+fn test_sparse(t: &mut SpooledTempFile) {
+    assert_eq!(t.write(b"abcde").unwrap(), 5);
+    assert_eq!(t.seek(SeekFrom::Current(5)).unwrap(), 10);
+    assert_eq!(t.write(b"klmno").unwrap(), 5);
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    let mut buf = Vec::new();
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 15);
+    assert_eq!(buf.as_slice(), b"abcde\0\0\0\0\0klmno");
+}
+
+#[test]
+fn test_sparse_buffer() {
+    let mut t = spooled_tempfile(100);
+    test_sparse(&mut t);
+}
+
+#[test]
+fn test_sparse_file() {
+    let mut t = SpooledTempFile::new(1);
+    test_sparse(&mut t);
+}
+
+#[test]
+fn test_sparse_write_rollover() {
+    let mut t = spooled_tempfile(10);
+    assert_eq!(t.write(b"abcde").unwrap(), 5);
+    assert!(!t.is_rolled());
+    assert_eq!(t.seek(SeekFrom::Current(5)).unwrap(), 10);
+    assert!(!t.is_rolled());
+    assert_eq!(t.write(b"klmno").unwrap(), 5);
+    assert!(t.is_rolled());
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    let mut buf = Vec::new();
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 15);
+    assert_eq!(buf.as_slice(), b"abcde\0\0\0\0\0klmno");
+}
+
+fn test_set_len(t: &mut SpooledTempFile) {
+    let mut buf: Vec<u8> = Vec::new();
+
+    assert_eq!(t.write(b"abcdefghijklmnopqrstuvwxyz").unwrap(), 26);
+
+    // truncate to 10 bytes
+    assert!(t.set_len(10).is_ok());
+
+    // position should not have moved
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 26); // tell()
+
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 0);
+    assert_eq!(buf.as_slice(), b"");
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 26); // tell()
+    buf.clear();
+
+    // read whole thing
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 10);
+    assert_eq!(buf.as_slice(), b"abcdefghij");
+    buf.clear();
+
+    // set_len to expand beyond the end
+    assert!(t.set_len(40).is_ok());
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 10); // tell()
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 40);
+    assert_eq!(
+        buf.as_slice(),
+        &b"abcdefghij\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"[..]
+    );
+}
+
+#[test]
+fn test_set_len_buffer() {
+    let mut t = spooled_tempfile(100);
+    test_set_len(&mut t);
+}
+
+#[test]
+fn test_set_len_file() {
+    let mut t = SpooledTempFile::new(10);
+    // Use a small cap so the 26-byte write rolls over to a real file,
+    // matching the other *_file variants in this module.
+    test_set_len(&mut t);
+}
+
+#[test]
+fn test_set_len_rollover() {
+    let mut buf: Vec<u8> = Vec::new();
+
+    let mut t = spooled_tempfile(10);
+    assert_eq!(t.write(b"abcde").unwrap(), 5);
+    assert!(!t.is_rolled());
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 5); // tell()
+
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 5);
+    assert_eq!(buf.as_slice(), b"abcde");
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 5); // tell()
+    buf.clear();
+
+    assert!(t.set_len(20).is_ok());
+    assert!(t.is_rolled());
+    assert_eq!(t.seek(SeekFrom::Current(0)).unwrap(), 5); // tell()
+    assert_eq!(t.seek(SeekFrom::Start(0)).unwrap(), 0);
+    assert_eq!(t.read_to_end(&mut buf).unwrap(), 20);
+    assert_eq!(buf.as_slice(), b"abcde\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
+}
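
The rollover behavior these tests cover, in one small sketch: data stays in an in-memory buffer until the size cap is exceeded (or `roll()` is called), after which it lives in an unnamed temporary file. Illustrative only:

```rust
use std::io::{Read, Seek, SeekFrom, Write};
use tempfile::spooled_tempfile;

fn main() -> std::io::Result<()> {
    // 4096-byte cap: small writes stay in memory.
    let mut spool = spooled_tempfile(4096);
    spool.write_all(b"small payload")?;
    assert!(!spool.is_rolled());

    // Force the contents onto disk without exceeding the cap.
    spool.roll()?;
    assert!(spool.is_rolled());

    // The data and the seek position survive the rollover.
    spool.seek(SeekFrom::Start(0))?;
    let mut contents = String::new();
    spool.read_to_string(&mut contents)?;
    assert_eq!(contents, "small payload");
    Ok(())
}
```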
diff --git a/crates/tempfile/tests/tempdir.rs b/crates/tempfile/tests/tempdir.rs
new file mode 100644
index 0000000..746fe47
--- /dev/null
+++ b/crates/tempfile/tests/tempdir.rs
@@ -0,0 +1,261 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(rust_2018_idioms)]
+
+use std::env;
+use std::fs;
+use std::path::Path;
+use std::sync::mpsc::channel;
+use std::thread;
+
+use tempfile::{Builder, TempDir};
+
+macro_rules! t {
+    ($e:expr) => {
+        match $e {
+            Ok(n) => n,
+            Err(e) => panic!("error: {}", e),
+        }
+    };
+}
+
+trait PathExt {
+    fn exists(&self) -> bool;
+    fn is_dir(&self) -> bool;
+}
+
+impl PathExt for Path {
+    fn exists(&self) -> bool {
+        fs::metadata(self).is_ok()
+    }
+    fn is_dir(&self) -> bool {
+        fs::metadata(self).map(|m| m.is_dir()).unwrap_or(false)
+    }
+}
+
+fn test_tempdir() {
+    let path = {
+        let p = t!(Builder::new().prefix("foobar").tempdir_in(&Path::new(".")));
+        let p = p.path();
+        assert!(p.to_str().unwrap().contains("foobar"));
+        p.to_path_buf()
+    };
+    assert!(!path.exists());
+}
+
+#[test]
+fn test_customnamed() {
+    let tmpfile = Builder::new()
+        .prefix("prefix")
+        .suffix("suffix")
+        .rand_bytes(12)
+        .tempdir()
+        .unwrap();
+    let name = tmpfile.path().file_name().unwrap().to_str().unwrap();
+    assert!(name.starts_with("prefix"));
+    assert!(name.ends_with("suffix"));
+    assert_eq!(name.len(), 24);
+}
+
+fn test_rm_tempdir() {
+    let (tx, rx) = channel();
+    let f = move || -> () {
+        let tmp = t!(TempDir::new());
+        tx.send(tmp.path().to_path_buf()).unwrap();
+        panic!("panic to unwind past `tmp`");
+    };
+    let _ = thread::spawn(f).join();
+    let path = rx.recv().unwrap();
+    assert!(!path.exists());
+
+    let tmp = t!(TempDir::new());
+    let path = tmp.path().to_path_buf();
+    let f = move || -> () {
+        let _tmp = tmp;
+        panic!("panic to unwind past `tmp`");
+    };
+    let _ = thread::spawn(f).join();
+    assert!(!path.exists());
+
+    let path;
+    {
+        let f = move || t!(TempDir::new());
+
+        let tmp = thread::spawn(f).join().unwrap();
+        path = tmp.path().to_path_buf();
+        assert!(path.exists());
+    }
+    assert!(!path.exists());
+
+    let path;
+    {
+        let tmp = t!(TempDir::new());
+        path = tmp.into_path();
+    }
+    assert!(path.exists());
+    t!(fs::remove_dir_all(&path));
+    assert!(!path.exists());
+}
+
+fn test_rm_tempdir_close() {
+    let (tx, rx) = channel();
+    let f = move || -> () {
+        let tmp = t!(TempDir::new());
+        tx.send(tmp.path().to_path_buf()).unwrap();
+        t!(tmp.close());
+        panic!("panic when unwinding past `tmp`");
+    };
+    let _ = thread::spawn(f).join();
+    let path = rx.recv().unwrap();
+    assert!(!path.exists());
+
+    let tmp = t!(TempDir::new());
+    let path = tmp.path().to_path_buf();
+    let f = move || -> () {
+        let tmp = tmp;
+        t!(tmp.close());
+        panic!("panic when unwinding past `tmp`");
+    };
+    let _ = thread::spawn(f).join();
+    assert!(!path.exists());
+
+    let path;
+    {
+        let f = move || t!(TempDir::new());
+
+        let tmp = thread::spawn(f).join().unwrap();
+        path = tmp.path().to_path_buf();
+        assert!(path.exists());
+        t!(tmp.close());
+    }
+    assert!(!path.exists());
+
+    let path;
+    {
+        let tmp = t!(TempDir::new());
+        path = tmp.into_path();
+    }
+    assert!(path.exists());
+    t!(fs::remove_dir_all(&path));
+    assert!(!path.exists());
+}
+
+// Ideally these would be in std::os but then core would need
+// to depend on std
+fn recursive_mkdir_rel() {
+    let path = Path::new("frob");
+    let cwd = env::current_dir().unwrap();
+    println!(
+        "recursive_mkdir_rel: Making: {} in cwd {} [{}]",
+        path.display(),
+        cwd.display(),
+        path.exists()
+    );
+    t!(fs::create_dir(&path));
+    assert!(path.is_dir());
+    t!(fs::create_dir_all(&path));
+    assert!(path.is_dir());
+}
+
+fn recursive_mkdir_dot() {
+    let dot = Path::new(".");
+    t!(fs::create_dir_all(&dot));
+    let dotdot = Path::new("..");
+    t!(fs::create_dir_all(&dotdot));
+}
+
+fn recursive_mkdir_rel_2() {
+    let path = Path::new("./frob/baz");
+    let cwd = env::current_dir().unwrap();
+    println!(
+        "recursive_mkdir_rel_2: Making: {} in cwd {} [{}]",
+        path.display(),
+        cwd.display(),
+        path.exists()
+    );
+    t!(fs::create_dir_all(&path));
+    assert!(path.is_dir());
+    assert!(path.parent().unwrap().is_dir());
+    let path2 = Path::new("quux/blat");
+    println!(
+        "recursive_mkdir_rel_2: Making: {} in cwd {}",
+        path2.display(),
+        cwd.display()
+    );
+    t!(fs::create_dir("quux"));
+    t!(fs::create_dir_all(&path2));
+    assert!(path2.is_dir());
+    assert!(path2.parent().unwrap().is_dir());
+}
+
+// Ideally this would be in core, but needs TempFile
+pub fn test_remove_dir_all_ok() {
+    let tmpdir = t!(TempDir::new());
+    let tmpdir = tmpdir.path();
+    let root = tmpdir.join("foo");
+
+    println!("making {}", root.display());
+    t!(fs::create_dir(&root));
+    t!(fs::create_dir(&root.join("foo")));
+    t!(fs::create_dir(&root.join("foo").join("bar")));
+    t!(fs::create_dir(&root.join("foo").join("bar").join("blat")));
+    t!(fs::remove_dir_all(&root));
+    assert!(!root.exists());
+    assert!(!root.join("bar").exists());
+    assert!(!root.join("bar").join("blat").exists());
+}
+
+pub fn dont_double_panic() {
+    let r: Result<(), _> = thread::spawn(move || {
+        let tmpdir = TempDir::new().unwrap();
+        // Remove the temporary directory so that TempDir sees
+        // an error on drop
+        t!(fs::remove_dir(tmpdir.path()));
+        // Panic. If TempDir panics *again* due to the rmdir
+        // error then the process will abort.
+        panic!();
+    })
+    .join();
+    assert!(r.is_err());
+}
+
+fn in_tmpdir<F>(f: F)
+where
+    F: FnOnce(),
+{
+    let tmpdir = t!(TempDir::new());
+    assert!(env::set_current_dir(tmpdir.path()).is_ok());
+
+    f();
+}
+
+pub fn pass_as_asref_path() {
+    let tempdir = t!(TempDir::new());
+    takes_asref_path(&tempdir);
+
+    fn takes_asref_path<T: AsRef<Path>>(path: T) {
+        let path = path.as_ref();
+        assert!(path.exists());
+    }
+}
+
+#[test]
+fn main() {
+    in_tmpdir(test_tempdir);
+    in_tmpdir(test_rm_tempdir);
+    in_tmpdir(test_rm_tempdir_close);
+    in_tmpdir(recursive_mkdir_rel);
+    in_tmpdir(recursive_mkdir_dot);
+    in_tmpdir(recursive_mkdir_rel_2);
+    in_tmpdir(test_remove_dir_all_ok);
+    in_tmpdir(dont_double_panic);
+    in_tmpdir(pass_as_asref_path);
+}
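
The `close()` calls in `test_rm_tempdir_close` are the interesting part of this file: dropping a `TempDir` ignores removal errors, while `close()` reports them. A minimal sketch using the same API:

```rust
use tempfile::TempDir;

fn main() -> std::io::Result<()> {
    let dir = TempDir::new()?;
    let marker = dir.path().join("marker.txt");
    std::fs::write(&marker, b"hello")?;

    // Dropping `dir` would also remove the directory, but any error would be
    // swallowed; close() propagates it instead.
    dir.close()?;
    assert!(!marker.exists());
    Ok(())
}
```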
diff --git a/crates/tempfile/tests/tempfile.rs b/crates/tempfile/tests/tempfile.rs
new file mode 100644
index 0000000..f4dddb2
--- /dev/null
+++ b/crates/tempfile/tests/tempfile.rs
@@ -0,0 +1,65 @@
+#![deny(rust_2018_idioms)]
+
+use std::fs;
+use std::io::{Read, Seek, SeekFrom, Write};
+use std::sync::mpsc::{sync_channel, TryRecvError};
+use std::thread;
+
+#[test]
+fn test_basic() {
+    let mut tmpfile = tempfile::tempfile().unwrap();
+    write!(tmpfile, "abcde").unwrap();
+    tmpfile.seek(SeekFrom::Start(0)).unwrap();
+    let mut buf = String::new();
+    tmpfile.read_to_string(&mut buf).unwrap();
+    assert_eq!("abcde", buf);
+}
+
+#[test]
+fn test_cleanup() {
+    let tmpdir = tempfile::tempdir().unwrap();
+    {
+        let mut tmpfile = tempfile::tempfile_in(&tmpdir).unwrap();
+        write!(tmpfile, "abcde").unwrap();
+    }
+    let num_files = fs::read_dir(&tmpdir).unwrap().count();
+    assert!(num_files == 0);
+}
+
+// Only run this test on Linux. MacOS doesn't like us creating so many files, apparently.
+#[cfg(target_os = "linux")]
+#[test]
+fn test_pathological_cleaner() {
+    let tmpdir = tempfile::tempdir().unwrap();
+    let (tx, rx) = sync_channel(0);
+    let cleaner_thread = thread::spawn(move || {
+        let tmp_path = rx.recv().unwrap();
+        while rx.try_recv() == Err(TryRecvError::Empty) {
+            let files = fs::read_dir(&tmp_path).unwrap();
+            for f in files {
+                // skip errors
+                if f.is_err() {
+                    continue;
+                }
+                let f = f.unwrap();
+                let _ = fs::remove_file(f.path());
+            }
+        }
+    });
+
+    // block until cleaner_thread makes progress
+    tx.send(tmpdir.path().to_owned()).unwrap();
+    // need 40-400 iterations to encounter race with cleaner on original system
+    for _ in 0..10000 {
+        let mut tmpfile = tempfile::tempfile_in(&tmpdir).unwrap();
+        write!(tmpfile, "abcde").unwrap();
+        tmpfile.seek(SeekFrom::Start(0)).unwrap();
+        let mut buf = String::new();
+        tmpfile.read_to_string(&mut buf).unwrap();
+        assert_eq!("abcde", buf);
+    }
+
+    // close the channel to make cleaner_thread exit
+    drop(tx);
+    cleaner_thread.join().expect("The cleaner thread failed");
+}
diff --git a/crates/termcolor/.cargo-checksum.json b/crates/termcolor/.cargo-checksum.json
new file mode 100644
index 0000000..347620f
--- /dev/null
+++ b/crates/termcolor/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"572fae34cf0bee9ca39a1398501c17fed5e98a2de908136cabbaba98cf97208e","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"b07f32791ef31fdc347d1d4a62a0bf0979ab825a361ca9079f31908a0b78ea96","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/lib.rs":"155112a5bc22f4640911dff4a167721ade0b739eb1eda4d108727044dcfd9918"},"package":"06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"}
\ No newline at end of file
diff --git a/crates/termcolor/Android.bp b/crates/termcolor/Android.bp
new file mode 100644
index 0000000..d1ff4ca
--- /dev/null
+++ b/crates/termcolor/Android.bp
@@ -0,0 +1,45 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_termcolor_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_termcolor_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-MIT"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libtermcolor",
+    host_supported: true,
+    crate_name: "termcolor",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.4.1",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
+
+rust_test {
+    name: "termcolor_test_src_lib",
+    host_supported: true,
+    crate_name: "termcolor",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.4.1",
+    crate_root: "src/lib.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+}
diff --git a/crates/termcolor/COPYING b/crates/termcolor/COPYING
new file mode 100644
index 0000000..bb9c20a
--- /dev/null
+++ b/crates/termcolor/COPYING
@@ -0,0 +1,3 @@
+This project is dual-licensed under the Unlicense and MIT licenses.
+
+You may use this code under the terms of either license.
diff --git a/crates/termcolor/Cargo.lock b/crates/termcolor/Cargo.lock
new file mode 100644
index 0000000..7d3692c
--- /dev/null
+++ b/crates/termcolor/Cargo.lock
@@ -0,0 +1,92 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "termcolor"
+version = "1.4.1"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "winapi-util"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
+dependencies = [
+ "windows-sys",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
diff --git a/crates/termcolor/Cargo.toml b/crates/termcolor/Cargo.toml
new file mode 100644
index 0000000..f90eaca
--- /dev/null
+++ b/crates/termcolor/Cargo.toml
@@ -0,0 +1,40 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "termcolor"
+version = "1.4.1"
+authors = ["Andrew Gallant <jamslam@gmail.com>"]
+description = """
+A simple cross platform library for writing colored text to a terminal.
+"""
+homepage = "https://github.com/BurntSushi/termcolor"
+documentation = "https://docs.rs/termcolor"
+readme = "README.md"
+keywords = [
+    "windows",
+    "win",
+    "color",
+    "ansi",
+    "console",
+]
+license = "Unlicense OR MIT"
+repository = "https://github.com/BurntSushi/termcolor"
+
+[lib]
+name = "termcolor"
+bench = false
+
+[dev-dependencies]
+
+[target."cfg(windows)".dependencies.winapi-util]
+version = "0.1.3"
diff --git a/crates/termcolor/LICENSE b/crates/termcolor/LICENSE
new file mode 120000
index 0000000..7f9a88e
--- /dev/null
+++ b/crates/termcolor/LICENSE
@@ -0,0 +1 @@
+LICENSE-MIT
\ No newline at end of file
diff --git a/crates/termcolor/LICENSE-MIT b/crates/termcolor/LICENSE-MIT
new file mode 100644
index 0000000..3b0a5dc
--- /dev/null
+++ b/crates/termcolor/LICENSE-MIT
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Andrew Gallant
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/crates/termcolor/METADATA b/crates/termcolor/METADATA
new file mode 100644
index 0000000..46e39c0
--- /dev/null
+++ b/crates/termcolor/METADATA
@@ -0,0 +1,20 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update external/rust/crates/termcolor
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
+
+name: "termcolor"
+description: "A simple cross platform library for writing colored text to a terminal."
+third_party {
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2024
+    month: 2
+    day: 7
+  }
+  homepage: "https://crates.io/crates/termcolor"
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/termcolor/termcolor-1.4.1.crate"
+    version: "1.4.1"
+  }
+}
diff --git a/crates/termcolor/MODULE_LICENSE_MIT b/crates/termcolor/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/termcolor/MODULE_LICENSE_MIT
diff --git a/crates/termcolor/NOTICE b/crates/termcolor/NOTICE
new file mode 120000
index 0000000..7a694c9
--- /dev/null
+++ b/crates/termcolor/NOTICE
@@ -0,0 +1 @@
+LICENSE
\ No newline at end of file
diff --git a/crates/termcolor/README.md b/crates/termcolor/README.md
new file mode 100644
index 0000000..ef7e3e8
--- /dev/null
+++ b/crates/termcolor/README.md
@@ -0,0 +1,110 @@
+termcolor
+=========
+A simple cross platform library for writing colored text to a terminal. This
+library writes colored text either using standard ANSI escape sequences or by
+interacting with the Windows console. Several convenient abstractions are
+provided for use in single-threaded or multi-threaded command line
+applications.
+
+[![Build status](https://github.com/BurntSushi/termcolor/workflows/ci/badge.svg)](https://github.com/BurntSushi/termcolor/actions)
+[![crates.io](https://img.shields.io/crates/v/termcolor.svg)](https://crates.io/crates/termcolor)
+
+Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/).
+
+### Documentation
+
+[https://docs.rs/termcolor](https://docs.rs/termcolor)
+
+### Usage
+
+Run `cargo add termcolor` to add this dependency to your `Cargo.toml` file.
+
+### Organization
+
+The `WriteColor` trait extends the `io::Write` trait with methods for setting
+colors or resetting them.
+
+`StandardStream` and `StandardStreamLock` both satisfy `WriteColor` and are
+analogous to `std::io::Stdout` and `std::io::StdoutLock`, or `std::io::Stderr`
+and `std::io::StderrLock`.
+
+`Buffer` is an in memory buffer that supports colored text. In a parallel
+program, each thread might write to its own buffer. A buffer can be printed to
+stdout or stderr using a `BufferWriter`. The advantage of this design is that
+each thread can work in parallel on a buffer without having to synchronize
+access to global resources such as the Windows console. Moreover, this design
+also prevents interleaving of buffer output.
+
+`Ansi` and `NoColor` both satisfy `WriteColor` for arbitrary implementors of
+`io::Write`. These types are useful when you know exactly what you need. An
+analogous type for the Windows console is not provided since it cannot exist.
+
+### Example: using `StandardStream`
+
+The `StandardStream` type in this crate works similarly to `std::io::Stdout`,
+except it is augmented with methods for coloring by the `WriteColor` trait. For
+example, to write some green text:
+
+```rust
+use std::io::{self, Write};
+use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
+
+fn write_green() -> io::Result<()> {
+    let mut stdout = StandardStream::stdout(ColorChoice::Always);
+    stdout.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
+    writeln!(&mut stdout, "green text!")
+}
+```
+
+### Example: using `BufferWriter`
+
+A `BufferWriter` can create buffers and write buffers to stdout or stderr. It
+does *not* implement `io::Write` or `WriteColor` itself. Instead, `Buffer`
+implements `io::Write` and `termcolor::WriteColor`.
+
+This example shows how to print some green text to stderr.
+
+```rust
+use std::io::{self, Write};
+use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
+
+fn write_green() -> io::Result<()> {
+    let mut bufwtr = BufferWriter::stderr(ColorChoice::Always);
+    let mut buffer = bufwtr.buffer();
+    buffer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
+    writeln!(&mut buffer, "green text!")?;
+    bufwtr.print(&buffer)
+}
+```
+
+### Automatic color selection
+
+When building a writer with termcolor, the caller must provide a
+[`ColorChoice`](https://docs.rs/termcolor/1.*/termcolor/enum.ColorChoice.html)
+selection. When the color choice is `Auto`, termcolor will attempt to determine
+whether colors should be enabled by inspecting the environment. Currently,
+termcolor will inspect the `TERM` and `NO_COLOR` environment variables:
+
+* If `NO_COLOR` is set to any value, then colors will be suppressed.
+* If `TERM` is set to `dumb`, then colors will be suppressed.
+* In non-Windows environments, if `TERM` is not set, then colors will be
+  suppressed.
+
+This decision procedure may change over time.
+
+Currently, `termcolor` does not attempt to detect whether a tty is present or
+not. To achieve that, please use
+[`std::io::IsTerminal`](https://doc.rust-lang.org/std/io/trait.IsTerminal.html).
+
+### Minimum Rust version policy
+
+This crate's minimum supported `rustc` version is `1.34.0`.
+
+The current policy is that the minimum Rust version required to use this crate
+can be increased in minor version updates. For example, if `crate 1.0` requires
+Rust 1.20.0, then `crate 1.0.z` for all values of `z` will also require Rust
+1.20.0 or newer. However, `crate 1.y` for `y > 0` may require a newer minimum
+version of Rust.
+
+In general, this crate will be conservative with respect to the minimum
+supported version of Rust.
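
The README's Organization section describes `Ansi` and `NoColor` but gives no example for them; below is a minimal sketch that renders colored output into an in-memory buffer (illustrative only, not part of the vendored README):

```rust
use std::io::Write;
use termcolor::{Ansi, Color, ColorSpec, WriteColor};

fn green_bytes(label: &str) -> std::io::Result<Vec<u8>> {
    // Ansi wraps any io::Write and always emits ANSI escape sequences;
    // NoColor::new(Vec::new()) would drop the color calls instead.
    let mut wtr = Ansi::new(Vec::new());
    wtr.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
    write!(wtr, "{}", label)?;
    wtr.reset()?;
    Ok(wtr.into_inner())
}

fn main() -> std::io::Result<()> {
    std::io::stdout().write_all(&green_bytes("green text!")?)
}
```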
diff --git a/crates/termcolor/TEST_MAPPING b/crates/termcolor/TEST_MAPPING
new file mode 100644
index 0000000..ea6c5d1
--- /dev/null
+++ b/crates/termcolor/TEST_MAPPING
@@ -0,0 +1,27 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "packages/modules/Virtualization/virtualizationmanager"
+    },
+    {
+      "path": "system/security/diced"
+    },
+    {
+      "path": "system/security/keystore2"
+    },
+    {
+      "path": "system/security/keystore2/legacykeystore"
+    }
+  ],
+  "presubmit": [
+    {
+      "name": "termcolor_test_src_lib"
+    }
+  ],
+  "presubmit-rust": [
+    {
+      "name": "termcolor_test_src_lib"
+    }
+  ]
+}
diff --git a/crates/termcolor/UNLICENSE b/crates/termcolor/UNLICENSE
new file mode 100644
index 0000000..68a49da
--- /dev/null
+++ b/crates/termcolor/UNLICENSE
@@ -0,0 +1,24 @@
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <http://unlicense.org/>
diff --git a/crates/termcolor/cargo_embargo.json b/crates/termcolor/cargo_embargo.json
new file mode 100644
index 0000000..c8842d1
--- /dev/null
+++ b/crates/termcolor/cargo_embargo.json
@@ -0,0 +1,4 @@
+{
+  "run_cargo": false,
+  "tests": true
+}
diff --git a/crates/termcolor/rustfmt.toml b/crates/termcolor/rustfmt.toml
new file mode 100644
index 0000000..aa37a21
--- /dev/null
+++ b/crates/termcolor/rustfmt.toml
@@ -0,0 +1,2 @@
+max_width = 79
+use_small_heuristics = "max"
diff --git a/crates/termcolor/src/lib.rs b/crates/termcolor/src/lib.rs
new file mode 100644
index 0000000..9e6371f
--- /dev/null
+++ b/crates/termcolor/src/lib.rs
@@ -0,0 +1,2572 @@
+/*!
+This crate provides a cross platform abstraction for writing colored text to
+a terminal. Colors are written using either ANSI escape sequences or by
+communicating with a Windows console. Much of this API was motivated by use
+inside command line applications, where colors or styles can be configured
+by the end user and/or the environment.
+
+This crate also provides platform independent support for writing colored text
+to an in memory buffer. While this is easy to do with ANSI escape sequences
+(because they are in the buffer themselves), it is trickier to do with the
+Windows console API, which requires synchronous communication.
+
+In ANSI mode, this crate also provides support for writing hyperlinks.
+
+# Organization
+
+The `WriteColor` trait extends the `io::Write` trait with methods for setting
+colors or resetting them.
+
+`StandardStream` and `StandardStreamLock` both satisfy `WriteColor` and are
+analogous to `std::io::Stdout` and `std::io::StdoutLock`, or `std::io::Stderr`
+and `std::io::StderrLock`.
+
+`Buffer` is an in memory buffer that supports colored text. In a parallel
+program, each thread might write to its own buffer. A buffer can be printed to
+using a `BufferWriter`. The advantage of this design is that each thread can
+work in parallel on a buffer without having to synchronize access to global
+resources such as the Windows console. Moreover, this design also prevents
+interleaving of buffer output.
+
+`Ansi` and `NoColor` both satisfy `WriteColor` for arbitrary implementors of
+`io::Write`. These types are useful when you know exactly what you need. An
+analogous type for the Windows console is not provided since it cannot exist.
+
+# Example: using `StandardStream`
+
+The `StandardStream` type in this crate works similarly to `std::io::Stdout`,
+except it is augmented with methods for coloring by the `WriteColor` trait.
+For example, to write some green text:
+
+```rust,no_run
+# fn test() -> Result<(), Box<::std::error::Error>> {
+use std::io::Write;
+use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
+
+let mut stdout = StandardStream::stdout(ColorChoice::Always);
+stdout.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
+writeln!(&mut stdout, "green text!")?;
+# Ok(()) }
+```
+
+Note that any text written to the terminal now will be colored
+green when using ANSI escape sequences, even if it is written via
+stderr, and even if stderr had previously been set to `Color::Red`.
+Users will need to manage any color changes themselves by calling
+[`WriteColor::set_color`](trait.WriteColor.html#tymethod.set_color), and this
+may include calling [`WriteColor::reset`](trait.WriteColor.html#tymethod.reset)
+before the program exits to a shell.
+
+# Example: using `BufferWriter`
+
+A `BufferWriter` can create buffers and write buffers to stdout or stderr. It
+does *not* implement `io::Write` or `WriteColor` itself. Instead, `Buffer`
+implements `io::Write` and `termcolor::WriteColor`.
+
+This example shows how to print some green text to stderr.
+
+```rust,no_run
+# fn test() -> Result<(), Box<::std::error::Error>> {
+use std::io::Write;
+use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
+
+let mut bufwtr = BufferWriter::stderr(ColorChoice::Always);
+let mut buffer = bufwtr.buffer();
+buffer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
+writeln!(&mut buffer, "green text!")?;
+bufwtr.print(&buffer)?;
+# Ok(()) }
+```
+
+# Detecting presence of a terminal
+
+In many scenarios when using color, one often wants to enable colors
+automatically when writing to a terminal and disable colors automatically when
+writing to anything else. The typical way to achieve this in Unix environments
+is via libc's
+[`isatty`](https://man7.org/linux/man-pages/man3/isatty.3.html)
+function.
+Unfortunately, this notoriously does not work well in Windows environments. To
+work around that, the recommended solution is to use the standard library's
+[`IsTerminal`](https://doc.rust-lang.org/std/io/trait.IsTerminal.html) trait.
+It goes out of its way to get it as right as possible in Windows environments.
+
+For example, in a command line application that exposes a `--color` flag,
+your logic for how to enable colors might look like this:
+
+```ignore
+use std::io::IsTerminal;
+
+use termcolor::{ColorChoice, StandardStream};
+
+let preference = argv.get_flag("color").unwrap_or("auto");
+let mut choice = preference.parse::<ColorChoice>()?;
+if choice == ColorChoice::Auto && !std::io::stdin().is_terminal() {
+    choice = ColorChoice::Never;
+}
+let stdout = StandardStream::stdout(choice);
+// ... write to stdout
+```
+
+Currently, `termcolor` does not provide anything to do this for you.
+*/
+
+#![deny(missing_debug_implementations, missing_docs)]
+
+// #[cfg(doctest)]
+// use doc_comment::doctest;
+// #[cfg(doctest)]
+// doctest!("../README.md");
+
+use std::env;
+use std::error;
+use std::fmt;
+use std::io::{self, Write};
+use std::str::FromStr;
+use std::sync::atomic::{AtomicBool, Ordering};
+#[cfg(windows)]
+use std::sync::{Mutex, MutexGuard};
+
+#[cfg(windows)]
+use winapi_util::console as wincon;
+
+/// This trait describes the behavior of writers that support colored output.
+pub trait WriteColor: io::Write {
+    /// Returns true if and only if the underlying writer supports colors.
+    fn supports_color(&self) -> bool;
+
+    /// Set the color settings of the writer.
+    ///
+    /// Subsequent writes to this writer will use these settings until either
+    /// `reset` is called or new color settings are set.
+    ///
+    /// If there was a problem setting the color settings, then an error is
+    /// returned.
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()>;
+
+    /// Reset the current color settings to their original settings.
+    ///
+    /// If there was a problem resetting the color settings, then an error is
+    /// returned.
+    ///
+    /// Note that this does not reset hyperlinks. Those need to be
+    /// reset on their own, e.g., by calling `set_hyperlink` with
+    /// [`HyperlinkSpec::none`].
+    fn reset(&mut self) -> io::Result<()>;
+
+    /// Returns true if and only if the underlying writer must synchronously
+    /// interact with an end user's device in order to control colors. By
+    /// default, this always returns `false`.
+    ///
+    /// In practice, this should return `true` if the underlying writer is
+    /// manipulating colors using the Windows console APIs.
+    ///
+    /// This is useful for writing generic code (such as a buffered writer)
+    /// that can perform certain optimizations when the underlying writer
+    /// doesn't rely on synchronous APIs. For example, ANSI escape sequences
+    /// can be passed through to the end user's device as is.
+    fn is_synchronous(&self) -> bool {
+        false
+    }
+
+    /// Set the current hyperlink of the writer.
+    ///
+    /// The typical way to use this is to first call it with a
+    /// [`HyperlinkSpec::open`] to write the actual URI to a tty that supports
+    /// [OSC-8]. At this point, the caller can now write the label for the
+    /// hyperlink. This may include coloring or other styles. Once the caller
+    /// has finished writing the label, one should call this method again with
+    /// [`HyperlinkSpec::close`].
+    ///
+    /// If there was a problem setting the hyperlink, then an error is
+    /// returned.
+    ///
+    /// This defaults to doing nothing.
+    ///
+    /// [OSC-8]: https://github.com/Alhadis/OSC8-Adoption/
+    fn set_hyperlink(&mut self, _link: &HyperlinkSpec) -> io::Result<()> {
+        Ok(())
+    }
+
+    /// Returns true if and only if the underlying writer supports hyperlinks.
+    ///
+    /// This can be used to avoid generating hyperlink URIs unnecessarily.
+    ///
+    /// This defaults to `false`.
+    fn supports_hyperlinks(&self) -> bool {
+        false
+    }
+}
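
To make the open/label/close protocol described in the `set_hyperlink` docs above concrete, here is a short sketch (assuming `HyperlinkSpec::open` takes the URI as bytes, as in the crate's documentation):

```rust
use std::io::Write;
use termcolor::{ColorChoice, HyperlinkSpec, StandardStream, WriteColor};

fn main() -> std::io::Result<()> {
    let mut out = StandardStream::stdout(ColorChoice::Auto);
    if out.supports_hyperlinks() {
        // Open the link, write the visible label, then close it again.
        out.set_hyperlink(&HyperlinkSpec::open(b"https://docs.rs/termcolor"))?;
        write!(out, "termcolor documentation")?;
        out.set_hyperlink(&HyperlinkSpec::close())?;
    } else {
        // Fall back to printing the raw URI when OSC-8 isn't supported.
        write!(out, "https://docs.rs/termcolor")?;
    }
    writeln!(out)
}
```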
+
+impl<'a, T: ?Sized + WriteColor> WriteColor for &'a mut T {
+    fn supports_color(&self) -> bool {
+        (&**self).supports_color()
+    }
+    fn supports_hyperlinks(&self) -> bool {
+        (&**self).supports_hyperlinks()
+    }
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+        (&mut **self).set_color(spec)
+    }
+    fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> {
+        (&mut **self).set_hyperlink(link)
+    }
+    fn reset(&mut self) -> io::Result<()> {
+        (&mut **self).reset()
+    }
+    fn is_synchronous(&self) -> bool {
+        (&**self).is_synchronous()
+    }
+}
+
+impl<T: ?Sized + WriteColor> WriteColor for Box<T> {
+    fn supports_color(&self) -> bool {
+        (&**self).supports_color()
+    }
+    fn supports_hyperlinks(&self) -> bool {
+        (&**self).supports_hyperlinks()
+    }
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+        (&mut **self).set_color(spec)
+    }
+    fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> {
+        (&mut **self).set_hyperlink(link)
+    }
+    fn reset(&mut self) -> io::Result<()> {
+        (&mut **self).reset()
+    }
+    fn is_synchronous(&self) -> bool {
+        (&**self).is_synchronous()
+    }
+}
+
+/// ColorChoice represents the color preferences of an end user.
+///
+/// The `Default` implementation for this type will select `Auto`, which tries
+/// to do the right thing based on the current environment.
+///
+/// The `FromStr` implementation for this type converts a lowercase kebab-case
+/// string of the variant name to the corresponding variant. Any other string
+/// results in an error.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum ColorChoice {
+    /// Try very hard to emit colors. This includes emitting ANSI colors
+    /// on Windows if the console API is unavailable.
+    Always,
+    /// AlwaysAnsi is like Always, except it never tries to use anything other
+    /// than emitting ANSI color codes.
+    AlwaysAnsi,
+    /// Try to use colors, but don't force the issue. If the console isn't
+    /// available on Windows, or if TERM=dumb, or if `NO_COLOR` is defined, for
+    /// example, then don't use colors.
+    Auto,
+    /// Never emit colors.
+    Never,
+}
+
+/// The default is `Auto`.
+impl Default for ColorChoice {
+    fn default() -> ColorChoice {
+        ColorChoice::Auto
+    }
+}
+
+impl FromStr for ColorChoice {
+    type Err = ColorChoiceParseError;
+
+    fn from_str(s: &str) -> Result<ColorChoice, ColorChoiceParseError> {
+        match s.to_lowercase().as_str() {
+            "always" => Ok(ColorChoice::Always),
+            "always-ansi" => Ok(ColorChoice::AlwaysAnsi),
+            "never" => Ok(ColorChoice::Never),
+            "auto" => Ok(ColorChoice::Auto),
+            unknown => Err(ColorChoiceParseError {
+                unknown_choice: unknown.to_string(),
+            }),
+        }
+    }
+}
+
+impl ColorChoice {
+    /// Returns true if we should attempt to write colored output.
+    fn should_attempt_color(&self) -> bool {
+        match *self {
+            ColorChoice::Always => true,
+            ColorChoice::AlwaysAnsi => true,
+            ColorChoice::Never => false,
+            ColorChoice::Auto => self.env_allows_color(),
+        }
+    }
+
+    #[cfg(not(windows))]
+    fn env_allows_color(&self) -> bool {
+        match env::var_os("TERM") {
+            // If TERM isn't set, then we are in a weird environment that
+            // probably doesn't support colors.
+            None => return false,
+            Some(k) => {
+                if k == "dumb" {
+                    return false;
+                }
+            }
+        }
+        // If TERM != dumb, then the only way we don't allow colors at this
+        // point is if NO_COLOR is set.
+        if env::var_os("NO_COLOR").is_some() {
+            return false;
+        }
+        true
+    }
+
+    #[cfg(windows)]
+    fn env_allows_color(&self) -> bool {
+        // On Windows, if TERM isn't set, then we shouldn't automatically
+        // assume that colors aren't allowed. This is unlike Unix environments
+        // where TERM is more rigorously set.
+        if let Some(k) = env::var_os("TERM") {
+            if k == "dumb" {
+                return false;
+            }
+        }
+        // If TERM != dumb, then the only way we don't allow colors at this
+        // point is if NO_COLOR is set.
+        if env::var_os("NO_COLOR").is_some() {
+            return false;
+        }
+        true
+    }
+
+    /// Returns true if this choice should forcefully use ANSI color codes.
+    ///
+    /// It's possible that ANSI is still the correct choice even if this
+    /// returns false.
+    #[cfg(windows)]
+    fn should_ansi(&self) -> bool {
+        match *self {
+            ColorChoice::Always => false,
+            ColorChoice::AlwaysAnsi => true,
+            ColorChoice::Never => false,
+            ColorChoice::Auto => {
+                match env::var("TERM") {
+                    Err(_) => false,
+                    // cygwin doesn't seem to support ANSI escape sequences
+                    // and instead has its own variety. However, the Windows
+                    // console API may be available.
+                    Ok(k) => k != "dumb" && k != "cygwin",
+                }
+            }
+        }
+    }
+}
+
+/// An error that occurs when parsing a `ColorChoice` fails.
+#[derive(Clone, Debug)]
+pub struct ColorChoiceParseError {
+    unknown_choice: String,
+}
+
+impl std::error::Error for ColorChoiceParseError {}
+
+impl fmt::Display for ColorChoiceParseError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(
+            f,
+            "unrecognized color choice '{}': valid choices are: \
+             always, always-ansi, never, auto",
+            self.unknown_choice,
+        )
+    }
+}
+
+// `std::io` implements `Stdout` and `Stderr` (and their `Lock` variants) as
+// separate types, which makes it difficult to abstract over them. We use
+// some simple internal enum types to work around this.
+
+enum StandardStreamType {
+    Stdout,
+    Stderr,
+    StdoutBuffered,
+    StderrBuffered,
+}
+
+#[derive(Debug)]
+enum IoStandardStream {
+    Stdout(io::Stdout),
+    Stderr(io::Stderr),
+    StdoutBuffered(io::BufWriter<io::Stdout>),
+    StderrBuffered(io::BufWriter<io::Stderr>),
+}
+
+impl IoStandardStream {
+    fn new(sty: StandardStreamType) -> IoStandardStream {
+        match sty {
+            StandardStreamType::Stdout => {
+                IoStandardStream::Stdout(io::stdout())
+            }
+            StandardStreamType::Stderr => {
+                IoStandardStream::Stderr(io::stderr())
+            }
+            StandardStreamType::StdoutBuffered => {
+                let wtr = io::BufWriter::new(io::stdout());
+                IoStandardStream::StdoutBuffered(wtr)
+            }
+            StandardStreamType::StderrBuffered => {
+                let wtr = io::BufWriter::new(io::stderr());
+                IoStandardStream::StderrBuffered(wtr)
+            }
+        }
+    }
+
+    fn lock(&self) -> IoStandardStreamLock<'_> {
+        match *self {
+            IoStandardStream::Stdout(ref s) => {
+                IoStandardStreamLock::StdoutLock(s.lock())
+            }
+            IoStandardStream::Stderr(ref s) => {
+                IoStandardStreamLock::StderrLock(s.lock())
+            }
+            IoStandardStream::StdoutBuffered(_)
+            | IoStandardStream::StderrBuffered(_) => {
+                // We don't permit this case to ever occur in the public API,
+                // so it's OK to panic.
+                panic!("cannot lock a buffered standard stream")
+            }
+        }
+    }
+}
+
+impl io::Write for IoStandardStream {
+    #[inline(always)]
+    fn write(&mut self, b: &[u8]) -> io::Result<usize> {
+        match *self {
+            IoStandardStream::Stdout(ref mut s) => s.write(b),
+            IoStandardStream::Stderr(ref mut s) => s.write(b),
+            IoStandardStream::StdoutBuffered(ref mut s) => s.write(b),
+            IoStandardStream::StderrBuffered(ref mut s) => s.write(b),
+        }
+    }
+
+    #[inline(always)]
+    fn flush(&mut self) -> io::Result<()> {
+        match *self {
+            IoStandardStream::Stdout(ref mut s) => s.flush(),
+            IoStandardStream::Stderr(ref mut s) => s.flush(),
+            IoStandardStream::StdoutBuffered(ref mut s) => s.flush(),
+            IoStandardStream::StderrBuffered(ref mut s) => s.flush(),
+        }
+    }
+}
+
+// Same rigmarole for the locked variants of the standard streams.
+
+#[derive(Debug)]
+enum IoStandardStreamLock<'a> {
+    StdoutLock(io::StdoutLock<'a>),
+    StderrLock(io::StderrLock<'a>),
+}
+
+impl<'a> io::Write for IoStandardStreamLock<'a> {
+    #[inline(always)]
+    fn write(&mut self, b: &[u8]) -> io::Result<usize> {
+        match *self {
+            IoStandardStreamLock::StdoutLock(ref mut s) => s.write(b),
+            IoStandardStreamLock::StderrLock(ref mut s) => s.write(b),
+        }
+    }
+
+    #[inline(always)]
+    fn flush(&mut self) -> io::Result<()> {
+        match *self {
+            IoStandardStreamLock::StdoutLock(ref mut s) => s.flush(),
+            IoStandardStreamLock::StderrLock(ref mut s) => s.flush(),
+        }
+    }
+}
+
+/// Satisfies `io::Write` and `WriteColor`, and supports optional coloring
+/// to either of the standard output streams, stdout and stderr.
+#[derive(Debug)]
+pub struct StandardStream {
+    wtr: LossyStandardStream<WriterInner<IoStandardStream>>,
+}
+
+/// `StandardStreamLock` is a locked reference to a `StandardStream`.
+///
+/// This implements the `io::Write` and `WriteColor` traits, and is constructed
+/// via the `StandardStream::lock` method.
+///
+/// The lifetime `'a` refers to the lifetime of the corresponding
+/// `StandardStream`.
+#[derive(Debug)]
+pub struct StandardStreamLock<'a> {
+    wtr: LossyStandardStream<WriterInnerLock<'a, IoStandardStreamLock<'a>>>,
+}
+
+/// Like `StandardStream`, but does buffered writing.
+#[derive(Debug)]
+pub struct BufferedStandardStream {
+    wtr: LossyStandardStream<WriterInner<IoStandardStream>>,
+}
+
+/// WriterInner is a (limited) generic representation of a writer. It is
+/// limited because W should only ever be stdout/stderr on Windows.
+#[derive(Debug)]
+enum WriterInner<W> {
+    NoColor(NoColor<W>),
+    Ansi(Ansi<W>),
+    #[cfg(windows)]
+    Windows {
+        wtr: W,
+        console: Mutex<wincon::Console>,
+    },
+}
+
+/// WriterInnerLock is a (limited) generic representation of a writer. It is
+/// limited because W should only ever be stdout/stderr on Windows.
+#[derive(Debug)]
+enum WriterInnerLock<'a, W> {
+    NoColor(NoColor<W>),
+    Ansi(Ansi<W>),
+    /// What a gross hack. On Windows, we need to specify a lifetime for the
+    /// console when in a locked state, but obviously don't need to do that
+    /// on Unix, which makes the `'a` unused. To satisfy the compiler, we need
+    /// a PhantomData.
+    #[allow(dead_code)]
+    Unreachable(::std::marker::PhantomData<&'a ()>),
+    #[cfg(windows)]
+    Windows {
+        wtr: W,
+        console: MutexGuard<'a, wincon::Console>,
+    },
+}
+
+impl StandardStream {
+    /// Create a new `StandardStream` with the given color preferences that
+    /// writes to standard output.
+    ///
+    /// On Windows, if coloring is desired and a Windows console could not be
+    /// found, then ANSI escape sequences are used instead.
+    ///
+    /// The specific color/style settings can be configured when writing via
+    /// the `WriteColor` trait.
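+    ///
+    /// A minimal usage sketch (illustrative only):
+    ///
+    /// ```no_run
+    /// use std::io::Write;
+    /// use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
+    ///
+    /// fn example() -> std::io::Result<()> {
+    ///     let mut stdout = StandardStream::stdout(ColorChoice::Auto);
+    ///     stdout.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
+    ///     writeln!(&mut stdout, "green text!")?;
+    ///     stdout.reset()
+    /// }
+    /// ```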
+    pub fn stdout(choice: ColorChoice) -> StandardStream {
+        let wtr = WriterInner::create(StandardStreamType::Stdout, choice);
+        StandardStream { wtr: LossyStandardStream::new(wtr) }
+    }
+
+    /// Create a new `StandardStream` with the given color preferences that
+    /// writes to standard error.
+    ///
+    /// On Windows, if coloring is desired and a Windows console could not be
+    /// found, then ANSI escape sequences are used instead.
+    ///
+    /// The specific color/style settings can be configured when writing via
+    /// the `WriteColor` trait.
+    pub fn stderr(choice: ColorChoice) -> StandardStream {
+        let wtr = WriterInner::create(StandardStreamType::Stderr, choice);
+        StandardStream { wtr: LossyStandardStream::new(wtr) }
+    }
+
+    /// Lock the underlying writer.
+    ///
+    /// The lock guard returned also satisfies `io::Write` and
+    /// `WriteColor`.
+    ///
+    /// This method is **not reentrant**. It may panic if `lock` is called
+    /// while a `StandardStreamLock` is still alive.
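+    ///
+    /// A small sketch of the intended locking pattern (illustrative only):
+    ///
+    /// ```no_run
+    /// use std::io::Write;
+    /// use termcolor::{ColorChoice, StandardStream};
+    ///
+    /// let stream = StandardStream::stdout(ColorChoice::Auto);
+    /// let mut lock = stream.lock();
+    /// // Writes through `lock` are grouped together on stdout.
+    /// writeln!(lock, "one").unwrap();
+    /// writeln!(lock, "two").unwrap();
+    /// ```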
+    pub fn lock(&self) -> StandardStreamLock<'_> {
+        StandardStreamLock::from_stream(self)
+    }
+}
+
+impl<'a> StandardStreamLock<'a> {
+    #[cfg(not(windows))]
+    fn from_stream(stream: &StandardStream) -> StandardStreamLock<'_> {
+        let locked = match *stream.wtr.get_ref() {
+            WriterInner::NoColor(ref w) => {
+                WriterInnerLock::NoColor(NoColor(w.0.lock()))
+            }
+            WriterInner::Ansi(ref w) => {
+                WriterInnerLock::Ansi(Ansi(w.0.lock()))
+            }
+        };
+        StandardStreamLock { wtr: stream.wtr.wrap(locked) }
+    }
+
+    #[cfg(windows)]
+    fn from_stream(stream: &StandardStream) -> StandardStreamLock {
+        let locked = match *stream.wtr.get_ref() {
+            WriterInner::NoColor(ref w) => {
+                WriterInnerLock::NoColor(NoColor(w.0.lock()))
+            }
+            WriterInner::Ansi(ref w) => {
+                WriterInnerLock::Ansi(Ansi(w.0.lock()))
+            }
+            #[cfg(windows)]
+            WriterInner::Windows { ref wtr, ref console } => {
+                WriterInnerLock::Windows {
+                    wtr: wtr.lock(),
+                    console: console.lock().unwrap(),
+                }
+            }
+        };
+        StandardStreamLock { wtr: stream.wtr.wrap(locked) }
+    }
+}
+
+impl BufferedStandardStream {
+    /// Create a new `BufferedStandardStream` with the given color preferences
+    /// that writes to standard output via a buffered writer.
+    ///
+    /// On Windows, if coloring is desired and a Windows console could not be
+    /// found, then ANSI escape sequences are used instead.
+    ///
+    /// The specific color/style settings can be configured when writing via
+    /// the `WriteColor` trait.
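+    ///
+    /// A minimal sketch (illustrative only); since the writer is buffered,
+    /// flush it when output must appear promptly:
+    ///
+    /// ```no_run
+    /// use std::io::Write;
+    /// use termcolor::{
+    ///     BufferedStandardStream, Color, ColorChoice, ColorSpec, WriteColor,
+    /// };
+    ///
+    /// fn example() -> std::io::Result<()> {
+    ///     let mut out = BufferedStandardStream::stdout(ColorChoice::Auto);
+    ///     out.set_color(ColorSpec::new().set_fg(Some(Color::Cyan)))?;
+    ///     writeln!(out, "buffered, colored line")?;
+    ///     out.reset()?;
+    ///     out.flush()
+    /// }
+    /// ```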
+    pub fn stdout(choice: ColorChoice) -> BufferedStandardStream {
+        let wtr =
+            WriterInner::create(StandardStreamType::StdoutBuffered, choice);
+        BufferedStandardStream { wtr: LossyStandardStream::new(wtr) }
+    }
+
+    /// Create a new `BufferedStandardStream` with the given color preferences
+    /// that writes to standard error via a buffered writer.
+    ///
+    /// On Windows, if coloring is desired and a Windows console could not be
+    /// found, then ANSI escape sequences are used instead.
+    ///
+    /// The specific color/style settings can be configured when writing via
+    /// the `WriteColor` trait.
+    pub fn stderr(choice: ColorChoice) -> BufferedStandardStream {
+        let wtr =
+            WriterInner::create(StandardStreamType::StderrBuffered, choice);
+        BufferedStandardStream { wtr: LossyStandardStream::new(wtr) }
+    }
+}
+
+impl WriterInner<IoStandardStream> {
+    /// Create a new inner writer for a standard stream with the given color
+    /// preferences.
+    #[cfg(not(windows))]
+    fn create(
+        sty: StandardStreamType,
+        choice: ColorChoice,
+    ) -> WriterInner<IoStandardStream> {
+        if choice.should_attempt_color() {
+            WriterInner::Ansi(Ansi(IoStandardStream::new(sty)))
+        } else {
+            WriterInner::NoColor(NoColor(IoStandardStream::new(sty)))
+        }
+    }
+
+    /// Create a new inner writer for a standard stream with the given color
+    /// preferences.
+    ///
+    /// If coloring is desired and a Windows console could not be found, then
+    /// ANSI escape sequences are used instead.
+    #[cfg(windows)]
+    fn create(
+        sty: StandardStreamType,
+        choice: ColorChoice,
+    ) -> WriterInner<IoStandardStream> {
+        let mut con = match sty {
+            StandardStreamType::Stdout => wincon::Console::stdout(),
+            StandardStreamType::Stderr => wincon::Console::stderr(),
+            StandardStreamType::StdoutBuffered => wincon::Console::stdout(),
+            StandardStreamType::StderrBuffered => wincon::Console::stderr(),
+        };
+        let is_console_virtual = con
+            .as_mut()
+            .map(|con| con.set_virtual_terminal_processing(true).is_ok())
+            .unwrap_or(false);
+        if choice.should_attempt_color() {
+            if choice.should_ansi() || is_console_virtual {
+                WriterInner::Ansi(Ansi(IoStandardStream::new(sty)))
+            } else if let Ok(console) = con {
+                WriterInner::Windows {
+                    wtr: IoStandardStream::new(sty),
+                    console: Mutex::new(console),
+                }
+            } else {
+                WriterInner::Ansi(Ansi(IoStandardStream::new(sty)))
+            }
+        } else {
+            WriterInner::NoColor(NoColor(IoStandardStream::new(sty)))
+        }
+    }
+}
+
+impl io::Write for StandardStream {
+    #[inline]
+    fn write(&mut self, b: &[u8]) -> io::Result<usize> {
+        self.wtr.write(b)
+    }
+
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> {
+        self.wtr.flush()
+    }
+}
+
+impl WriteColor for StandardStream {
+    #[inline]
+    fn supports_color(&self) -> bool {
+        self.wtr.supports_color()
+    }
+
+    #[inline]
+    fn supports_hyperlinks(&self) -> bool {
+        self.wtr.supports_hyperlinks()
+    }
+
+    #[inline]
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+        self.wtr.set_color(spec)
+    }
+
+    #[inline]
+    fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> {
+        self.wtr.set_hyperlink(link)
+    }
+
+    #[inline]
+    fn reset(&mut self) -> io::Result<()> {
+        self.wtr.reset()
+    }
+
+    #[inline]
+    fn is_synchronous(&self) -> bool {
+        self.wtr.is_synchronous()
+    }
+}
+
+impl<'a> io::Write for StandardStreamLock<'a> {
+    #[inline]
+    fn write(&mut self, b: &[u8]) -> io::Result<usize> {
+        self.wtr.write(b)
+    }
+
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> {
+        self.wtr.flush()
+    }
+}
+
+impl<'a> WriteColor for StandardStreamLock<'a> {
+    #[inline]
+    fn supports_color(&self) -> bool {
+        self.wtr.supports_color()
+    }
+
+    #[inline]
+    fn supports_hyperlinks(&self) -> bool {
+        self.wtr.supports_hyperlinks()
+    }
+
+    #[inline]
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+        self.wtr.set_color(spec)
+    }
+
+    #[inline]
+    fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> {
+        self.wtr.set_hyperlink(link)
+    }
+
+    #[inline]
+    fn reset(&mut self) -> io::Result<()> {
+        self.wtr.reset()
+    }
+
+    #[inline]
+    fn is_synchronous(&self) -> bool {
+        self.wtr.is_synchronous()
+    }
+}
+
+impl io::Write for BufferedStandardStream {
+    #[inline]
+    fn write(&mut self, b: &[u8]) -> io::Result<usize> {
+        self.wtr.write(b)
+    }
+
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> {
+        self.wtr.flush()
+    }
+}
+
+impl WriteColor for BufferedStandardStream {
+    #[inline]
+    fn supports_color(&self) -> bool {
+        self.wtr.supports_color()
+    }
+
+    #[inline]
+    fn supports_hyperlinks(&self) -> bool {
+        self.wtr.supports_hyperlinks()
+    }
+
+    #[inline]
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+        if self.is_synchronous() {
+            self.wtr.flush()?;
+        }
+        self.wtr.set_color(spec)
+    }
+
+    #[inline]
+    fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> {
+        if self.is_synchronous() {
+            self.wtr.flush()?;
+        }
+        self.wtr.set_hyperlink(link)
+    }
+
+    #[inline]
+    fn reset(&mut self) -> io::Result<()> {
+        self.wtr.reset()
+    }
+
+    #[inline]
+    fn is_synchronous(&self) -> bool {
+        self.wtr.is_synchronous()
+    }
+}
+
+impl<W: io::Write> io::Write for WriterInner<W> {
+    #[inline(always)]
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        match *self {
+            WriterInner::NoColor(ref mut wtr) => wtr.write(buf),
+            WriterInner::Ansi(ref mut wtr) => wtr.write(buf),
+            #[cfg(windows)]
+            WriterInner::Windows { ref mut wtr, .. } => wtr.write(buf),
+        }
+    }
+
+    #[inline(always)]
+    fn flush(&mut self) -> io::Result<()> {
+        match *self {
+            WriterInner::NoColor(ref mut wtr) => wtr.flush(),
+            WriterInner::Ansi(ref mut wtr) => wtr.flush(),
+            #[cfg(windows)]
+            WriterInner::Windows { ref mut wtr, .. } => wtr.flush(),
+        }
+    }
+}
+
+impl<W: io::Write> WriteColor for WriterInner<W> {
+    fn supports_color(&self) -> bool {
+        match *self {
+            WriterInner::NoColor(_) => false,
+            WriterInner::Ansi(_) => true,
+            #[cfg(windows)]
+            WriterInner::Windows { .. } => true,
+        }
+    }
+
+    fn supports_hyperlinks(&self) -> bool {
+        match *self {
+            WriterInner::NoColor(_) => false,
+            WriterInner::Ansi(_) => true,
+            #[cfg(windows)]
+            WriterInner::Windows { .. } => false,
+        }
+    }
+
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+        match *self {
+            WriterInner::NoColor(ref mut wtr) => wtr.set_color(spec),
+            WriterInner::Ansi(ref mut wtr) => wtr.set_color(spec),
+            #[cfg(windows)]
+            WriterInner::Windows { ref mut wtr, ref console } => {
+                wtr.flush()?;
+                let mut console = console.lock().unwrap();
+                spec.write_console(&mut *console)
+            }
+        }
+    }
+
+    fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> {
+        match *self {
+            WriterInner::NoColor(ref mut wtr) => wtr.set_hyperlink(link),
+            WriterInner::Ansi(ref mut wtr) => wtr.set_hyperlink(link),
+            #[cfg(windows)]
+            WriterInner::Windows { .. } => Ok(()),
+        }
+    }
+
+    fn reset(&mut self) -> io::Result<()> {
+        match *self {
+            WriterInner::NoColor(ref mut wtr) => wtr.reset(),
+            WriterInner::Ansi(ref mut wtr) => wtr.reset(),
+            #[cfg(windows)]
+            WriterInner::Windows { ref mut wtr, ref mut console } => {
+                wtr.flush()?;
+                console.lock().unwrap().reset()?;
+                Ok(())
+            }
+        }
+    }
+
+    fn is_synchronous(&self) -> bool {
+        match *self {
+            WriterInner::NoColor(_) => false,
+            WriterInner::Ansi(_) => false,
+            #[cfg(windows)]
+            WriterInner::Windows { .. } => true,
+        }
+    }
+}
+
+impl<'a, W: io::Write> io::Write for WriterInnerLock<'a, W> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        match *self {
+            WriterInnerLock::Unreachable(_) => unreachable!(),
+            WriterInnerLock::NoColor(ref mut wtr) => wtr.write(buf),
+            WriterInnerLock::Ansi(ref mut wtr) => wtr.write(buf),
+            #[cfg(windows)]
+            WriterInnerLock::Windows { ref mut wtr, .. } => wtr.write(buf),
+        }
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        match *self {
+            WriterInnerLock::Unreachable(_) => unreachable!(),
+            WriterInnerLock::NoColor(ref mut wtr) => wtr.flush(),
+            WriterInnerLock::Ansi(ref mut wtr) => wtr.flush(),
+            #[cfg(windows)]
+            WriterInnerLock::Windows { ref mut wtr, .. } => wtr.flush(),
+        }
+    }
+}
+
+impl<'a, W: io::Write> WriteColor for WriterInnerLock<'a, W> {
+    fn supports_color(&self) -> bool {
+        match *self {
+            WriterInnerLock::Unreachable(_) => unreachable!(),
+            WriterInnerLock::NoColor(_) => false,
+            WriterInnerLock::Ansi(_) => true,
+            #[cfg(windows)]
+            WriterInnerLock::Windows { .. } => true,
+        }
+    }
+
+    fn supports_hyperlinks(&self) -> bool {
+        match *self {
+            WriterInnerLock::Unreachable(_) => unreachable!(),
+            WriterInnerLock::NoColor(_) => false,
+            WriterInnerLock::Ansi(_) => true,
+            #[cfg(windows)]
+            WriterInnerLock::Windows { .. } => false,
+        }
+    }
+
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+        match *self {
+            WriterInnerLock::Unreachable(_) => unreachable!(),
+            WriterInnerLock::NoColor(ref mut wtr) => wtr.set_color(spec),
+            WriterInnerLock::Ansi(ref mut wtr) => wtr.set_color(spec),
+            #[cfg(windows)]
+            WriterInnerLock::Windows { ref mut wtr, ref mut console } => {
+                wtr.flush()?;
+                spec.write_console(console)
+            }
+        }
+    }
+
+    fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> {
+        match *self {
+            WriterInnerLock::Unreachable(_) => unreachable!(),
+            WriterInnerLock::NoColor(ref mut wtr) => wtr.set_hyperlink(link),
+            WriterInnerLock::Ansi(ref mut wtr) => wtr.set_hyperlink(link),
+            #[cfg(windows)]
+            WriterInnerLock::Windows { .. } => Ok(()),
+        }
+    }
+
+    fn reset(&mut self) -> io::Result<()> {
+        match *self {
+            WriterInnerLock::Unreachable(_) => unreachable!(),
+            WriterInnerLock::NoColor(ref mut wtr) => wtr.reset(),
+            WriterInnerLock::Ansi(ref mut wtr) => wtr.reset(),
+            #[cfg(windows)]
+            WriterInnerLock::Windows { ref mut wtr, ref mut console } => {
+                wtr.flush()?;
+                console.reset()?;
+                Ok(())
+            }
+        }
+    }
+
+    fn is_synchronous(&self) -> bool {
+        match *self {
+            WriterInnerLock::Unreachable(_) => unreachable!(),
+            WriterInnerLock::NoColor(_) => false,
+            WriterInnerLock::Ansi(_) => false,
+            #[cfg(windows)]
+            WriterInnerLock::Windows { .. } => true,
+        }
+    }
+}
+
+/// Writes colored buffers to stdout or stderr.
+///
+/// Writable buffers can be obtained by calling `buffer` on a `BufferWriter`.
+///
+/// This writer works with terminals that support ANSI escape sequences or
+/// with a Windows console.
+///
+/// It is intended for a `BufferWriter` to be put in an `Arc` and written to
+/// from multiple threads simultaneously.
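+///
+/// A minimal sketch of the buffer-then-print flow (illustrative only):
+///
+/// ```no_run
+/// use std::io::Write;
+/// use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
+///
+/// fn example() -> std::io::Result<()> {
+///     let bufwtr = BufferWriter::stderr(ColorChoice::Auto);
+///     let mut buffer = bufwtr.buffer();
+///     buffer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
+///     writeln!(buffer, "an error message")?;
+///     buffer.reset()?;
+///     // The whole buffer is written to stderr in one atomic print.
+///     bufwtr.print(&buffer)
+/// }
+/// ```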
+#[derive(Debug)]
+pub struct BufferWriter {
+    stream: LossyStandardStream<IoStandardStream>,
+    printed: AtomicBool,
+    separator: Option<Vec<u8>>,
+    color_choice: ColorChoice,
+    #[cfg(windows)]
+    console: Option<Mutex<wincon::Console>>,
+}
+
+impl BufferWriter {
+    /// Create a new `BufferWriter` that writes to a standard stream with the
+    /// given color preferences.
+    ///
+    /// The specific color/style settings can be configured when writing to
+    /// the buffers themselves.
+    #[cfg(not(windows))]
+    fn create(sty: StandardStreamType, choice: ColorChoice) -> BufferWriter {
+        BufferWriter {
+            stream: LossyStandardStream::new(IoStandardStream::new(sty)),
+            printed: AtomicBool::new(false),
+            separator: None,
+            color_choice: choice,
+        }
+    }
+
+    /// Create a new `BufferWriter` that writes to a standard stream with the
+    /// given color preferences.
+    ///
+    /// If coloring is desired and a Windows console could not be found, then
+    /// ANSI escape sequences are used instead.
+    ///
+    /// The specific color/style settings can be configured when writing to
+    /// the buffers themselves.
+    #[cfg(windows)]
+    fn create(sty: StandardStreamType, choice: ColorChoice) -> BufferWriter {
+        let mut con = match sty {
+            StandardStreamType::Stdout => wincon::Console::stdout(),
+            StandardStreamType::Stderr => wincon::Console::stderr(),
+            StandardStreamType::StdoutBuffered => wincon::Console::stdout(),
+            StandardStreamType::StderrBuffered => wincon::Console::stderr(),
+        }
+        .ok();
+        let is_console_virtual = con
+            .as_mut()
+            .map(|con| con.set_virtual_terminal_processing(true).is_ok())
+            .unwrap_or(false);
+        // If we can enable ANSI on Windows, then we don't need the console
+        // anymore.
+        if is_console_virtual {
+            con = None;
+        }
+        let stream = LossyStandardStream::new(IoStandardStream::new(sty));
+        BufferWriter {
+            stream,
+            printed: AtomicBool::new(false),
+            separator: None,
+            color_choice: choice,
+            console: con.map(Mutex::new),
+        }
+    }
+
+    /// Create a new `BufferWriter` that writes to stdout with the given
+    /// color preferences.
+    ///
+    /// On Windows, if coloring is desired and a Windows console could not be
+    /// found, then ANSI escape sequences are used instead.
+    ///
+    /// The specific color/style settings can be configured when writing to
+    /// the buffers themselves.
+    pub fn stdout(choice: ColorChoice) -> BufferWriter {
+        BufferWriter::create(StandardStreamType::Stdout, choice)
+    }
+
+    /// Create a new `BufferWriter` that writes to stderr with the given
+    /// color preferences.
+    ///
+    /// On Windows, if coloring is desired and a Windows console could not be
+    /// found, then ANSI escape sequences are used instead.
+    ///
+    /// The specific color/style settings can be configured when writing to
+    /// the buffers themselves.
+    pub fn stderr(choice: ColorChoice) -> BufferWriter {
+        BufferWriter::create(StandardStreamType::Stderr, choice)
+    }
+
+    /// If set, the separator given is printed between buffers. By default, no
+    /// separator is printed.
+    ///
+    /// The default value is `None`.
+    pub fn separator(&mut self, sep: Option<Vec<u8>>) {
+        self.separator = sep;
+    }
+
+    /// Creates a new `Buffer` with the current color preferences.
+    ///
+    /// A `Buffer` satisfies both `io::Write` and `WriteColor`. A `Buffer` can
+    /// be printed using the `print` method.
+    #[cfg(not(windows))]
+    pub fn buffer(&self) -> Buffer {
+        Buffer::new(self.color_choice)
+    }
+
+    /// Creates a new `Buffer` with the current color preferences.
+    ///
+    /// A `Buffer` satisfies both `io::Write` and `WriteColor`. A `Buffer` can
+    /// be printed using the `print` method.
+    #[cfg(windows)]
+    pub fn buffer(&self) -> Buffer {
+        Buffer::new(self.color_choice, self.console.is_some())
+    }
+
+    /// Prints the contents of the given buffer.
+    ///
+    /// It is safe to call this from multiple threads simultaneously. In
+    /// particular, all buffers are written atomically. No interleaving will
+    /// occur.
+    pub fn print(&self, buf: &Buffer) -> io::Result<()> {
+        if buf.is_empty() {
+            return Ok(());
+        }
+        let mut stream = self.stream.wrap(self.stream.get_ref().lock());
+        if let Some(ref sep) = self.separator {
+            if self.printed.load(Ordering::Relaxed) {
+                stream.write_all(sep)?;
+                stream.write_all(b"\n")?;
+            }
+        }
+        match buf.0 {
+            BufferInner::NoColor(ref b) => stream.write_all(&b.0)?,
+            BufferInner::Ansi(ref b) => stream.write_all(&b.0)?,
+            #[cfg(windows)]
+            BufferInner::Windows(ref b) => {
+                // We guarantee by construction that we have a console here.
+                // Namely, a BufferWriter is the only way to produce a Buffer.
+                let console_mutex = self
+                    .console
+                    .as_ref()
+                    .expect("got Windows buffer but have no Console");
+                let mut console = console_mutex.lock().unwrap();
+                b.print(&mut *console, &mut stream)?;
+            }
+        }
+        self.printed.store(true, Ordering::Relaxed);
+        Ok(())
+    }
+}
+
+/// Write colored text to memory.
+///
+/// `Buffer` is a platform-independent abstraction for printing colored text to
+/// an in-memory buffer. When the buffer is printed using a `BufferWriter`, the
+/// color information will be applied to the output device (a tty on Unix and a
+/// console on Windows).
+///
+/// A `Buffer` is typically created by calling the `BufferWriter.buffer`
+/// method, which will take color preferences and the environment into
+/// account. However, buffers can also be manually created using `no_color`,
+/// `ansi` or `console` (on Windows).
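+///
+/// A small in-memory sketch using an ANSI buffer directly (illustrative
+/// only):
+///
+/// ```
+/// use std::io::Write;
+/// use termcolor::{Buffer, ColorSpec, WriteColor};
+///
+/// let mut buf = Buffer::ansi();
+/// buf.set_color(ColorSpec::new().set_bold(true)).unwrap();
+/// write!(buf, "bold").unwrap();
+/// buf.reset().unwrap();
+/// let out = String::from_utf8(buf.into_inner()).unwrap();
+/// assert!(out.contains("\x1b[1m") && out.contains("bold"));
+/// ```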
+#[derive(Clone, Debug)]
+pub struct Buffer(BufferInner);
+
+/// BufferInner is an enumeration of different buffer types.
+#[derive(Clone, Debug)]
+enum BufferInner {
+    /// No coloring information should be applied. This ignores all coloring
+    /// directives.
+    NoColor(NoColor<Vec<u8>>),
+    /// Apply coloring using ANSI escape sequences embedded into the buffer.
+    Ansi(Ansi<Vec<u8>>),
+    /// Apply coloring using the Windows console APIs. This buffer saves
+    /// color information in memory and only interacts with the console when
+    /// the buffer is printed.
+    #[cfg(windows)]
+    Windows(WindowsBuffer),
+}
+
+impl Buffer {
+    /// Create a new buffer with the given color settings.
+    #[cfg(not(windows))]
+    fn new(choice: ColorChoice) -> Buffer {
+        if choice.should_attempt_color() {
+            Buffer::ansi()
+        } else {
+            Buffer::no_color()
+        }
+    }
+
+    /// Create a new buffer with the given color settings.
+    ///
+    /// On Windows, one can elect to create a buffer capable of being written
+    /// to a console. Only enable it if a console is available.
+    ///
+    /// If coloring is desired and `console` is false, then ANSI escape
+    /// sequences are used instead.
+    #[cfg(windows)]
+    fn new(choice: ColorChoice, console: bool) -> Buffer {
+        if choice.should_attempt_color() {
+            if !console || choice.should_ansi() {
+                Buffer::ansi()
+            } else {
+                Buffer::console()
+            }
+        } else {
+            Buffer::no_color()
+        }
+    }
+
+    /// Create a buffer that drops all color information.
+    pub fn no_color() -> Buffer {
+        Buffer(BufferInner::NoColor(NoColor(vec![])))
+    }
+
+    /// Create a buffer that uses ANSI escape sequences.
+    pub fn ansi() -> Buffer {
+        Buffer(BufferInner::Ansi(Ansi(vec![])))
+    }
+
+    /// Create a buffer that can be written to a Windows console.
+    #[cfg(windows)]
+    pub fn console() -> Buffer {
+        Buffer(BufferInner::Windows(WindowsBuffer::new()))
+    }
+
+    /// Returns true if and only if this buffer is empty.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Returns the length of this buffer in bytes.
+    pub fn len(&self) -> usize {
+        match self.0 {
+            BufferInner::NoColor(ref b) => b.0.len(),
+            BufferInner::Ansi(ref b) => b.0.len(),
+            #[cfg(windows)]
+            BufferInner::Windows(ref b) => b.buf.len(),
+        }
+    }
+
+    /// Clears this buffer.
+    pub fn clear(&mut self) {
+        match self.0 {
+            BufferInner::NoColor(ref mut b) => b.0.clear(),
+            BufferInner::Ansi(ref mut b) => b.0.clear(),
+            #[cfg(windows)]
+            BufferInner::Windows(ref mut b) => b.clear(),
+        }
+    }
+
+    /// Consume this buffer and return the underlying raw data.
+    ///
+    /// On Windows, this unrecoverably drops all color information associated
+    /// with the buffer.
+    pub fn into_inner(self) -> Vec<u8> {
+        match self.0 {
+            BufferInner::NoColor(b) => b.0,
+            BufferInner::Ansi(b) => b.0,
+            #[cfg(windows)]
+            BufferInner::Windows(b) => b.buf,
+        }
+    }
+
+    /// Return the underlying data of the buffer.
+    pub fn as_slice(&self) -> &[u8] {
+        match self.0 {
+            BufferInner::NoColor(ref b) => &b.0,
+            BufferInner::Ansi(ref b) => &b.0,
+            #[cfg(windows)]
+            BufferInner::Windows(ref b) => &b.buf,
+        }
+    }
+
+    /// Return the underlying data of the buffer as a mutable slice.
+    pub fn as_mut_slice(&mut self) -> &mut [u8] {
+        match self.0 {
+            BufferInner::NoColor(ref mut b) => &mut b.0,
+            BufferInner::Ansi(ref mut b) => &mut b.0,
+            #[cfg(windows)]
+            BufferInner::Windows(ref mut b) => &mut b.buf,
+        }
+    }
+}
+
+impl io::Write for Buffer {
+    #[inline]
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        match self.0 {
+            BufferInner::NoColor(ref mut w) => w.write(buf),
+            BufferInner::Ansi(ref mut w) => w.write(buf),
+            #[cfg(windows)]
+            BufferInner::Windows(ref mut w) => w.write(buf),
+        }
+    }
+
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> {
+        match self.0 {
+            BufferInner::NoColor(ref mut w) => w.flush(),
+            BufferInner::Ansi(ref mut w) => w.flush(),
+            #[cfg(windows)]
+            BufferInner::Windows(ref mut w) => w.flush(),
+        }
+    }
+}
+
+impl WriteColor for Buffer {
+    #[inline]
+    fn supports_color(&self) -> bool {
+        match self.0 {
+            BufferInner::NoColor(_) => false,
+            BufferInner::Ansi(_) => true,
+            #[cfg(windows)]
+            BufferInner::Windows(_) => true,
+        }
+    }
+
+    #[inline]
+    fn supports_hyperlinks(&self) -> bool {
+        match self.0 {
+            BufferInner::NoColor(_) => false,
+            BufferInner::Ansi(_) => true,
+            #[cfg(windows)]
+            BufferInner::Windows(_) => false,
+        }
+    }
+
+    #[inline]
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+        match self.0 {
+            BufferInner::NoColor(ref mut w) => w.set_color(spec),
+            BufferInner::Ansi(ref mut w) => w.set_color(spec),
+            #[cfg(windows)]
+            BufferInner::Windows(ref mut w) => w.set_color(spec),
+        }
+    }
+
+    #[inline]
+    fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> {
+        match self.0 {
+            BufferInner::NoColor(ref mut w) => w.set_hyperlink(link),
+            BufferInner::Ansi(ref mut w) => w.set_hyperlink(link),
+            #[cfg(windows)]
+            BufferInner::Windows(ref mut w) => w.set_hyperlink(link),
+        }
+    }
+
+    #[inline]
+    fn reset(&mut self) -> io::Result<()> {
+        match self.0 {
+            BufferInner::NoColor(ref mut w) => w.reset(),
+            BufferInner::Ansi(ref mut w) => w.reset(),
+            #[cfg(windows)]
+            BufferInner::Windows(ref mut w) => w.reset(),
+        }
+    }
+
+    #[inline]
+    fn is_synchronous(&self) -> bool {
+        false
+    }
+}
+
+/// Satisfies `WriteColor` but ignores all color options.
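+///
+/// A small sketch (illustrative only): color directives are accepted but
+/// dropped, so only the plain bytes reach the inner writer.
+///
+/// ```
+/// use std::io::Write;
+/// use termcolor::{Color, ColorSpec, NoColor, WriteColor};
+///
+/// let mut wtr = NoColor::new(Vec::<u8>::new());
+/// wtr.set_color(ColorSpec::new().set_fg(Some(Color::Blue))).unwrap();
+/// write!(wtr, "plain").unwrap();
+/// wtr.reset().unwrap();
+/// assert_eq!(wtr.into_inner(), b"plain".to_vec());
+/// ```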
+#[derive(Clone, Debug)]
+pub struct NoColor<W>(W);
+
+impl<W: Write> NoColor<W> {
+    /// Create a new writer that satisfies `WriteColor` but drops all color
+    /// information.
+    pub fn new(wtr: W) -> NoColor<W> {
+        NoColor(wtr)
+    }
+
+    /// Consume this `NoColor` value and return the inner writer.
+    pub fn into_inner(self) -> W {
+        self.0
+    }
+
+    /// Return a reference to the inner writer.
+    pub fn get_ref(&self) -> &W {
+        &self.0
+    }
+
+    /// Return a mutable reference to the inner writer.
+    pub fn get_mut(&mut self) -> &mut W {
+        &mut self.0
+    }
+}
+
+impl<W: io::Write> io::Write for NoColor<W> {
+    #[inline]
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.0.write(buf)
+    }
+
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> {
+        self.0.flush()
+    }
+}
+
+impl<W: io::Write> WriteColor for NoColor<W> {
+    #[inline]
+    fn supports_color(&self) -> bool {
+        false
+    }
+
+    #[inline]
+    fn supports_hyperlinks(&self) -> bool {
+        false
+    }
+
+    #[inline]
+    fn set_color(&mut self, _: &ColorSpec) -> io::Result<()> {
+        Ok(())
+    }
+
+    #[inline]
+    fn set_hyperlink(&mut self, _: &HyperlinkSpec) -> io::Result<()> {
+        Ok(())
+    }
+
+    #[inline]
+    fn reset(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+
+    #[inline]
+    fn is_synchronous(&self) -> bool {
+        false
+    }
+}
+
+/// Satisfies `WriteColor` using standard ANSI escape sequences.
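+///
+/// A small sketch (illustrative only): the escape sequences are written
+/// inline into the underlying writer.
+///
+/// ```
+/// use std::io::Write;
+/// use termcolor::{Ansi, Color, ColorSpec, WriteColor};
+///
+/// let mut wtr = Ansi::new(Vec::<u8>::new());
+/// wtr.set_color(ColorSpec::new().set_fg(Some(Color::Green))).unwrap();
+/// write!(wtr, "green").unwrap();
+/// wtr.reset().unwrap();
+/// let out = String::from_utf8(wtr.into_inner()).unwrap();
+/// assert!(out.contains("\x1b[32m") && out.contains("green"));
+/// ```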
+#[derive(Clone, Debug)]
+pub struct Ansi<W>(W);
+
+impl<W: Write> Ansi<W> {
+    /// Create a new writer that satisfies `WriteColor` using standard ANSI
+    /// escape sequences.
+    pub fn new(wtr: W) -> Ansi<W> {
+        Ansi(wtr)
+    }
+
+    /// Consume this `Ansi` value and return the inner writer.
+    pub fn into_inner(self) -> W {
+        self.0
+    }
+
+    /// Return a reference to the inner writer.
+    pub fn get_ref(&self) -> &W {
+        &self.0
+    }
+
+    /// Return a mutable reference to the inner writer.
+    pub fn get_mut(&mut self) -> &mut W {
+        &mut self.0
+    }
+}
+
+impl<W: io::Write> io::Write for Ansi<W> {
+    #[inline]
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.0.write(buf)
+    }
+
+    // Adding this method here is not required because it has a default impl,
+    // but it seems to provide a perf improvement in some cases when using
+    // a `BufWriter` with lots of writes.
+    //
+    // See https://github.com/BurntSushi/termcolor/pull/56 for more details
+    // and a minimized example.
+    #[inline]
+    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+        self.0.write_all(buf)
+    }
+
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> {
+        self.0.flush()
+    }
+}
+
+impl<W: io::Write> WriteColor for Ansi<W> {
+    #[inline]
+    fn supports_color(&self) -> bool {
+        true
+    }
+
+    #[inline]
+    fn supports_hyperlinks(&self) -> bool {
+        true
+    }
+
+    #[inline]
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+        if spec.reset {
+            self.reset()?;
+        }
+        if spec.bold {
+            self.write_str("\x1B[1m")?;
+        }
+        if spec.dimmed {
+            self.write_str("\x1B[2m")?;
+        }
+        if spec.italic {
+            self.write_str("\x1B[3m")?;
+        }
+        if spec.underline {
+            self.write_str("\x1B[4m")?;
+        }
+        if spec.strikethrough {
+            self.write_str("\x1B[9m")?;
+        }
+        if let Some(ref c) = spec.fg_color {
+            self.write_color(true, c, spec.intense)?;
+        }
+        if let Some(ref c) = spec.bg_color {
+            self.write_color(false, c, spec.intense)?;
+        }
+        Ok(())
+    }
+
+    #[inline]
+    fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> {
+        self.write_str("\x1B]8;;")?;
+        if let Some(uri) = link.uri() {
+            self.write_all(uri)?;
+        }
+        self.write_str("\x1B\\")
+    }
+
+    #[inline]
+    fn reset(&mut self) -> io::Result<()> {
+        self.write_str("\x1B[0m")
+    }
+
+    #[inline]
+    fn is_synchronous(&self) -> bool {
+        false
+    }
+}
+
+impl<W: io::Write> Ansi<W> {
+    fn write_str(&mut self, s: &str) -> io::Result<()> {
+        self.write_all(s.as_bytes())
+    }
+
+    fn write_color(
+        &mut self,
+        fg: bool,
+        c: &Color,
+        intense: bool,
+    ) -> io::Result<()> {
+        macro_rules! write_intense {
+            ($clr:expr) => {
+                if fg {
+                    self.write_str(concat!("\x1B[38;5;", $clr, "m"))
+                } else {
+                    self.write_str(concat!("\x1B[48;5;", $clr, "m"))
+                }
+            };
+        }
+        macro_rules! write_normal {
+            ($clr:expr) => {
+                if fg {
+                    self.write_str(concat!("\x1B[3", $clr, "m"))
+                } else {
+                    self.write_str(concat!("\x1B[4", $clr, "m"))
+                }
+            };
+        }
+        macro_rules! write_var_ansi_code {
+            ($pre:expr, $($code:expr),+) => {{
+                // The loop generates at worst a literal of the form
+                // '255;255;255m', which is 12 bytes.
+                // The largest `pre` expression we currently use is 7 bytes.
+                // This gives us the maximum of 19-bytes for our work buffer.
+                let pre_len = $pre.len();
+                assert!(pre_len <= 7);
+                let mut fmt = [0u8; 19];
+                fmt[..pre_len].copy_from_slice($pre);
+                let mut i = pre_len - 1;
+                $(
+                    let c1: u8 = ($code / 100) % 10;
+                    let c2: u8 = ($code / 10) % 10;
+                    let c3: u8 = $code % 10;
+                    let mut printed = false;
+
+                    if c1 != 0 {
+                        printed = true;
+                        i += 1;
+                        fmt[i] = b'0' + c1;
+                    }
+                    if c2 != 0 || printed {
+                        i += 1;
+                        fmt[i] = b'0' + c2;
+                    }
+                    // If we received a zero value we must still print a value.
+                    i += 1;
+                    fmt[i] = b'0' + c3;
+                    i += 1;
+                    fmt[i] = b';';
+                )+
+
+                fmt[i] = b'm';
+                self.write_all(&fmt[0..i+1])
+            }}
+        }
+        macro_rules! write_custom {
+            ($ansi256:expr) => {
+                if fg {
+                    write_var_ansi_code!(b"\x1B[38;5;", $ansi256)
+                } else {
+                    write_var_ansi_code!(b"\x1B[48;5;", $ansi256)
+                }
+            };
+
+            ($r:expr, $g:expr, $b:expr) => {{
+                if fg {
+                    write_var_ansi_code!(b"\x1B[38;2;", $r, $g, $b)
+                } else {
+                    write_var_ansi_code!(b"\x1B[48;2;", $r, $g, $b)
+                }
+            }};
+        }
+        if intense {
+            match *c {
+                Color::Black => write_intense!("8"),
+                Color::Blue => write_intense!("12"),
+                Color::Green => write_intense!("10"),
+                Color::Red => write_intense!("9"),
+                Color::Cyan => write_intense!("14"),
+                Color::Magenta => write_intense!("13"),
+                Color::Yellow => write_intense!("11"),
+                Color::White => write_intense!("15"),
+                Color::Ansi256(c) => write_custom!(c),
+                Color::Rgb(r, g, b) => write_custom!(r, g, b),
+                Color::__Nonexhaustive => unreachable!(),
+            }
+        } else {
+            match *c {
+                Color::Black => write_normal!("0"),
+                Color::Blue => write_normal!("4"),
+                Color::Green => write_normal!("2"),
+                Color::Red => write_normal!("1"),
+                Color::Cyan => write_normal!("6"),
+                Color::Magenta => write_normal!("5"),
+                Color::Yellow => write_normal!("3"),
+                Color::White => write_normal!("7"),
+                Color::Ansi256(c) => write_custom!(c),
+                Color::Rgb(r, g, b) => write_custom!(r, g, b),
+                Color::__Nonexhaustive => unreachable!(),
+            }
+        }
+    }
+}
+
+impl WriteColor for io::Sink {
+    fn supports_color(&self) -> bool {
+        false
+    }
+
+    fn supports_hyperlinks(&self) -> bool {
+        false
+    }
+
+    fn set_color(&mut self, _: &ColorSpec) -> io::Result<()> {
+        Ok(())
+    }
+
+    fn set_hyperlink(&mut self, _: &HyperlinkSpec) -> io::Result<()> {
+        Ok(())
+    }
+
+    fn reset(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+}
+
+/// An in-memory buffer that provides Windows console coloring.
+///
+/// This doesn't actually communicate with the Windows console. Instead, it
+/// acts like a normal buffer but also saves the color information associated
+/// with positions in the buffer. It is only when the buffer is written to the
+/// console that coloring is actually applied.
+///
+/// This is roughly isomorphic to the ANSI based approach (i.e.,
+/// `Ansi<Vec<u8>>`), except with ANSI, the color information is embedded
+/// directly into the buffer.
+///
+/// Note that there is no way to write something generic like
+/// `WindowsConsole<W: io::Write>` since coloring on Windows is tied
+/// specifically to the console APIs, and therefore can't work on arbitrary
+/// writers.
+#[cfg(windows)]
+#[derive(Clone, Debug)]
+struct WindowsBuffer {
+    /// The actual content that should be printed.
+    buf: Vec<u8>,
+    /// A sequence of position oriented color specifications. Namely, each
+    /// element is a position and a color spec, where the color spec should
+    /// be applied at the position inside of `buf`.
+    ///
+    /// A missing color spec implies the underlying console should be reset.
+    colors: Vec<(usize, Option<ColorSpec>)>,
+}
+
+#[cfg(windows)]
+impl WindowsBuffer {
+    /// Create a new empty buffer for Windows console coloring.
+    fn new() -> WindowsBuffer {
+        WindowsBuffer { buf: vec![], colors: vec![] }
+    }
+
+    /// Push the given color specification into this buffer.
+    ///
+    /// This has the effect of setting the given color information at the
+    /// current position in the buffer.
+    fn push(&mut self, spec: Option<ColorSpec>) {
+        let pos = self.buf.len();
+        self.colors.push((pos, spec));
+    }
+
+    /// Print the contents to the given stream handle, and use the console
+    /// for coloring.
+    fn print(
+        &self,
+        console: &mut wincon::Console,
+        stream: &mut LossyStandardStream<IoStandardStreamLock>,
+    ) -> io::Result<()> {
+        let mut last = 0;
+        for &(pos, ref spec) in &self.colors {
+            stream.write_all(&self.buf[last..pos])?;
+            stream.flush()?;
+            last = pos;
+            match *spec {
+                None => console.reset()?,
+                Some(ref spec) => spec.write_console(console)?,
+            }
+        }
+        stream.write_all(&self.buf[last..])?;
+        stream.flush()
+    }
+
+    /// Clear the buffer.
+    fn clear(&mut self) {
+        self.buf.clear();
+        self.colors.clear();
+    }
+}
+
+#[cfg(windows)]
+impl io::Write for WindowsBuffer {
+    #[inline]
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.buf.extend_from_slice(buf);
+        Ok(buf.len())
+    }
+
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+}
+
+#[cfg(windows)]
+impl WriteColor for WindowsBuffer {
+    #[inline]
+    fn supports_color(&self) -> bool {
+        true
+    }
+
+    #[inline]
+    fn supports_hyperlinks(&self) -> bool {
+        false
+    }
+
+    #[inline]
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+        self.push(Some(spec.clone()));
+        Ok(())
+    }
+
+    #[inline]
+    fn set_hyperlink(&mut self, _: &HyperlinkSpec) -> io::Result<()> {
+        Ok(())
+    }
+
+    #[inline]
+    fn reset(&mut self) -> io::Result<()> {
+        self.push(None);
+        Ok(())
+    }
+
+    #[inline]
+    fn is_synchronous(&self) -> bool {
+        false
+    }
+}
+
+/// A color specification.
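+///
+/// Specs are built up with the chaining setter methods (an illustrative
+/// sketch):
+///
+/// ```
+/// use termcolor::{Color, ColorSpec};
+///
+/// let mut spec = ColorSpec::new();
+/// spec.set_fg(Some(Color::Cyan)).set_bold(true).set_intense(true);
+/// assert_eq!(spec.fg(), Some(&Color::Cyan));
+/// assert!(spec.bold() && spec.intense());
+/// ```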
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct ColorSpec {
+    fg_color: Option<Color>,
+    bg_color: Option<Color>,
+    bold: bool,
+    intense: bool,
+    underline: bool,
+    dimmed: bool,
+    italic: bool,
+    reset: bool,
+    strikethrough: bool,
+}
+
+impl Default for ColorSpec {
+    fn default() -> ColorSpec {
+        ColorSpec {
+            fg_color: None,
+            bg_color: None,
+            bold: false,
+            intense: false,
+            underline: false,
+            dimmed: false,
+            italic: false,
+            reset: true,
+            strikethrough: false,
+        }
+    }
+}
+
+impl ColorSpec {
+    /// Create a new color specification that has no colors or styles.
+    pub fn new() -> ColorSpec {
+        ColorSpec::default()
+    }
+
+    /// Get the foreground color.
+    pub fn fg(&self) -> Option<&Color> {
+        self.fg_color.as_ref()
+    }
+
+    /// Set the foreground color.
+    pub fn set_fg(&mut self, color: Option<Color>) -> &mut ColorSpec {
+        self.fg_color = color;
+        self
+    }
+
+    /// Get the background color.
+    pub fn bg(&self) -> Option<&Color> {
+        self.bg_color.as_ref()
+    }
+
+    /// Set the background color.
+    pub fn set_bg(&mut self, color: Option<Color>) -> &mut ColorSpec {
+        self.bg_color = color;
+        self
+    }
+
+    /// Get whether this is bold or not.
+    ///
+    /// Note that the bold setting has no effect in a Windows console.
+    pub fn bold(&self) -> bool {
+        self.bold
+    }
+
+    /// Set whether the text is bolded or not.
+    ///
+    /// Note that the bold setting has no effect in a Windows console.
+    pub fn set_bold(&mut self, yes: bool) -> &mut ColorSpec {
+        self.bold = yes;
+        self
+    }
+
+    /// Get whether this is dimmed or not.
+    ///
+    /// Note that the dimmed setting has no effect in a Windows console.
+    pub fn dimmed(&self) -> bool {
+        self.dimmed
+    }
+
+    /// Set whether the text is dimmed or not.
+    ///
+    /// Note that the dimmed setting has no effect in a Windows console.
+    pub fn set_dimmed(&mut self, yes: bool) -> &mut ColorSpec {
+        self.dimmed = yes;
+        self
+    }
+
+    /// Get whether this is italic or not.
+    ///
+    /// Note that the italic setting has no effect in a Windows console.
+    pub fn italic(&self) -> bool {
+        self.italic
+    }
+
+    /// Set whether the text is italicized or not.
+    ///
+    /// Note that the italic setting has no effect in a Windows console.
+    pub fn set_italic(&mut self, yes: bool) -> &mut ColorSpec {
+        self.italic = yes;
+        self
+    }
+
+    /// Get whether this is underline or not.
+    ///
+    /// Note that the underline setting has no effect in a Windows console.
+    pub fn underline(&self) -> bool {
+        self.underline
+    }
+
+    /// Set whether the text is underlined or not.
+    ///
+    /// Note that the underline setting has no effect in a Windows console.
+    pub fn set_underline(&mut self, yes: bool) -> &mut ColorSpec {
+        self.underline = yes;
+        self
+    }
+
+    /// Get whether this is strikethrough or not.
+    ///
+    /// Note that the strikethrough setting has no effect in a Windows console.
+    pub fn strikethrough(&self) -> bool {
+        self.strikethrough
+    }
+
+    /// Set whether the text is strikethrough or not.
+    ///
+    /// Note that the strikethrough setting has no effect in a Windows console.
+    pub fn set_strikethrough(&mut self, yes: bool) -> &mut ColorSpec {
+        self.strikethrough = yes;
+        self
+    }
+
+    /// Get whether reset is enabled or not.
+    ///
+    /// reset is enabled by default. When enabled and using ANSI escape
+    /// sequences, a "reset" code is emitted before each application of a
+    /// `ColorSpec`'s settings; disabling it suppresses that leading reset.
+    ///
+    /// Note that the reset setting has no effect in a Windows console.
+    pub fn reset(&self) -> bool {
+        self.reset
+    }
+
+    /// Set whether to reset the terminal whenever color settings are applied.
+    ///
+    /// reset is enabled by default. When enabled and using ANSI escape
+    /// sequences, a "reset" code is emitted before each application of a
+    /// `ColorSpec`'s settings; disabling it suppresses that leading reset.
+    ///
+    /// Typically this is useful if callers have a requirement to more
+    /// scrupulously manage the exact sequence of escape codes that are emitted
+    /// when using ANSI for colors.
+    ///
+    /// Note that the reset setting has no effect in a Windows console.
+    pub fn set_reset(&mut self, yes: bool) -> &mut ColorSpec {
+        self.reset = yes;
+        self
+    }
+
+    /// Get whether this is intense or not.
+    ///
+    /// On Unix-like systems, this will output the ANSI escape sequence
+    /// that will print a high-intensity version of the color
+    /// specified.
+    ///
+    /// On Windows systems, this will output the ANSI escape sequence
+    /// that will print a brighter version of the color specified.
+    pub fn intense(&self) -> bool {
+        self.intense
+    }
+
+    /// Set whether the text is intense or not.
+    ///
+    /// On Unix-like systems, this will output the ANSI escape sequence
+    /// that will print a high-intensity version of the color
+    /// specified.
+    ///
+    /// On Windows systems, this will output the ANSI escape sequence
+    /// that will print a brighter version of the color specified.
+    pub fn set_intense(&mut self, yes: bool) -> &mut ColorSpec {
+        self.intense = yes;
+        self
+    }
+
+    /// Returns true if this color specification has no colors or styles.
+    pub fn is_none(&self) -> bool {
+        self.fg_color.is_none()
+            && self.bg_color.is_none()
+            && !self.bold
+            && !self.underline
+            && !self.dimmed
+            && !self.italic
+            && !self.intense
+            && !self.strikethrough
+    }
+
+    /// Clears this color specification so that it has no color/style settings.
+    pub fn clear(&mut self) {
+        self.fg_color = None;
+        self.bg_color = None;
+        self.bold = false;
+        self.underline = false;
+        self.intense = false;
+        self.dimmed = false;
+        self.italic = false;
+        self.strikethrough = false;
+    }
+
+    /// Writes this color spec to the given Windows console.
+    #[cfg(windows)]
+    fn write_console(&self, console: &mut wincon::Console) -> io::Result<()> {
+        let fg_color = self.fg_color.and_then(|c| c.to_windows(self.intense));
+        if let Some((intense, color)) = fg_color {
+            console.fg(intense, color)?;
+        }
+        let bg_color = self.bg_color.and_then(|c| c.to_windows(self.intense));
+        if let Some((intense, color)) = bg_color {
+            console.bg(intense, color)?;
+        }
+        Ok(())
+    }
+}
+
+/// The set of available colors for the terminal foreground/background.
+///
+/// The `Ansi256` and `Rgb` colors will only output the correct codes when
+/// paired with the `Ansi` `WriteColor` implementation.
+///
+/// The `Ansi256` and `Rgb` color types are not supported when writing colors
+/// on Windows using the console. If they are used on Windows, then they are
+/// silently ignored and no colors will be emitted.
+///
+/// This set may expand over time.
+///
+/// This type has a `FromStr` impl that can parse colors from their human
+/// readable form. The format is as follows:
+///
+/// 1. Any of the explicitly listed colors in English. They are matched
+///    case insensitively.
+/// 2. A single 8-bit integer, in either decimal or hexadecimal format.
+/// 3. A triple of 8-bit integers separated by a comma, where each integer is
+///    in decimal or hexadecimal format.
+///
+/// Hexadecimal numbers are written with a `0x` prefix.
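+///
+/// For example (an illustrative sketch of the three accepted forms):
+///
+/// ```
+/// use termcolor::Color;
+///
+/// assert_eq!("green".parse::<Color>().unwrap(), Color::Green);
+/// assert_eq!("0xAF".parse::<Color>().unwrap(), Color::Ansi256(0xAF));
+/// assert_eq!("0,128,255".parse::<Color>().unwrap(), Color::Rgb(0, 128, 255));
+/// ```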
+#[allow(missing_docs)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum Color {
+    Black,
+    Blue,
+    Green,
+    Red,
+    Cyan,
+    Magenta,
+    Yellow,
+    White,
+    Ansi256(u8),
+    Rgb(u8, u8, u8),
+    #[doc(hidden)]
+    __Nonexhaustive,
+}
+
+impl Color {
+    /// Translate this color to a wincon::Color.
+    #[cfg(windows)]
+    fn to_windows(
+        self,
+        intense: bool,
+    ) -> Option<(wincon::Intense, wincon::Color)> {
+        use wincon::Intense::{No, Yes};
+
+        let color = match self {
+            Color::Black => wincon::Color::Black,
+            Color::Blue => wincon::Color::Blue,
+            Color::Green => wincon::Color::Green,
+            Color::Red => wincon::Color::Red,
+            Color::Cyan => wincon::Color::Cyan,
+            Color::Magenta => wincon::Color::Magenta,
+            Color::Yellow => wincon::Color::Yellow,
+            Color::White => wincon::Color::White,
+            Color::Ansi256(0) => return Some((No, wincon::Color::Black)),
+            Color::Ansi256(1) => return Some((No, wincon::Color::Red)),
+            Color::Ansi256(2) => return Some((No, wincon::Color::Green)),
+            Color::Ansi256(3) => return Some((No, wincon::Color::Yellow)),
+            Color::Ansi256(4) => return Some((No, wincon::Color::Blue)),
+            Color::Ansi256(5) => return Some((No, wincon::Color::Magenta)),
+            Color::Ansi256(6) => return Some((No, wincon::Color::Cyan)),
+            Color::Ansi256(7) => return Some((No, wincon::Color::White)),
+            Color::Ansi256(8) => return Some((Yes, wincon::Color::Black)),
+            Color::Ansi256(9) => return Some((Yes, wincon::Color::Red)),
+            Color::Ansi256(10) => return Some((Yes, wincon::Color::Green)),
+            Color::Ansi256(11) => return Some((Yes, wincon::Color::Yellow)),
+            Color::Ansi256(12) => return Some((Yes, wincon::Color::Blue)),
+            Color::Ansi256(13) => return Some((Yes, wincon::Color::Magenta)),
+            Color::Ansi256(14) => return Some((Yes, wincon::Color::Cyan)),
+            Color::Ansi256(15) => return Some((Yes, wincon::Color::White)),
+            Color::Ansi256(_) => return None,
+            Color::Rgb(_, _, _) => return None,
+            Color::__Nonexhaustive => unreachable!(),
+        };
+        let intense = if intense { Yes } else { No };
+        Some((intense, color))
+    }
+
+    /// Parses a numeric color string, either ANSI or RGB.
+    fn from_str_numeric(s: &str) -> Result<Color, ParseColorError> {
+        // The "ansi256" format is a single number (decimal or hex)
+        // corresponding to one of 256 colors.
+        //
+        // The "rgb" format is a triple of numbers (decimal or hex) delimited
+        // by a comma corresponding to one of 256^3 colors.
+
+        fn parse_number(s: &str) -> Option<u8> {
+            use std::u8;
+
+            if s.starts_with("0x") {
+                u8::from_str_radix(&s[2..], 16).ok()
+            } else {
+                u8::from_str_radix(s, 10).ok()
+            }
+        }
+
+        let codes: Vec<&str> = s.split(',').collect();
+        if codes.len() == 1 {
+            if let Some(n) = parse_number(&codes[0]) {
+                Ok(Color::Ansi256(n))
+            } else {
+                if s.chars().all(|c| c.is_digit(16)) {
+                    Err(ParseColorError {
+                        kind: ParseColorErrorKind::InvalidAnsi256,
+                        given: s.to_string(),
+                    })
+                } else {
+                    Err(ParseColorError {
+                        kind: ParseColorErrorKind::InvalidName,
+                        given: s.to_string(),
+                    })
+                }
+            }
+        } else if codes.len() == 3 {
+            let mut v = vec![];
+            for code in codes {
+                let n = parse_number(code).ok_or_else(|| ParseColorError {
+                    kind: ParseColorErrorKind::InvalidRgb,
+                    given: s.to_string(),
+                })?;
+                v.push(n);
+            }
+            Ok(Color::Rgb(v[0], v[1], v[2]))
+        } else {
+            Err(if s.contains(",") {
+                ParseColorError {
+                    kind: ParseColorErrorKind::InvalidRgb,
+                    given: s.to_string(),
+                }
+            } else {
+                ParseColorError {
+                    kind: ParseColorErrorKind::InvalidName,
+                    given: s.to_string(),
+                }
+            })
+        }
+    }
+}
+
+/// An error from parsing an invalid color specification.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct ParseColorError {
+    kind: ParseColorErrorKind,
+    given: String,
+}
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+enum ParseColorErrorKind {
+    InvalidName,
+    InvalidAnsi256,
+    InvalidRgb,
+}
+
+impl ParseColorError {
+    /// Return the string that couldn't be parsed as a valid color.
+    pub fn invalid(&self) -> &str {
+        &self.given
+    }
+}
+
+impl error::Error for ParseColorError {
+    fn description(&self) -> &str {
+        use self::ParseColorErrorKind::*;
+        match self.kind {
+            InvalidName => "unrecognized color name",
+            InvalidAnsi256 => "invalid ansi256 color number",
+            InvalidRgb => "invalid RGB color triple",
+        }
+    }
+}
+
+impl fmt::Display for ParseColorError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use self::ParseColorErrorKind::*;
+        match self.kind {
+            InvalidName => write!(
+                f,
+                "unrecognized color name '{}'. Choose from: \
+                 black, blue, green, red, cyan, magenta, yellow, \
+                 white",
+                self.given
+            ),
+            InvalidAnsi256 => write!(
+                f,
+                "unrecognized ansi256 color number, \
+                 should be '[0-255]' (or a hex number), but is '{}'",
+                self.given
+            ),
+            InvalidRgb => write!(
+                f,
+                "unrecognized RGB color triple, \
+                 should be '[0-255],[0-255],[0-255]' (or a hex \
+                 triple), but is '{}'",
+                self.given
+            ),
+        }
+    }
+}
+
+impl FromStr for Color {
+    type Err = ParseColorError;
+
+    fn from_str(s: &str) -> Result<Color, ParseColorError> {
+        match &*s.to_lowercase() {
+            "black" => Ok(Color::Black),
+            "blue" => Ok(Color::Blue),
+            "green" => Ok(Color::Green),
+            "red" => Ok(Color::Red),
+            "cyan" => Ok(Color::Cyan),
+            "magenta" => Ok(Color::Magenta),
+            "yellow" => Ok(Color::Yellow),
+            "white" => Ok(Color::White),
+            _ => Color::from_str_numeric(s),
+        }
+    }
+}
+
+/// A hyperlink specification.
+#[derive(Clone, Debug)]
+pub struct HyperlinkSpec<'a> {
+    uri: Option<&'a [u8]>,
+}
+
+impl<'a> HyperlinkSpec<'a> {
+    /// Creates a new hyperlink specification.
+    pub fn open(uri: &'a [u8]) -> HyperlinkSpec<'a> {
+        HyperlinkSpec { uri: Some(uri) }
+    }
+
+    /// Creates a hyperlink specification representing no hyperlink.
+    pub fn close() -> HyperlinkSpec<'a> {
+        HyperlinkSpec { uri: None }
+    }
+
+    /// Returns the URI of the hyperlink if one is attached to this spec.
+    pub fn uri(&self) -> Option<&'a [u8]> {
+        self.uri
+    }
+}
+
+#[derive(Debug)]
+struct LossyStandardStream<W> {
+    wtr: W,
+    #[cfg(windows)]
+    is_console: bool,
+}
+
+impl<W: io::Write> LossyStandardStream<W> {
+    #[cfg(not(windows))]
+    fn new(wtr: W) -> LossyStandardStream<W> {
+        LossyStandardStream { wtr }
+    }
+
+    #[cfg(windows)]
+    fn new(wtr: W) -> LossyStandardStream<W> {
+        let is_console = wincon::Console::stdout().is_ok()
+            || wincon::Console::stderr().is_ok();
+        LossyStandardStream { wtr, is_console }
+    }
+
+    #[cfg(not(windows))]
+    fn wrap<Q: io::Write>(&self, wtr: Q) -> LossyStandardStream<Q> {
+        LossyStandardStream::new(wtr)
+    }
+
+    #[cfg(windows)]
+    fn wrap<Q: io::Write>(&self, wtr: Q) -> LossyStandardStream<Q> {
+        LossyStandardStream { wtr, is_console: self.is_console }
+    }
+
+    fn get_ref(&self) -> &W {
+        &self.wtr
+    }
+}
+
+impl<W: WriteColor> WriteColor for LossyStandardStream<W> {
+    fn supports_color(&self) -> bool {
+        self.wtr.supports_color()
+    }
+    fn supports_hyperlinks(&self) -> bool {
+        self.wtr.supports_hyperlinks()
+    }
+    fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> {
+        self.wtr.set_color(spec)
+    }
+    fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> {
+        self.wtr.set_hyperlink(link)
+    }
+    fn reset(&mut self) -> io::Result<()> {
+        self.wtr.reset()
+    }
+    fn is_synchronous(&self) -> bool {
+        self.wtr.is_synchronous()
+    }
+}
+
+impl<W: io::Write> io::Write for LossyStandardStream<W> {
+    #[cfg(not(windows))]
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.wtr.write(buf)
+    }
+
+    #[cfg(windows)]
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        if self.is_console {
+            write_lossy_utf8(&mut self.wtr, buf)
+        } else {
+            self.wtr.write(buf)
+        }
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        self.wtr.flush()
+    }
+}
+
+#[cfg(windows)]
+fn write_lossy_utf8<W: io::Write>(mut w: W, buf: &[u8]) -> io::Result<usize> {
+    match ::std::str::from_utf8(buf) {
+        Ok(s) => w.write(s.as_bytes()),
+        Err(ref e) if e.valid_up_to() == 0 => {
+            w.write(b"\xEF\xBF\xBD")?;
+            Ok(1)
+        }
+        Err(e) => w.write(&buf[..e.valid_up_to()]),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{
+        Ansi, Color, ColorSpec, HyperlinkSpec, ParseColorError,
+        ParseColorErrorKind, StandardStream, WriteColor,
+    };
+
+    fn assert_is_send<T: Send>() {}
+
+    #[test]
+    fn standard_stream_is_send() {
+        assert_is_send::<StandardStream>();
+    }
+
+    #[test]
+    fn test_simple_parse_ok() {
+        let color = "green".parse::<Color>();
+        assert_eq!(color, Ok(Color::Green));
+    }
+
+    #[test]
+    fn test_256_parse_ok() {
+        let color = "7".parse::<Color>();
+        assert_eq!(color, Ok(Color::Ansi256(7)));
+
+        let color = "32".parse::<Color>();
+        assert_eq!(color, Ok(Color::Ansi256(32)));
+
+        let color = "0xFF".parse::<Color>();
+        assert_eq!(color, Ok(Color::Ansi256(0xFF)));
+    }
+
+    #[test]
+    fn test_256_parse_err_out_of_range() {
+        let color = "256".parse::<Color>();
+        assert_eq!(
+            color,
+            Err(ParseColorError {
+                kind: ParseColorErrorKind::InvalidAnsi256,
+                given: "256".to_string(),
+            })
+        );
+    }
+
+    #[test]
+    fn test_rgb_parse_ok() {
+        let color = "0,0,0".parse::<Color>();
+        assert_eq!(color, Ok(Color::Rgb(0, 0, 0)));
+
+        let color = "0,128,255".parse::<Color>();
+        assert_eq!(color, Ok(Color::Rgb(0, 128, 255)));
+
+        let color = "0x0,0x0,0x0".parse::<Color>();
+        assert_eq!(color, Ok(Color::Rgb(0, 0, 0)));
+
+        let color = "0x33,0x66,0xFF".parse::<Color>();
+        assert_eq!(color, Ok(Color::Rgb(0x33, 0x66, 0xFF)));
+    }
+
+    #[test]
+    fn test_rgb_parse_err_out_of_range() {
+        let color = "0,0,256".parse::<Color>();
+        assert_eq!(
+            color,
+            Err(ParseColorError {
+                kind: ParseColorErrorKind::InvalidRgb,
+                given: "0,0,256".to_string(),
+            })
+        );
+    }
+
+    #[test]
+    fn test_rgb_parse_err_bad_format() {
+        let color = "0,0".parse::<Color>();
+        assert_eq!(
+            color,
+            Err(ParseColorError {
+                kind: ParseColorErrorKind::InvalidRgb,
+                given: "0,0".to_string(),
+            })
+        );
+
+        let color = "not_a_color".parse::<Color>();
+        assert_eq!(
+            color,
+            Err(ParseColorError {
+                kind: ParseColorErrorKind::InvalidName,
+                given: "not_a_color".to_string(),
+            })
+        );
+    }
+
+    #[test]
+    fn test_var_ansi_write_rgb() {
+        let mut buf = Ansi::new(vec![]);
+        let _ = buf.write_color(true, &Color::Rgb(254, 253, 255), false);
+        assert_eq!(buf.0, b"\x1B[38;2;254;253;255m");
+    }
+
+    #[test]
+    fn test_reset() {
+        let spec = ColorSpec::new();
+        let mut buf = Ansi::new(vec![]);
+        buf.set_color(&spec).unwrap();
+        assert_eq!(buf.0, b"\x1B[0m");
+    }
+
+    #[test]
+    fn test_no_reset() {
+        let mut spec = ColorSpec::new();
+        spec.set_reset(false);
+
+        let mut buf = Ansi::new(vec![]);
+        buf.set_color(&spec).unwrap();
+        assert_eq!(buf.0, b"");
+    }
+
+    #[test]
+    fn test_var_ansi_write_256() {
+        let mut buf = Ansi::new(vec![]);
+        let _ = buf.write_color(false, &Color::Ansi256(7), false);
+        assert_eq!(buf.0, b"\x1B[48;5;7m");
+
+        let mut buf = Ansi::new(vec![]);
+        let _ = buf.write_color(false, &Color::Ansi256(208), false);
+        assert_eq!(buf.0, b"\x1B[48;5;208m");
+    }
+
+    fn all_attributes() -> Vec<ColorSpec> {
+        let mut result = vec![];
+        for fg in vec![None, Some(Color::Red)] {
+            for bg in vec![None, Some(Color::Red)] {
+                for bold in vec![false, true] {
+                    for underline in vec![false, true] {
+                        for intense in vec![false, true] {
+                            for italic in vec![false, true] {
+                                for strikethrough in vec![false, true] {
+                                    for dimmed in vec![false, true] {
+                                        let mut color = ColorSpec::new();
+                                        color.set_fg(fg);
+                                        color.set_bg(bg);
+                                        color.set_bold(bold);
+                                        color.set_underline(underline);
+                                        color.set_intense(intense);
+                                        color.set_italic(italic);
+                                        color.set_dimmed(dimmed);
+                                        color.set_strikethrough(strikethrough);
+                                        result.push(color);
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        result
+    }
+
+    #[test]
+    fn test_is_none() {
+        for (i, color) in all_attributes().iter().enumerate() {
+            assert_eq!(
+                i == 0,
+                color.is_none(),
+                "{:?} => {}",
+                color,
+                color.is_none()
+            )
+        }
+    }
+
+    #[test]
+    fn test_clear() {
+        for color in all_attributes() {
+            let mut color1 = color.clone();
+            color1.clear();
+            assert!(color1.is_none(), "{:?} => {:?}", color, color1);
+        }
+    }
+
+    #[test]
+    fn test_ansi_hyperlink() {
+        let mut buf = Ansi::new(vec![]);
+        buf.set_hyperlink(&HyperlinkSpec::open(b"https://example.com"))
+            .unwrap();
+        buf.write_str("label").unwrap();
+        buf.set_hyperlink(&HyperlinkSpec::close()).unwrap();
+
+        assert_eq!(
+            buf.0,
+            b"\x1B]8;;https://example.com\x1B\\label\x1B]8;;\x1B\\".to_vec()
+        );
+    }
+}
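
The termcolor source above documents `Color`'s `FromStr` format and the `ColorSpec` builder, but the bundled tests only exercise them through crate-private fields. The following is a minimal usage sketch and is not part of the diff: it assumes the upstream `termcolor` public API (`Ansi::new`/`Ansi::into_inner` and the `WriteColor` trait) in addition to the types shown above.

```rust
// Minimal sketch of the Color/ColorSpec API shown in the diff above.
// Assumes the upstream termcolor re-exports (Ansi, Color, ColorSpec, WriteColor)
// and Ansi::into_inner, which are not visible in this excerpt.
use std::io::Write;
use termcolor::{Ansi, Color, ColorSpec, WriteColor};

fn main() -> std::io::Result<()> {
    // Color's FromStr accepts names, ANSI-256 numbers, and RGB triples,
    // e.g. "green", "0xFF", or "0,128,255".
    let fg: Color = "green".parse().expect("recognized color name");

    // Ansi wraps any io::Write and emits escape sequences, so a Vec<u8>
    // works as a sink for inspection.
    let mut buf = Ansi::new(Vec::new());
    buf.set_color(ColorSpec::new().set_fg(Some(fg)).set_intense(true))?;
    write!(buf, "hello")?;
    buf.reset()?;

    let bytes = buf.into_inner();
    assert!(bytes.starts_with(b"\x1B["));
    Ok(())
}
```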
diff --git a/crates/termtree/.cargo-checksum.json b/crates/termtree/.cargo-checksum.json
new file mode 100644
index 0000000..ff696de
--- /dev/null
+++ b/crates/termtree/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.lock":"da61cce61f9bc2d2a360f7456f20289de1dc5d5f7a6412797b5f8230ff482a6d","Cargo.toml":"08e7f8f9acd2a8148533f340e1dffd42bd0bc19ea11a51075736b27eb53270f5","LICENSE":"ea6e2604012cb26afc148549e63c1e44c1ac2d41817e7370d8d49c43ca9249aa","README.md":"7eac81f10fbebb1df5a57eded07f91baa9092fc36f64aeaa55b0bb69865ea36c","examples/tree.rs":"4872086d9f32d88343d5f24a04cdffeb16b99b944fd504f06ac8173c557f01cf","src/lib.rs":"83b118e0bf4c509d645beda86992ad287b3f9f0241fbd4a6aec095cddc6b9a13","src/tests.rs":"015b65ad98d27f675d56594bee3ba8c1b9865ba5bd86edb09832914f8ee8f176"},"package":"3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"}
\ No newline at end of file
diff --git a/crates/termtree/Android.bp b/crates/termtree/Android.bp
new file mode 100644
index 0000000..7ce3b87
--- /dev/null
+++ b/crates/termtree/Android.bp
@@ -0,0 +1,45 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_termtree_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_termtree_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-MIT"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libtermtree",
+    host_supported: true,
+    crate_name: "termtree",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.4.1",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
+
+rust_test {
+    name: "termtree_test_src_lib",
+    host_supported: true,
+    crate_name: "termtree",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.4.1",
+    crate_root: "src/lib.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2018",
+}
diff --git a/crates/termtree/Cargo.lock b/crates/termtree/Cargo.lock
new file mode 100644
index 0000000..69736c1
--- /dev/null
+++ b/crates/termtree/Cargo.lock
@@ -0,0 +1,7 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "termtree"
+version = "0.4.1"
diff --git a/crates/termtree/Cargo.toml b/crates/termtree/Cargo.toml
new file mode 100644
index 0000000..f0e5f2b
--- /dev/null
+++ b/crates/termtree/Cargo.toml
@@ -0,0 +1,72 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "termtree"
+version = "0.4.1"
+include = [
+    "src/**/*",
+    "Cargo.toml",
+    "LICENSE*",
+    "README.md",
+    "examples/**/*",
+]
+description = "Visualize tree-like data on the command-line"
+homepage = "https://github.com/rust-cli/termtree"
+documentation = "https://docs.rs/termtree"
+readme = "README.md"
+keywords = [
+    "cli",
+    "tree",
+    "dag",
+]
+categories = [
+    "command-line-interface",
+    "visualization",
+]
+license = "MIT"
+repository = "https://github.com/rust-cli/termtree"
+
+[[package.metadata.release.pre-release-replacements]]
+file = "CHANGELOG.md"
+search = "Unreleased"
+replace = "{{version}}"
+min = 1
+
+[[package.metadata.release.pre-release-replacements]]
+file = "CHANGELOG.md"
+search = '\.\.\.HEAD'
+replace = "...{{tag_name}}"
+exactly = 1
+
+[[package.metadata.release.pre-release-replacements]]
+file = "CHANGELOG.md"
+search = "ReleaseDate"
+replace = "{{date}}"
+min = 1
+
+[[package.metadata.release.pre-release-replacements]]
+file = "CHANGELOG.md"
+search = "<!-- next-header -->"
+replace = """
+<!-- next-header -->
+## [Unreleased] - ReleaseDate
+"""
+exactly = 1
+
+[[package.metadata.release.pre-release-replacements]]
+file = "CHANGELOG.md"
+search = "<!-- next-url -->"
+replace = """
+<!-- next-url -->
+[Unreleased]: https://github.com/rust-cli/termtree/compare/{{tag_name}}...HEAD"""
+exactly = 1
diff --git a/crates/termtree/LICENSE b/crates/termtree/LICENSE
new file mode 100644
index 0000000..d5fcce5
--- /dev/null
+++ b/crates/termtree/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2017 Doug Tangren
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/crates/termtree/METADATA b/crates/termtree/METADATA
new file mode 100644
index 0000000..8bc6904
--- /dev/null
+++ b/crates/termtree/METADATA
@@ -0,0 +1,19 @@
+name: "termtree"
+description: "Visualize tree-like data on the command-line"
+third_party {
+  identifier {
+    type: "crates.io"
+    value: "https://crates.io/crates/termtree"
+  }
+  identifier {
+    type: "Archive"
+    value: "https://static.crates.io/crates/termtree/termtree-0.4.1.crate"
+  }
+  version: "0.4.1"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2023
+    month: 11
+    day: 6
+  }
+}
diff --git a/crates/termtree/MODULE_LICENSE_MIT b/crates/termtree/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/termtree/MODULE_LICENSE_MIT
diff --git a/crates/termtree/README.md b/crates/termtree/README.md
new file mode 100644
index 0000000..71d4594
--- /dev/null
+++ b/crates/termtree/README.md
@@ -0,0 +1,51 @@
+# termtree [![Main](https://github.com/rust-cli/termtree/actions/workflows/main.yml/badge.svg)](https://github.com/rust-cli/termtree/actions/workflows/main.yml)
+
+> Visualize tree-like data on the command-line
+
+[API documentation](https://docs.rs/termtree)
+
+## Example
+
+An example program is provided under the "examples" directory to mimic the `tree(1)`
+Linux program.
+
+```bash
+$ cargo run --example tree target
+    Finished debug [unoptimized + debuginfo] target(s) in 0.0 secs
+     Running `target/debug/examples/tree target`
+target
+└── debug
+    ├── .cargo-lock
+    ├── .fingerprint
+    |   └── termtree-21a5bdbd42e0b6da
+    |       ├── dep-example-tree
+    |       ├── dep-lib-termtree
+    |       ├── example-tree
+    |       ├── example-tree.json
+    |       ├── lib-termtree
+    |       └── lib-termtree.json
+    ├── build
+    ├── deps
+    |   └── libtermtree.rlib
+    ├── examples
+    |   ├── tree
+    |   └── tree.dSYM
+    |       └── Contents
+    |           ├── Info.plist
+    |           └── Resources
+    |               └── DWARF
+    |                   └── tree
+    ├── libtermtree.rlib
+    └── native
+```
+
+## Related Crates
+
+- [`treeline`](https://crates.io/crates/treeline): termtree was forked from this.
+- [`tree_decorator`](https://crates.io/crates/tree_decorator)
+- [`xtree`](https://crates.io/crates/xtree)
+- [`ptree`](https://crates.io/crates/ptree)
+
+## License
+
+Licensed under MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
diff --git a/crates/termtree/cargo_embargo.json b/crates/termtree/cargo_embargo.json
new file mode 100644
index 0000000..d40889a
--- /dev/null
+++ b/crates/termtree/cargo_embargo.json
@@ -0,0 +1,4 @@
+{
+    "run_cargo": false,
+    "tests": true
+}
diff --git a/crates/termtree/examples/tree.rs b/crates/termtree/examples/tree.rs
new file mode 100644
index 0000000..0a9bca3
--- /dev/null
+++ b/crates/termtree/examples/tree.rs
@@ -0,0 +1,32 @@
+use termtree::Tree;
+
+use std::path::Path;
+use std::{env, fs, io};
+
+fn label<P: AsRef<Path>>(p: P) -> String {
+    p.as_ref().file_name().unwrap().to_str().unwrap().to_owned()
+}
+
+fn tree<P: AsRef<Path>>(p: P) -> io::Result<Tree<String>> {
+    let result = fs::read_dir(&p)?.filter_map(|e| e.ok()).fold(
+        Tree::new(label(p.as_ref().canonicalize()?)),
+        |mut root, entry| {
+            let dir = entry.metadata().unwrap();
+            if dir.is_dir() {
+                root.push(tree(entry.path()).unwrap());
+            } else {
+                root.push(Tree::new(label(entry.path())));
+            }
+            root
+        },
+    );
+    Ok(result)
+}
+
+fn main() {
+    let dir = env::args().nth(1).unwrap_or_else(|| String::from("."));
+    match tree(dir) {
+        Ok(tree) => println!("{}", tree),
+        Err(err) => println!("error: {}", err),
+    }
+}
diff --git a/crates/termtree/src/lib.rs b/crates/termtree/src/lib.rs
new file mode 100644
index 0000000..7893d6d
--- /dev/null
+++ b/crates/termtree/src/lib.rs
@@ -0,0 +1,210 @@
+#![allow(clippy::branches_sharing_code)]
+
+#[cfg(test)]
+mod tests;
+
+use std::collections::VecDeque;
+use std::fmt::{self, Display};
+use std::rc::Rc;
+
+/// a simple recursive type which is able to render its
+/// components in a tree-like format
+#[derive(Debug, Clone)]
+pub struct Tree<D: Display> {
+    pub root: D,
+    pub leaves: Vec<Tree<D>>,
+    multiline: bool,
+    glyphs: GlyphPalette,
+}
+
+impl<D: Display> Tree<D> {
+    pub fn new(root: D) -> Self {
+        Tree {
+            root,
+            leaves: Vec::new(),
+            multiline: false,
+            glyphs: GlyphPalette::new(),
+        }
+    }
+
+    pub fn with_leaves(mut self, leaves: impl IntoIterator<Item = impl Into<Tree<D>>>) -> Self {
+        self.leaves = leaves.into_iter().map(Into::into).collect();
+        self
+    }
+
+    /// Ensure all lines for `root` are indented
+    pub fn with_multiline(mut self, yes: bool) -> Self {
+        self.multiline = yes;
+        self
+    }
+
+    /// Customize the rendering of this node
+    pub fn with_glyphs(mut self, glyphs: GlyphPalette) -> Self {
+        self.glyphs = glyphs;
+        self
+    }
+}
+
+impl<D: Display> Tree<D> {
+    /// Ensure all lines for `root` are indented
+    pub fn set_multiline(&mut self, yes: bool) -> &mut Self {
+        self.multiline = yes;
+        self
+    }
+
+    /// Customize the rendering of this node
+    pub fn set_glyphs(&mut self, glyphs: GlyphPalette) -> &mut Self {
+        self.glyphs = glyphs;
+        self
+    }
+}
+
+impl<D: Display> Tree<D> {
+    pub fn push(&mut self, leaf: impl Into<Tree<D>>) -> &mut Self {
+        self.leaves.push(leaf.into());
+        self
+    }
+}
+
+impl<D: Display> From<D> for Tree<D> {
+    fn from(inner: D) -> Self {
+        Self::new(inner)
+    }
+}
+
+impl<D: Display> Extend<D> for Tree<D> {
+    fn extend<T: IntoIterator<Item = D>>(&mut self, iter: T) {
+        self.leaves.extend(iter.into_iter().map(Into::into))
+    }
+}
+
+impl<D: Display> Extend<Tree<D>> for Tree<D> {
+    fn extend<T: IntoIterator<Item = Tree<D>>>(&mut self, iter: T) {
+        self.leaves.extend(iter)
+    }
+}
+
+impl<D: Display> Display for Tree<D> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.root.fmt(f)?; // Pass along `f.alternate()`
+        writeln!(f)?;
+        let mut queue = DisplauQueue::new();
+        let no_space = Rc::new(Vec::new());
+        enqueue_leaves(&mut queue, self, no_space);
+        while let Some((last, leaf, spaces)) = queue.pop_front() {
+            let mut prefix = (
+                if last {
+                    leaf.glyphs.last_item
+                } else {
+                    leaf.glyphs.middle_item
+                },
+                leaf.glyphs.item_indent,
+            );
+
+            if leaf.multiline {
+                let rest_prefix = (
+                    if last {
+                        leaf.glyphs.last_skip
+                    } else {
+                        leaf.glyphs.middle_skip
+                    },
+                    leaf.glyphs.skip_indent,
+                );
+                debug_assert_eq!(prefix.0.chars().count(), rest_prefix.0.chars().count());
+                debug_assert_eq!(prefix.1.chars().count(), rest_prefix.1.chars().count());
+
+                let root = if f.alternate() {
+                    format!("{:#}", leaf.root)
+                } else {
+                    format!("{:}", leaf.root)
+                };
+                for line in root.lines() {
+                    // print single line
+                    for s in spaces.as_slice() {
+                        if *s {
+                            self.glyphs.last_skip.fmt(f)?;
+                            self.glyphs.skip_indent.fmt(f)?;
+                        } else {
+                            self.glyphs.middle_skip.fmt(f)?;
+                            self.glyphs.skip_indent.fmt(f)?;
+                        }
+                    }
+                    prefix.0.fmt(f)?;
+                    prefix.1.fmt(f)?;
+                    line.fmt(f)?;
+                    writeln!(f)?;
+                    prefix = rest_prefix;
+                }
+            } else {
+                // print single line
+                for s in spaces.as_slice() {
+                    if *s {
+                        self.glyphs.last_skip.fmt(f)?;
+                        self.glyphs.skip_indent.fmt(f)?;
+                    } else {
+                        self.glyphs.middle_skip.fmt(f)?;
+                        self.glyphs.skip_indent.fmt(f)?;
+                    }
+                }
+                prefix.0.fmt(f)?;
+                prefix.1.fmt(f)?;
+                leaf.root.fmt(f)?; // Pass along `f.alternate()`
+                writeln!(f)?;
+            }
+
+            // recurse
+            if !leaf.leaves.is_empty() {
+                let s: &Vec<bool> = &spaces;
+                let mut child_spaces = s.clone();
+                child_spaces.push(last);
+                let child_spaces = Rc::new(child_spaces);
+                enqueue_leaves(&mut queue, leaf, child_spaces);
+            }
+        }
+        Ok(())
+    }
+}
+
+type DisplauQueue<'t, D> = VecDeque<(bool, &'t Tree<D>, Rc<Vec<bool>>)>;
+
+fn enqueue_leaves<'t, D: Display>(
+    queue: &mut DisplauQueue<'t, D>,
+    parent: &'t Tree<D>,
+    spaces: Rc<Vec<bool>>,
+) {
+    for (i, leaf) in parent.leaves.iter().rev().enumerate() {
+        let last = i == 0;
+        queue.push_front((last, leaf, spaces.clone()));
+    }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct GlyphPalette {
+    pub middle_item: &'static str,
+    pub last_item: &'static str,
+    pub item_indent: &'static str,
+
+    pub middle_skip: &'static str,
+    pub last_skip: &'static str,
+    pub skip_indent: &'static str,
+}
+
+impl GlyphPalette {
+    pub const fn new() -> Self {
+        Self {
+            middle_item: "├",
+            last_item: "└",
+            item_indent: "── ",
+
+            middle_skip: "│",
+            last_skip: " ",
+            skip_indent: "   ",
+        }
+    }
+}
+
+impl Default for GlyphPalette {
+    fn default() -> Self {
+        Self::new()
+    }
+}
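
The `GlyphPalette` struct added above is the one part of the termtree API that the bundled tests below do not demonstrate. Here is a small hedged sketch, assuming only the public items in `src/lib.rs` above; the glyph strings are illustrative, not the crate's defaults.

```rust
// Sketch: render a Tree with an ASCII-only GlyphPalette.
// Glyphs are stored per node, so the palette is applied to every node
// whose prefix should change.
use termtree::{GlyphPalette, Tree};

fn main() {
    let ascii = GlyphPalette {
        middle_item: "+",
        last_item: "`",
        item_indent: "-- ",
        middle_skip: "|",
        last_skip: " ",
        skip_indent: "   ",
    };

    let tree = Tree::new("root").with_glyphs(ascii).with_leaves([
        Tree::new("first").with_glyphs(ascii),
        Tree::new("second").with_glyphs(ascii),
    ]);

    // Prints:
    // root
    // +-- first
    // `-- second
    println!("{}", tree);
}
```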
diff --git a/crates/termtree/src/tests.rs b/crates/termtree/src/tests.rs
new file mode 100644
index 0000000..4d60cf1
--- /dev/null
+++ b/crates/termtree/src/tests.rs
@@ -0,0 +1,48 @@
+use super::*;
+
+#[test]
+fn render_tree_root() {
+    let tree = Tree::new("foo");
+    assert_eq!(format!("{}", tree), "foo\n")
+}
+
+#[test]
+fn render_tree_with_leaves() {
+    let tree = Tree::new("foo").with_leaves([Tree::new("bar").with_leaves(["baz"])]);
+    assert_eq!(
+        format!("{}", tree),
+        r#"foo
+└── bar
+    └── baz
+"#
+    )
+}
+
+#[test]
+fn render_tree_with_multiple_leaves() {
+    let tree = Tree::new("foo").with_leaves(["bar", "baz"]);
+    assert_eq!(
+        format!("{}", tree),
+        r#"foo
+├── bar
+└── baz
+"#
+    )
+}
+
+#[test]
+fn render_tree_with_multiline_leaf() {
+    let tree = Tree::new("foo").with_leaves([
+        Tree::new("hello\nworld").with_multiline(true),
+        Tree::new("goodbye\nworld").with_multiline(true),
+    ]);
+    assert_eq!(
+        format!("{}", tree),
+        r#"foo
+├── hello
+│   world
+└── goodbye
+    world
+"#
+    )
+}
diff --git a/crates/textwrap/.cargo-checksum.json b/crates/textwrap/.cargo-checksum.json
new file mode 100644
index 0000000..187bfe5
--- /dev/null
+++ b/crates/textwrap/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"8831dd3b382369ba55d81ef7536e353940c387a677fdafe10b5e595ac5bfe79e","Cargo.lock":"b400c8a50ad0d96a551f08e2ec6d7c8b0ce5eead99d923f3aa6c870d5afc3f76","Cargo.toml":"1e528c895be9aa55b10b416f33328a36550ecfc99567cde573889f2397c03aad","LICENSE":"ce93600c49fbb3e14df32efe752264644f6a2f8e08a735ba981725799e5309ef","README.md":"18493423b205d5c65482cb9a53c3b27b1527541a40440330e35be2672ed44961","rustfmt.toml":"02637ad90caa19885b25b1ce8230657b3703402775d9db83687a3f55de567509","src/core.rs":"533b1516778553d01b6e3ec5962877b38605545a5041cbfffdd265a93e7de3af","src/fuzzing.rs":"99f44651a324afb0d09f49c11983dec9719c9ff1a986123cdd6046dc7bdb9d24","src/indentation.rs":"f41ee8be41e01620c7d88b76f81a01ce6a619939505eaf3fcfe6c8021fae022b","src/lib.rs":"44b608426f2b9149653c793f82d1ff31f34fa729869ce4a9fe117c604493fcc3","src/line_ending.rs":"bf416f683ab952d4df75d5dc3c199e7ae7740db2c5982ac1a20c3f4b186ded76","src/word_separators.rs":"0d4240dd354c10f66f3f5abb64d73b329a0a87646749244288c5871a96362009","src/word_splitters.rs":"5a3a601414433227aff009d721fa60a94a28a0c7501b54bbbecedda9a2add3ba","src/wrap_algorithms.rs":"754503a00d39965cc8dfb2d854e1b399bbe49260d33bfb2423bdefe8110aca94","src/wrap_algorithms/optimal_fit.rs":"d0b4e2115790b9a145317af60b3ff87e758df82e0a692fe7a78a2b3b445faeef","tests/indent.rs":"51f977db11632a32fafecf86af88413d51238fe6efcf18ec52fac89133714278","tests/version-numbers.rs":"9e964f58dbdf051fc6fe0d6542ab312d3e95f26c3fd14bce84449bb625e45761"},"package":"222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"}
\ No newline at end of file
diff --git a/crates/textwrap/Android.bp b/crates/textwrap/Android.bp
new file mode 100644
index 0000000..211c924
--- /dev/null
+++ b/crates/textwrap/Android.bp
@@ -0,0 +1,31 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_textwrap_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_textwrap_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-MIT"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libtextwrap",
+    host_supported: true,
+    crate_name: "textwrap",
+    cargo_env_compat: true,
+    cargo_pkg_version: "0.16.0",
+    crate_root: "src/lib.rs",
+    edition: "2021",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.compos",
+        "com.android.virt",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
diff --git a/crates/textwrap/CHANGELOG.md b/crates/textwrap/CHANGELOG.md
new file mode 100644
index 0000000..5d0dd61
--- /dev/null
+++ b/crates/textwrap/CHANGELOG.md
@@ -0,0 +1,596 @@
+# Changelog
+
+This file lists the most important changes made in each release of
+`textwrap`.
+
+## Version 0.16.0 (2022-10-23)
+
+This release marks `Options` as `non_exhaustive` and extends it to
+make line endings configurable; it adds new fast paths to `fill` and
+`wrap`, and it fixes crashes in `unfill` and `refill`.
+
+* [#480](https://github.com/mgeisler/textwrap/pull/480): Mark
+  `Options` as `non_exhaustive`. This will allow us to extend the
+  struct in the future without breaking backwards compatibility.
+* [#478](https://github.com/mgeisler/textwrap/pull/478): Add fast
+  paths to `fill` and `wrap`. This makes the functions 10-25 times
+  faster when no wrapping is needed.
+* [#468](https://github.com/mgeisler/textwrap/pull/468): Fix `refill`
+  to add back correct line ending.
+* [#467](https://github.com/mgeisler/textwrap/pull/467): Fix crashes
+  in `unfill` and `refill`.
+* [#458](https://github.com/mgeisler/textwrap/pull/458): Test with
+  Rust 1.56 (first compiler release with support for Rust 2021).
+* [#454](https://github.com/mgeisler/textwrap/pull/454): Make line
+  endings configurable.
+* [#448](https://github.com/mgeisler/textwrap/pull/448): Migrate to
+  the Rust 2021 edition.
+
+## Version 0.15.1 (2022-09-15)
+
+This release was yanked since it accidentally broke backwards
+compatibility with 0.15.0.
+
+## Version 0.15.0 (2022-02-27)
+
+This is a major feature release with two main changes:
+
+* [#421](https://github.com/mgeisler/textwrap/pull/421): Use `f64`
+  instead of `usize` for fragment widths.
+
+  This fixes problems with overflows in the internal computations of
+  `wrap_optimal_fit` when fragments (words) or line lengths had
+  extreme values, such as `usize::MAX`.
+
+* [#438](https://github.com/mgeisler/textwrap/pull/438): Simplify
+  `Options` by removing generic type parameters.
+
+  This change removes the new generic parameters introduced in version
+  0.14, as well as the original `WrapSplitter` parameter which has
+  been present since very early versions.
+
+  The result is a simplification of function and struct signatures
+  across the board. So what used to be
+
+  ```rust
+  let options: Options<
+      wrap_algorithms::FirstFit,
+      word_separators::AsciiSpace,
+      word_splitters::HyphenSplitter,
+  > = Options::new(80);
+  ```
+
+  if types are fully written out, is now simply
+
+  ```rust
+  let options: Options<'_> = Options::new(80);
+  ```
+
+  The anonymous lifetime represents the lifetime of the
+  `initial_indent` and `subsequent_indent` strings. The change is
+  nearly performance neutral (a 1-2% regression).
+
+Smaller improvements and changes:
+
+* [#404](https://github.com/mgeisler/textwrap/pull/404): Make
+  documentation for short last-line penalty more precise.
+* [#405](https://github.com/mgeisler/textwrap/pull/405): Cleanup and
+  simplify `Options` docstring.
+* [#411](https://github.com/mgeisler/textwrap/pull/411): Default to
+  `OptimalFit` in interactive example.
+* [#415](https://github.com/mgeisler/textwrap/pull/415): Add demo
+  program to help compute binary sizes.
+* [#423](https://github.com/mgeisler/textwrap/pull/423): Add fuzz
+  tests with fully arbitrary fragments.
+* [#424](https://github.com/mgeisler/textwrap/pull/424): Change
+  `wrap_optimal_fit` penalties to non-negative numbers.
+* [#430](https://github.com/mgeisler/textwrap/pull/430): Add
+  `debug-words` example.
+* [#432](https://github.com/mgeisler/textwrap/pull/432): Use precise
+  dependency versions in Cargo.toml.
+
+## Version 0.14.2 (2021-06-27)
+
+The 0.14.1 release included more changes than intended and has been
+yanked. The change intended for 0.14.1 is now included in 0.14.2.
+
+## Version 0.14.1 (2021-06-26)
+
+This release fixes a panic reported by @Makoto, thanks!
+
+* [#391](https://github.com/mgeisler/textwrap/pull/391): Fix panic in
+  `find_words` due to string access outside of a character boundary.
+
+## Version 0.14.0 (2021-06-05)
+
+This is a major feature release which makes Textwrap more configurable
+and flexible. The high-level API of `textwrap::wrap` and
+`textwrap::fill` remains unchanged, but low-level structs have moved
+around.
+
+The biggest change is the introduction of new generic type parameters
+to the `Options` struct. These parameters let you statically
+configure the wrapping algorithm, the word separator, and the word
+splitter. If you previously spelled out the full type for `Options`,
+you now need to take the extra type parameters into account. This
+means that
+
+```rust
+let options: Options<HyphenSplitter> = Options::new(80);
+```
+
+changes to
+
+```rust
+let options: Options<
+    wrap_algorithms::FirstFit,
+    word_separators::AsciiSpace,
+    word_splitters::HyphenSplitter,
+> = Options::new(80);
+```
+
+This is quite a mouthful, so we suggest using type inference where
+possible. You won’t see any change if you call `wrap` directly with a
+width or with an `Options` value constructed on the fly. Please open
+an issue if this causes problems for you!
+
+### New `WordSeparator` Trait
+
+* [#332](https://github.com/mgeisler/textwrap/pull/332): Add
+  `WordSeparator` trait to allow customizing how words are found in a
+  line of text. Until now, Textwrap would always assume that words are
+  separated by ASCII space characters. You can now customize this as
+  needed.
+
+* [#313](https://github.com/mgeisler/textwrap/pull/313): Add support
+  for using the Unicode line breaking algorithm to find words. This is
+  done by adding a second implementation of the new `WordSeparator`
+  trait. The implementation uses the unicode-linebreak crate, which is
+  a new optional dependency.
+
+  With this, Textwrap can be used with East-Asian languages such as
+  Chinese or Japanese where there are no spaces between words.
+  Breaking a long sequence of emojis is another example where line
+  breaks might be wanted even if there are no whitespace to be found.
+  Feedback would be appreciated for this feature.
+
+
+### Indent
+
+* [#353](https://github.com/mgeisler/textwrap/pull/353): Trim trailing
+  whitespace from `prefix` in `indent`.
+
+  Before, empty lines would get no prefix added. Now, empty lines have
+  a trimmed prefix added. This little trick makes `indent` much more
+  useful since you can now safely indent with `"# "` without creating
+  trailing whitespace in the output due to the trailing whitespace in
+  your prefix.
+
+* [#354](https://github.com/mgeisler/textwrap/pull/354): Make `indent`
+  about 20% faster by preallocating the output string.
+
+
+### Documentation
+
+* [#308](https://github.com/mgeisler/textwrap/pull/308): Document
+  handling of leading and trailing whitespace when wrapping text.
+
+### WebAssembly Demo
+
+* [#310](https://github.com/mgeisler/textwrap/pull/310): Thanks to
+  WebAssembly, you can now try out Textwrap directly in your browser.
+  Please try it out: https://mgeisler.github.io/textwrap/.
+
+### New Generic Parameters
+
+* [#331](https://github.com/mgeisler/textwrap/pull/331): Remove outer
+  boxing from `Options`.
+
+* [#357](https://github.com/mgeisler/textwrap/pull/357): Replace
+  `core::WrapAlgorithm` enum with a `wrap_algorithms::WrapAlgorithm`
+  trait. This allows for arbitrary wrapping algorithms to be plugged
+  into the library.
+
+* [#358](https://github.com/mgeisler/textwrap/pull/358): Switch
+  wrapping functions to use a slice for `line_widths`.
+
+* [#368](https://github.com/mgeisler/textwrap/pull/368): Move
+  `WordSeparator` and `WordSplitter` traits to separate modules.
+  Before, Textwrap had several top-level structs such as
+  `NoHyphenation` and `HyphenSplitter`. These implementations of
+  `WordSplitter` now live in a dedicated `word_splitters` module.
+  Similarly, we have a new `word_separators` module for
+  implementations of `WordSeparator`.
+
+* [#369](https://github.com/mgeisler/textwrap/pull/369): Rename
+  `Options::splitter` to `Options::word_splitter` for consistency with
+  the other fields backed by traits.
+
+## Version 0.13.4 (2021-02-23)
+
+This release removes `println!` statements which was left behind in
+`unfill` by mistake.
+
+* [#296](https://github.com/mgeisler/textwrap/pull/296): Improve house
+  building example with more comments.
+* [#297](https://github.com/mgeisler/textwrap/pull/297): Remove debug
+  prints in the new `unfill` function.
+
+## Version 0.13.3 (2021-02-20)
+
+This release contains a bugfix for `indent` and improved handling of
+emojis. We’ve also added a new function for formatting text in columns
+and functions for reformatting already wrapped text.
+
+* [#276](https://github.com/mgeisler/textwrap/pull/276): Extend
+  `core::display_width` to handle emojis when the unicode-width Cargo
+  feature is disabled.
+* [#279](https://github.com/mgeisler/textwrap/pull/279): Make `indent`
+  preserve existing newlines in the input string. Before,
+  `indent("foo", "")` would return `"foo\n"` by mistake. It now
+  returns `"foo"` instead.
+* [#281](https://github.com/mgeisler/textwrap/pull/281): Ensure all
+  `Options` fields have examples.
+* [#282](https://github.com/mgeisler/textwrap/pull/282): Add a
+  `wrap_columns` function.
+* [#294](https://github.com/mgeisler/textwrap/pull/294): Add new
+  `unfill` and `refill` functions.
+
+## Version 0.13.2 (2020-12-30)
+
+This release primarily makes all dependencies optional. This makes it
+possible to slim down textwrap as needed.
+
+* [#254](https://github.com/mgeisler/textwrap/pull/254): `impl
+  WordSplitter` for `Box<T> where T: WordSplitter`.
+* [#255](https://github.com/mgeisler/textwrap/pull/255): Use command
+  line arguments as initial text in interactive example.
+* [#256](https://github.com/mgeisler/textwrap/pull/256): Introduce
+  fuzz tests for `wrap_optimal_fit` and `wrap_first_fit`.
+* [#260](https://github.com/mgeisler/textwrap/pull/260): Make the
+  unicode-width dependency optional.
+* [#261](https://github.com/mgeisler/textwrap/pull/261): Make the
+  smawk dependency optional.
+
+## Version 0.13.1 (2020-12-10)
+
+This is a bugfix release which fixes a regression in 0.13.0. The bug
+meant that colored text was wrapped incorrectly.
+
+* [#245](https://github.com/mgeisler/textwrap/pull/245): Support
+  deleting a word with Ctrl-Backspace in the interactive demo.
+* [#246](https://github.com/mgeisler/textwrap/pull/246): Show build
+  type (debug/release) in interactive demo.
+* [#249](https://github.com/mgeisler/textwrap/pull/249): Correctly
+  compute width while skipping over ANSI escape sequences.
+
+## Version 0.13.0 (2020-12-05)
+
+This is a major release which rewrites the core logic, adds many new
+features, and fixes a couple of bugs. Most programs which use
+`textwrap` stay the same; incompatibilities and upgrade notes are
+given below.
+
+Clone the repository and run the following to explore the new features
+in an interactive demo (Linux only):
+
+```sh
+$ cargo run --example interactive --all-features
+```
+
+### Bug Fixes
+
+#### Rewritten core wrapping algorithm
+
+* [#221](https://github.com/mgeisler/textwrap/pull/221): Reformulate
+  wrapping in terms of words with whitespace and penalties.
+
+The core wrapping algorithm has been completely rewritten. This fixed
+bugs and simplified the code, while also making it possible to use
+`textwrap` outside the context of the terminal.
+
+As part of this, trailing whitespace is now discarded consistently
+from wrapped lines. Before we would inconsistently remove whitespace
+at the end of wrapped lines, except for the last. Leading whitespace
+is still preserved.
+
+### New Features
+
+#### Optimal-fit wrapping
+
+* [#234](https://github.com/mgeisler/textwrap/pull/234): Introduce
+  wrapping using an optimal-fit algorithm.
+
+This release adds support for a new wrapping algorithm which finds a
+globally optimal set of line breaks, taking certain penalties into
+account. As an example, the old algorithm would produce
+
+    "To be, or"
+    "not to be:"
+    "that is"
+    "the"
+    "question"
+
+Notice how the fourth line with “the” is very short. The new algorithm
+shortens the previous lines slightly to produce fewer short lines:
+
+    "To be,"
+    "or not to"
+    "be: that"
+    "is the"
+    "question"
+
+Use the new `textwrap::core::WrapAlgorithm` enum to select between the
+new and old algorithm. By default, the new algorithm is used.
+
+The optimal-fit algorithm is inspired by the line breaking algorithm
+used in TeX, described in the 1981 article [_Breaking Paragraphs into
+Lines_](http://www.eprg.org/G53DOC/pdfs/knuth-plass-breaking.pdf) by
+Knuth and Plass.
+
+#### In-place wrapping
+
+* [#226](https://github.com/mgeisler/textwrap/pull/226): Add a
+  `fill_inplace` function.
+
+When the text you want to fill is already a temporary `String`, you
+can now mutate it in-place with `fill_inplace`:
+
+```rust
+let mut greeting = format!("Greetings {}, welcome to the game! You have {} lives left.",
+                           player.name, player.lives);
+fill_inplace(&mut greeting, line_width);
+```
+
+This is faster than calling `fill` and it will reuse the memory
+already allocated for the string.
+
+### Changed Features
+
+#### `Wrapper` is replaced with `Options`
+
+* [#213](https://github.com/mgeisler/textwrap/pull/213): Simplify API
+  with only top-level functions.
+* [#215](https://github.com/mgeisler/textwrap/pull/215): Reintroducing
+  the type parameter on `Options` (previously known as `Wrapper`).
+* [#219](https://github.com/mgeisler/textwrap/pull/219): Allow using
+  trait objects with `fill` & `wrap`.
+* [#227](https://github.com/mgeisler/textwrap/pull/227): Replace
+  `WrapOptions` with `Into<Options>`.
+
+The `Wrapper` struct held the options (line width, indentation, etc)
+for wrapping text. It was also the entry point for actually wrapping
+the text via its methods such as `wrap`, `wrap_iter`,
+`into_wrap_iter`, and `fill` methods.
+
+The struct has been replaced by a simpler `Options` struct which only
+holds options. The `Wrapper` methods are gone, their job has been
+taken over by the top-level `wrap` and `fill` functions. The signature
+of these functions have changed from
+
+```rust
+fn fill(s: &str, width: usize) -> String;
+
+fn wrap(s: &str, width: usize) -> Vec<Cow<'_, str>>;
+```
+
+to the more general
+
+```rust
+fn fill<'a, S, Opt>(text: &str, options: Opt) -> String
+where
+    S: WordSplitter,
+    Opt: Into<Options<'a, S>>;
+
+fn wrap<'a, S, Opt>(text: &str, options: Opt) -> Vec<Cow<'_, str>>
+where
+    S: WordSplitter,
+    Opt: Into<Options<'a, S>>;
+```
+
+The `Into<Options<'a, S>>` bound allows you to pass a `usize` (which
+is interpreted as the line width) *and* a full `Options` object. This
+allows the new functions to work like the old, plus you can now fully
+customize the behavior of the wrapping via `Options` when needed.
+
+Code that calls `textwrap::wrap` or `textwrap::fill` can remain
+unchanged. Code that calls into `Wrapper::wrap` or `Wrapper::fill`
+will need to be updated. This is a mechanical change; please see
+[#213](https://github.com/mgeisler/textwrap/pull/213) for examples.
+
+Thanks to @CryptJar and @Koxiat for their support in the PRs above!
+
+### Removed Features
+
+* The `wrap_iter` and `into_wrap_iter` methods are gone. This means
+  that lazy iteration is no longer supported: you always get all
+  wrapped lines back as a `Vec`. This was done to simplify the code
+  and to support the optimal-fit algorithm.
+
+  The first-fit algorithm could still be implemented in an incremental
+  fashion. Please let us know if this is important to you.
+
+### Other Changes
+
+* [#206](https://github.com/mgeisler/textwrap/pull/206): Change
+  `Wrapper.splitter` from `T: WordSplitter` to `Box<dyn
+  WordSplitter>`.
+* [#216](https://github.com/mgeisler/textwrap/pull/216): Forbid the
+  use of unsafe code.
+
+## Version 0.12.1 (2020-07-03)
+
+This is a bugfix release.
+
+* Fixed [#176][issue-176]: Mention compile-time wrapping by linking to
+  the [`textwrap-macros` crate].
+* Fixed [#193][issue-193]: Wrapping with `break_words(false)` was
+  broken and would cause extra whitespace to be inserted when words
+  were longer than the line width.
+
+## Version 0.12.0 (2020-06-26)
+
+The code has been updated to the [Rust 2018 edition][rust-2018] and
+each new release of `textwrap` will only support the latest stable
+version of Rust. Trying to support older Rust versions is a fool's
+errand: our dependencies keep releasing new patch versions that
+require newer and newer versions of Rust.
+
+The `term_size` feature has been replaced by `terminal_size`. The API
+is unchanged, it is just the name of the Cargo feature that changed.
+
+The `hyphenation` feature now only embeds the hyphenation patterns for
+US-English. This slims down the dependency.
+
+* Fixed [#140][issue-140]: Ignore ANSI escape sequences.
+* Fixed [#158][issue-158]: Unintended wrapping when using external splitter.
+* Fixed [#177][issue-177]: Update examples to the 2018 edition.
+
+## Version 0.11.0 (2018-12-09)
+
+Due to our dependencies bumping their minimum supported version of
+Rust, the minimum version of Rust we test against is now 1.22.0.
+
+* Merged [#141][issue-141]: Fix `dedent` handling of empty lines and
+  trailing newlines. Thanks @bbqsrc!
+* Fixed [#151][issue-151]: Release of version with hyphenation 0.7.
+
+## Version 0.10.0 (2018-04-28)
+
+Due to our dependencies bumping their minimum supported version of
+Rust, the minimum version of Rust we test against is now 1.17.0.
+
+* Fixed [#99][issue-99]: Word broken even though it would fit on line.
+* Fixed [#107][issue-107]: Automatic hyphenation is off by one.
+* Fixed [#122][issue-122]: Take newlines into account when wrapping.
+* Fixed [#129][issue-129]: Panic on string with em-dash.
+
+## Version 0.9.0 (2017-10-05)
+
+The dependency on `term_size` is now optional, and by default this
+feature is not enabled. This is a *breaking change* for users of
+`Wrapper::with_termwidth`. Enable the `term_size` feature to restore
+the old functionality.
+
+Added a regression test for the case where `width` is set to
+`usize::MAX`, thanks @Fraser999! All public structs now implement
+`Debug`, thanks @hcpl!
+
+* Fixed [#101][issue-101]: Make `term_size` an optional dependency.
+
+## Version 0.8.0 (2017-09-04)
+
+The `Wrapper` struct is now generic over the type of word splitter
+being used. This means less boxing and a nicer API. The
+`Wrapper::word_splitter` method has been removed. This is a *breaking
+API change* if you used the method to change the word splitter.
+
+The `Wrapper` struct has two new methods that will wrap the input text
+lazily: `Wrapper::wrap_iter` and `Wrapper::into_wrap_iter`. Use those
+if you will be iterating over the wrapped lines one by one.
+
+* Fixed [#59][issue-59]: `wrap` could return an iterator. Thanks
+  @hcpl!
+* Fixed [#81][issue-81]: Set `html_root_url`.
+
+## Version 0.7.0 (2017-07-20)
+
+Version 0.7.0 changes the return type of `Wrapper::wrap` from
+`Vec<String>` to `Vec<Cow<'a, str>>`. This means that the output lines
+borrow data from the input string. This is a *breaking API change* if
+you relied on the exact return type of `Wrapper::wrap`. Callers of the
+`textwrap::fill` convenience function will see no breakage.
+
+The above change and other optimizations makes version 0.7.0 roughly
+15-30% faster than version 0.6.0.
+
+The `squeeze_whitespace` option has been removed since it was
+complicating the above optimization. Let us know if this option is
+important for you so we can provide a workaround.
+
+* Fixed [#58][issue-58]: Add a "fast_wrap" function.
+* Fixed [#61][issue-61]: Documentation errors.
+
+## Version 0.6.0 (2017-05-22)
+
+Version 0.6.0 adds builder methods to `Wrapper` for easy one-line
+initialization and configuration:
+
+```rust
+let wrapper = Wrapper::new(60).break_words(false);
+```
+
+It also adds a new `NoHyphenation` word splitter that will never split
+words, not even at existing hyphens.
+
+* Fixed [#28][issue-28]: Support not squeezing whitespace.
+
+## Version 0.5.0 (2017-05-15)
+
+Version 0.5.0 has *breaking API changes*. However, this only affects
+code using the hyphenation feature. The feature is now optional, so
+you will first need to enable the `hyphenation` feature as described
+above. Afterwards, please change your code from
+```rust
+wrapper.corpus = Some(&corpus);
+```
+to
+```rust
+wrapper.splitter = Box::new(corpus);
+```
+
+Other changes include optimizations, so version 0.5.0 is roughly
+10-15% faster than version 0.4.0.
+
+* Fixed [#19][issue-19]: Add support for finding terminal size.
+* Fixed [#25][issue-25]: Handle words longer than `self.width`.
+* Fixed [#26][issue-26]: Support custom indentation.
+* Fixed [#36][issue-36]: Support building without `hyphenation`.
+* Fixed [#39][issue-39]: Respect non-breaking spaces.
+
+## Version 0.4.0 (2017-01-24)
+
+Documented complexities and tested these via `cargo bench`.
+
+* Fixed [#13][issue-13]: Immediately add word if it fits.
+* Fixed [#14][issue-14]: Avoid splitting on initial hyphens.
+
+## Version 0.3.0 (2017-01-07)
+
+Added support for automatic hyphenation.
+
+## Version 0.2.0 (2016-12-28)
+
+Introduced `Wrapper` struct. Added support for wrapping on hyphens.
+
+## Version 0.1.0 (2016-12-17)
+
+First public release with support for wrapping strings on whitespace.
+
+[rust-2018]: https://doc.rust-lang.org/edition-guide/rust-2018/
+[`textwrap-macros` crate]: https://crates.io/crates/textwrap-macros
+
+[issue-13]: https://github.com/mgeisler/textwrap/issues/13
+[issue-14]: https://github.com/mgeisler/textwrap/issues/14
+[issue-19]: https://github.com/mgeisler/textwrap/issues/19
+[issue-25]: https://github.com/mgeisler/textwrap/issues/25
+[issue-26]: https://github.com/mgeisler/textwrap/issues/26
+[issue-28]: https://github.com/mgeisler/textwrap/issues/28
+[issue-36]: https://github.com/mgeisler/textwrap/issues/36
+[issue-39]: https://github.com/mgeisler/textwrap/issues/39
+[issue-58]: https://github.com/mgeisler/textwrap/issues/58
+[issue-59]: https://github.com/mgeisler/textwrap/issues/59
+[issue-61]: https://github.com/mgeisler/textwrap/issues/61
+[issue-81]: https://github.com/mgeisler/textwrap/issues/81
+[issue-99]: https://github.com/mgeisler/textwrap/issues/99
+[issue-101]: https://github.com/mgeisler/textwrap/issues/101
+[issue-107]: https://github.com/mgeisler/textwrap/issues/107
+[issue-122]: https://github.com/mgeisler/textwrap/issues/122
+[issue-129]: https://github.com/mgeisler/textwrap/issues/129
+[issue-140]: https://github.com/mgeisler/textwrap/issues/140
+[issue-141]: https://github.com/mgeisler/textwrap/issues/141
+[issue-151]: https://github.com/mgeisler/textwrap/issues/151
+[issue-158]: https://github.com/mgeisler/textwrap/issues/158
+[issue-176]: https://github.com/mgeisler/textwrap/issues/176
+[issue-177]: https://github.com/mgeisler/textwrap/issues/177
+[issue-193]: https://github.com/mgeisler/textwrap/issues/193
diff --git a/crates/textwrap/Cargo.lock b/crates/textwrap/Cargo.lock
new file mode 100644
index 0000000..414d793
--- /dev/null
+++ b/crates/textwrap/Cargo.lock
@@ -0,0 +1,558 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "ahash"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
+dependencies = [
+ "getrandom",
+ "once_cell",
+ "version_check",
+]
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "bincode"
+version = "1.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "cc"
+version = "1.0.73"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "errno"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
+dependencies = [
+ "errno-dragonfly",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "errno-dragonfly"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
+dependencies = [
+ "cc",
+ "libc",
+]
+
+[[package]]
+name = "form_urlencoded"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8"
+dependencies = [
+ "percent-encoding",
+]
+
+[[package]]
+name = "fst"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ab85b9b05e3978cc9a9cf8fea7f01b494e1a09ed3037e16ba39edc7a29eb61a"
+
+[[package]]
+name = "getrandom"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+dependencies = [
+ "ahash",
+]
+
+[[package]]
+name = "hyphenation"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bcf4dd4c44ae85155502a52c48739c8a48185d1449fff1963cffee63c28a50f0"
+dependencies = [
+ "bincode",
+ "fst",
+ "hyphenation_commons",
+ "pocket-resources",
+ "serde",
+]
+
+[[package]]
+name = "hyphenation_commons"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5febe7a2ade5c7d98eb8b75f946c046b335324b06a14ea0998271504134c05bf"
+dependencies = [
+ "fst",
+ "serde",
+]
+
+[[package]]
+name = "idna"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6"
+dependencies = [
+ "unicode-bidi",
+ "unicode-normalization",
+]
+
+[[package]]
+name = "io-lifetimes"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6e481ccbe3dea62107216d0d1138bb8ad8e5e5c43009a098bd1990272c497b0"
+
+[[package]]
+name = "libc"
+version = "0.2.135"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.0.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d"
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "numtoa"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8f8bdf33df195859076e54ab11ee78a1b208382d3a26ec40d142ffc1ecc49ef"
+
+[[package]]
+name = "once_cell"
+version = "1.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
+
+[[package]]
+name = "percent-encoding"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
+
+[[package]]
+name = "pocket-resources"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c135f38778ad324d9e9ee68690bac2c1a51f340fdf96ca13e2ab3914eb2e51d8"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.47"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "pulldown-cmark"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffade02495f22453cd593159ea2f59827aae7f53fa8323f756799b670881dcf8"
+dependencies = [
+ "bitflags",
+ "memchr",
+ "unicase",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "redox_termios"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8440d8acb4fd3d277125b4bd01a6f38aee8d814b3b5fc09b3f2b825d37d3fe8f"
+dependencies = [
+ "redox_syscall",
+]
+
+[[package]]
+name = "regex"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
+
+[[package]]
+name = "rustix"
+version = "0.35.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "985947f9b6423159c4726323f373be0a21bdb514c5af06a849cb3d2dce2d01e8"
+dependencies = [
+ "bitflags",
+ "errno",
+ "io-lifetimes",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys",
+]
+
+[[package]]
+name = "semver"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4"
+
+[[package]]
+name = "serde"
+version = "1.0.147"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.147"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "smawk"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f67ad224767faa3c7d8b6d91985b78e70a1324408abcb1cfcc2be4c06bc06043"
+
+[[package]]
+name = "syn"
+version = "1.0.103"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "terminal_size"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8440c860cf79def6164e4a0a983bcc2305d82419177a0e0c71930d049e3ac5a1"
+dependencies = [
+ "rustix",
+ "windows-sys",
+]
+
+[[package]]
+name = "termion"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "659c1f379f3408c7e5e84c7d0da6d93404e3800b6b9d063ba24436419302ec90"
+dependencies = [
+ "libc",
+ "numtoa",
+ "redox_syscall",
+ "redox_termios",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.16.0"
+dependencies = [
+ "hyphenation",
+ "smawk",
+ "terminal_size",
+ "termion",
+ "unic-emoji-char",
+ "unicode-linebreak",
+ "unicode-width",
+ "version-sync",
+]
+
+[[package]]
+name = "tinyvec"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50"
+dependencies = [
+ "tinyvec_macros",
+]
+
+[[package]]
+name = "tinyvec_macros"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"
+
+[[package]]
+name = "toml"
+version = "0.5.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "unic-char-property"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8c57a407d9b6fa02b4795eb81c5b6652060a15a7903ea981f3d723e6c0be221"
+dependencies = [
+ "unic-char-range",
+]
+
+[[package]]
+name = "unic-char-range"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0398022d5f700414f6b899e10b8348231abf9173fa93144cbc1a43b9793c1fbc"
+
+[[package]]
+name = "unic-common"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80d7ff825a6a654ee85a63e80f92f054f904f21e7d12da4e22f9834a4aaa35bc"
+
+[[package]]
+name = "unic-emoji-char"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b07221e68897210270a38bde4babb655869637af0f69407f96053a34f76494d"
+dependencies = [
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
+]
+
+[[package]]
+name = "unic-ucd-version"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96bd2f2237fe450fcd0a1d2f5f4e91711124f7857ba2e964247776ebeeb7b0c4"
+dependencies = [
+ "unic-common",
+]
+
+[[package]]
+name = "unicase"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6"
+dependencies = [
+ "version_check",
+]
+
+[[package]]
+name = "unicode-bidi"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
+
+[[package]]
+name = "unicode-linebreak"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c5faade31a542b8b35855fff6e8def199853b2da8da256da52f52f1316ee3137"
+dependencies = [
+ "hashbrown",
+ "regex",
+]
+
+[[package]]
+name = "unicode-normalization"
+version = "0.1.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921"
+dependencies = [
+ "tinyvec",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
+
+[[package]]
+name = "url"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643"
+dependencies = [
+ "form_urlencoded",
+ "idna",
+ "percent-encoding",
+]
+
+[[package]]
+name = "version-sync"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99d0801cec07737d88cb900e6419f6f68733867f90b3faaa837e84692e101bf0"
+dependencies = [
+ "proc-macro2",
+ "pulldown-cmark",
+ "regex",
+ "semver",
+ "syn",
+ "toml",
+ "url",
+]
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-sys"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
+dependencies = [
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
diff --git a/crates/textwrap/Cargo.toml b/crates/textwrap/Cargo.toml
new file mode 100644
index 0000000..20472e4
--- /dev/null
+++ b/crates/textwrap/Cargo.toml
@@ -0,0 +1,90 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+name = "textwrap"
+version = "0.16.0"
+authors = ["Martin Geisler <martin@geisler.net>"]
+exclude = [
+    ".github/",
+    ".gitignore",
+    "benchmarks/",
+    "examples/",
+    "fuzz/",
+    "images/",
+]
+description = "Library for word wrapping, indenting, and dedenting strings. Has optional support for Unicode and emojis as well as machine hyphenation."
+documentation = "https://docs.rs/textwrap/"
+readme = "README.md"
+keywords = [
+    "text",
+    "formatting",
+    "wrap",
+    "typesetting",
+    "hyphenation",
+]
+categories = [
+    "text-processing",
+    "command-line-interface",
+]
+license = "MIT"
+repository = "https://github.com/mgeisler/textwrap"
+
+[package.metadata.docs.rs]
+all-features = true
+
+[[example]]
+name = "hyphenation"
+path = "examples/hyphenation.rs"
+required-features = ["hyphenation"]
+
+[[example]]
+name = "termwidth"
+path = "examples/termwidth.rs"
+required-features = ["terminal_size"]
+
+[dependencies.hyphenation]
+version = "0.8.4"
+features = ["embed_en-us"]
+optional = true
+
+[dependencies.smawk]
+version = "0.3.1"
+optional = true
+
+[dependencies.terminal_size]
+version = "0.2.1"
+optional = true
+
+[dependencies.unicode-linebreak]
+version = "0.1.4"
+optional = true
+
+[dependencies.unicode-width]
+version = "0.1.10"
+optional = true
+
+[dev-dependencies.unic-emoji-char]
+version = "0.9.0"
+
+[dev-dependencies.version-sync]
+version = "0.9.4"
+
+[features]
+default = [
+    "unicode-linebreak",
+    "unicode-width",
+    "smawk",
+]
+
+[target."cfg(unix)".dev-dependencies.termion]
+version = "2.0.1"
diff --git a/crates/textwrap/LICENSE b/crates/textwrap/LICENSE
new file mode 100644
index 0000000..0d37ec3
--- /dev/null
+++ b/crates/textwrap/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 Martin Geisler
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/crates/textwrap/METADATA b/crates/textwrap/METADATA
new file mode 100644
index 0000000..8236055
--- /dev/null
+++ b/crates/textwrap/METADATA
@@ -0,0 +1,23 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/textwrap
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
+name: "textwrap"
+description: "Library for word wrapping, indenting, and dedenting strings. Has optional support for Unicode and emojis as well as machine hyphenation."
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/textwrap"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/textwrap/textwrap-0.16.0.crate"
+  }
+  version: "0.16.0"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2022
+    month: 12
+    day: 19
+  }
+}
diff --git a/crates/textwrap/MODULE_LICENSE_MIT b/crates/textwrap/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/textwrap/MODULE_LICENSE_MIT
diff --git a/crates/textwrap/NOTICE b/crates/textwrap/NOTICE
new file mode 120000
index 0000000..7a694c9
--- /dev/null
+++ b/crates/textwrap/NOTICE
@@ -0,0 +1 @@
+LICENSE
\ No newline at end of file
diff --git a/crates/textwrap/README.md b/crates/textwrap/README.md
new file mode 100644
index 0000000..cdf4eac
--- /dev/null
+++ b/crates/textwrap/README.md
@@ -0,0 +1,176 @@
+# Textwrap
+
+[![](https://github.com/mgeisler/textwrap/workflows/build/badge.svg)][build-status]
+[![](https://codecov.io/gh/mgeisler/textwrap/branch/master/graph/badge.svg)][codecov]
+[![](https://img.shields.io/crates/v/textwrap.svg)][crates-io]
+[![](https://docs.rs/textwrap/badge.svg)][api-docs]
+
+Textwrap is a library for wrapping and indenting text. It is most
+often used by command-line programs to format dynamic output nicely so
+it looks good in a terminal. You can also use Textwrap to wrap text
+set in a proportional font—such as text used to generate PDF files, or
+drawn on an [HTML5 canvas using WebAssembly][wasm-demo].
+
+## Usage
+
+To use the textwrap crate, add this to your `Cargo.toml` file:
+```toml
+[dependencies]
+textwrap = "0.16"
+```
+
+By default, this enables word wrapping with support for Unicode
+strings. Extra features can be enabled with Cargo features, and the
+Unicode support can be disabled if needed. This lets you slim down
+the library so you only pay for the features you actually use.
+
+Please see the [_Cargo Features_ in the crate
+documentation](https://docs.rs/textwrap/#cargo-features) for a full
+list of the available features as well as their impact on the size of
+your binary.
+
+## Documentation
+
+**[API documentation][api-docs]**
+
+## Getting Started
+
+Word wrapping is easy using the `wrap` and `fill` functions:
+
+```rust
+#[cfg(feature = "smawk")] {
+let text = "textwrap: an efficient and powerful library for wrapping text.";
+assert_eq!(
+    textwrap::wrap(text, 28),
+    vec![
+        "textwrap: an efficient",
+        "and powerful library for",
+        "wrapping text.",
+    ]
+);
+}
+```
+
+Sharp-eyed readers will notice that the first line is 22 columns wide.
+So why is the word “and” put in the second line when there is space
+for it in the first line?
+
+The explanation is that textwrap does not just wrap text one line at a
+time. Instead, it uses an optimal-fit algorithm which looks ahead and
+chooses line breaks which minimize the gaps left at ends of lines.
+This is controlled with the `smawk` Cargo feature, which is why the
+example is wrapped in the `cfg`-block.
+
+Without look-ahead, the first line would be longer and the text would
+look like this:
+
+```rust
+#[cfg(not(feature = "smawk"))] {
+let text = "textwrap: an efficient and powerful library for wrapping text.";
+assert_eq!(
+    textwrap::wrap(text, 28),
+    vec![
+        "textwrap: an efficient and",
+        "powerful library for",
+        "wrapping text.",
+    ]
+);
+}
+```
+
+The second line is now shorter and the text is more ragged. The kind
+of wrapping can be configured via `Options::wrap_algorithm`.
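+
+If you want the greedy behavior even when the `smawk` feature is
+enabled, you can select the algorithm explicitly. Here is a small
+sketch; it assumes the `WrapAlgorithm` enum re-exported at the crate
+root and the `Options::wrap_algorithm` builder method:
+
+```rust
+use textwrap::{wrap, Options, WrapAlgorithm};
+
+let text = "textwrap: an efficient and powerful library for wrapping text.";
+let options = Options::new(28).wrap_algorithm(WrapAlgorithm::FirstFit);
+assert_eq!(
+    wrap(text, &options),
+    vec![
+        "textwrap: an efficient and",
+        "powerful library for",
+        "wrapping text.",
+    ]
+);
+```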
+
+If you enable the `hyphenation` Cargo feature, you get support for
+automatic hyphenation for [about 70 languages][patterns] via
+high-quality TeX hyphenation patterns.
+
+Your program must load the hyphenation pattern and configure
+`Options::word_splitter` to use it:
+
+```rust
+#[cfg(feature = "hyphenation")] {
+use hyphenation::{Language, Load, Standard};
+use textwrap::{fill, Options, WordSplitter};
+
+let dictionary = Standard::from_embedded(Language::EnglishUS).unwrap();
+let options = textwrap::Options::new(28).word_splitter(WordSplitter::Hyphenation(dictionary));
+let text = "textwrap: an efficient and powerful library for wrapping text.";
+
+assert_eq!(
+    textwrap::wrap(text, &options),
+    vec![
+        "textwrap: an efficient and",
+        "powerful library for wrap-",
+        "ping text."
+    ]
+);
+}
+```
+
+The US-English hyphenation patterns are embedded when you enable the
+`hyphenation` feature. They are licensed under a [permissive
+license][en-us license] and take up about 88 KB in your binary. If you
+need hyphenation for other languages, you need to download a
+[precompiled `.bincode` file][bincode] and load it yourself. Please
+see the [`hyphenation` documentation] for details.
+
+## Wrapping Strings at Compile Time
+
+If your strings are known at compile time, please take a look at the
+procedural macros from the [`textwrap-macros` crate].
+
+## Examples
+
+The library comes with [a
+collection](https://github.com/mgeisler/textwrap/tree/master/examples)
+of small example programs that show various features.
+
+If you want to see Textwrap in action right away, then take a look at
+[`examples/wasm/`], which shows how to wrap sans-serif, serif, and
+monospace text. It uses WebAssembly and is automatically deployed to
+https://mgeisler.github.io/textwrap/.
+
+For the command-line examples, you’re invited to clone the repository
+and try them out for yourself! Of special note is
+[`examples/interactive.rs`]. This is a demo program which demonstrates
+most of the available features: you can enter text and adjust the
+width at which it is wrapped interactively. You can also adjust the
+`Options` used to see the effect of different `WordSplitter`s and wrap
+algorithms.
+
+Run the demo with
+
+```sh
+$ cargo run --example interactive
+```
+
+The demo needs a Linux terminal to function.
+
+## Release History
+
+Please see the [CHANGELOG file] for details on the changes made in
+each release.
+
+## License
+
+Textwrap can be distributed according to the [MIT license][mit].
+Contributions will be accepted under the same license.
+
+[crates-io]: https://crates.io/crates/textwrap
+[build-status]: https://github.com/mgeisler/textwrap/actions?query=workflow%3Abuild+branch%3Amaster
+[codecov]: https://codecov.io/gh/mgeisler/textwrap
+[wasm-demo]: https://mgeisler.github.io/textwrap/
+[`textwrap-macros` crate]: https://crates.io/crates/textwrap-macros
+[`hyphenation` example]: https://github.com/mgeisler/textwrap/blob/master/examples/hyphenation.rs
+[`termwidth` example]: https://github.com/mgeisler/textwrap/blob/master/examples/termwidth.rs
+[patterns]: https://github.com/tapeinosyne/hyphenation/tree/master/patterns-tex
+[en-us license]: https://github.com/hyphenation/tex-hyphen/blob/master/hyph-utf8/tex/generic/hyph-utf8/patterns/tex/hyph-en-us.tex
+[bincode]: https://github.com/tapeinosyne/hyphenation/tree/master/dictionaries
+[`hyphenation` documentation]: http://docs.rs/hyphenation
+[`examples/wasm/`]: https://github.com/mgeisler/textwrap/tree/master/examples/wasm
+[`examples/interactive.rs`]: https://github.com/mgeisler/textwrap/tree/master/examples/interactive.rs
+[api-docs]: https://docs.rs/textwrap/
+[CHANGELOG file]: https://github.com/mgeisler/textwrap/blob/master/CHANGELOG.md
+[mit]: LICENSE
diff --git a/crates/textwrap/TEST_MAPPING b/crates/textwrap/TEST_MAPPING
new file mode 100644
index 0000000..43da34b
--- /dev/null
+++ b/crates/textwrap/TEST_MAPPING
@@ -0,0 +1,20 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/base64"
+    },
+    {
+      "path": "external/rust/crates/clap/2.33.3"
+    },
+    {
+      "path": "external/rust/crates/tinytemplate"
+    },
+    {
+      "path": "external/rust/crates/tinyvec"
+    },
+    {
+      "path": "external/rust/crates/unicode-xid"
+    }
+  ]
+}
diff --git a/crates/textwrap/cargo_embargo.json b/crates/textwrap/cargo_embargo.json
new file mode 100644
index 0000000..5a3fcaa
--- /dev/null
+++ b/crates/textwrap/cargo_embargo.json
@@ -0,0 +1,9 @@
+{
+  "apex_available": [
+    "//apex_available:platform",
+    "com.android.compos",
+    "com.android.virt"
+  ],
+  "features": [],
+  "run_cargo": false
+}
diff --git a/crates/textwrap/rustfmt.toml b/crates/textwrap/rustfmt.toml
new file mode 100644
index 0000000..c1578aa
--- /dev/null
+++ b/crates/textwrap/rustfmt.toml
@@ -0,0 +1 @@
+imports_granularity = "Module"
diff --git a/crates/textwrap/src/core.rs b/crates/textwrap/src/core.rs
new file mode 100644
index 0000000..0ab4ef8
--- /dev/null
+++ b/crates/textwrap/src/core.rs
@@ -0,0 +1,433 @@
+//! Building blocks for advanced wrapping functionality.
+//!
+//! The functions and structs in this module can be used to implement
+//! advanced wrapping functionality when the [`wrap`](super::wrap) and
+//! [`fill`](super::fill) function don't do what you want.
+//!
+//! In general, you want to follow these steps when wrapping
+//! something:
+//!
+//! 1. Split your input into [`Fragment`]s. These are abstract blocks
+//!    of text or content which can be wrapped into lines. See
+//!    [`WordSeparator`](crate::word_separators::WordSeparator) for
+//!    how to do this for text.
+//!
+//! 2. Potentially split your fragments into smaller pieces. This
+//!    allows you to implement things like hyphenation. If you use the
+//!    `Word` type, you can use the [`WordSplitter`](crate::WordSplitter)
+//!    enum for this.
+//!
+//! 3. Potentially break apart fragments that are still too large to
+//!    fit on a single line. This is implemented in [`break_words`].
+//!
+//! 4. Finally take your fragments and put them into lines. There are
+//!    two algorithms for this in the
+//!    [`wrap_algorithms`](crate::wrap_algorithms) module:
+//!    [`wrap_optimal_fit`](crate::wrap_algorithms::wrap_optimal_fit)
+//!    and [`wrap_first_fit`](crate::wrap_algorithms::wrap_first_fit).
+//!    The former produces better line breaks; the latter is faster.
+//!
+//! 5. Iterate through the slices returned by the wrapping functions
+//!    and construct your lines of output.
+//!
+//! Please [open an issue](https://github.com/mgeisler/textwrap/) if
+//! the functionality here is not sufficient or if you have ideas for
+//! improving it. We would love to hear from you!
+
+/// The CSI or “Control Sequence Introducer” introduces an ANSI escape
+/// sequence. This is typically used for colored text and will be
+/// ignored when computing the text width.
+const CSI: (char, char) = ('\x1b', '[');
+/// The final bytes of an ANSI escape sequence must be in this range.
+const ANSI_FINAL_BYTE: std::ops::RangeInclusive<char> = '\x40'..='\x7e';
+
+/// Skip ANSI escape sequences. `ch` is the current `char` and `chars`
+/// provides the following characters. The `chars` iterator will be
+/// modified if `ch` is the start of an ANSI escape sequence.
+#[inline]
+pub(crate) fn skip_ansi_escape_sequence<I: Iterator<Item = char>>(ch: char, chars: &mut I) -> bool {
+    if ch == CSI.0 && chars.next() == Some(CSI.1) {
+        // We have found the start of an ANSI escape code, typically
+        // used for colored terminal text. We skip until we find a
+        // "final byte" in the range 0x40–0x7E.
+        for ch in chars {
+            if ANSI_FINAL_BYTE.contains(&ch) {
+                return true;
+            }
+        }
+    }
+    false
+}
+
+#[cfg(feature = "unicode-width")]
+#[inline]
+fn ch_width(ch: char) -> usize {
+    unicode_width::UnicodeWidthChar::width(ch).unwrap_or(0)
+}
+
+/// First character which [`ch_width`] will classify as double-width.
+/// Please see [`display_width`].
+#[cfg(not(feature = "unicode-width"))]
+const DOUBLE_WIDTH_CUTOFF: char = '\u{1100}';
+
+#[cfg(not(feature = "unicode-width"))]
+#[inline]
+fn ch_width(ch: char) -> usize {
+    if ch < DOUBLE_WIDTH_CUTOFF {
+        1
+    } else {
+        2
+    }
+}
+
+/// Compute the display width of `text` while skipping over ANSI
+/// escape sequences.
+///
+/// # Examples
+///
+/// ```
+/// use textwrap::core::display_width;
+///
+/// assert_eq!(display_width("Café Plain"), 10);
+/// assert_eq!(display_width("\u{1b}[31mCafé Rouge\u{1b}[0m"), 10);
+/// ```
+///
+/// **Note:** When the `unicode-width` Cargo feature is disabled, the
+/// width of a `char` is determined by a crude approximation which
+/// simply counts chars below U+1100 as 1 column wide, and all other
+/// characters as 2 columns wide. With the feature enabled, the function
+/// will correctly deal with [combining characters] in their
+/// decomposed form (see [Unicode equivalence]).
+///
+/// An example of a decomposed character is “é”, which can be
+/// decomposed into: “e” followed by a combining acute accent: “◌́”.
+/// Without the `unicode-width` Cargo feature, every `char` below
+/// U+1100 has a width of 1. This includes the combining accent:
+///
+/// ```
+/// use textwrap::core::display_width;
+///
+/// assert_eq!(display_width("Cafe Plain"), 10);
+/// #[cfg(feature = "unicode-width")]
+/// assert_eq!(display_width("Cafe\u{301} Plain"), 10);
+/// #[cfg(not(feature = "unicode-width"))]
+/// assert_eq!(display_width("Cafe\u{301} Plain"), 11);
+/// ```
+///
+/// ## Emojis and CJK Characters
+///
+/// Characters such as emojis and [CJK characters] used in the
+/// Chinese, Japanese, and Korean languages are seen as double-width,
+/// even if the `unicode-width` feature is disabled:
+///
+/// ```
+/// use textwrap::core::display_width;
+///
+/// assert_eq!(display_width("😂😭🥺🤣✨😍🙏🥰😊🔥"), 20);
+/// assert_eq!(display_width("你好"), 4);  // “Nǐ hǎo” or “Hello” in Chinese
+/// ```
+///
+/// # Limitations
+///
+/// The displayed width of a string cannot always be computed from the
+/// string alone. This is because the width depends on the rendering
+/// engine used. This is particularly visible with [emoji modifier
+/// sequences] where a base emoji is modified with, e.g., skin tone or
+/// hair color modifiers. It is up to the rendering engine to detect
+/// this and to produce a suitable emoji.
+///
+/// A simple example is “❤️”, which consists of “❤” (U+2764: Black
+/// Heart Symbol) followed by U+FE0F (Variation Selector-16). By
+/// itself, “❤” is a black heart, but if you follow it with the
+/// variant selector, you may get a wider red heart.
+///
+/// A more complex example would be “👨‍🦰” which should depict a man
+/// with red hair. Here the computed width is too large — and the
+/// width differs depending on the use of the `unicode-width` feature:
+///
+/// ```
+/// use textwrap::core::display_width;
+///
+/// assert_eq!("👨‍🦰".chars().collect::<Vec<char>>(), ['\u{1f468}', '\u{200d}', '\u{1f9b0}']);
+/// #[cfg(feature = "unicode-width")]
+/// assert_eq!(display_width("👨‍🦰"), 4);
+/// #[cfg(not(feature = "unicode-width"))]
+/// assert_eq!(display_width("👨‍🦰"), 6);
+/// ```
+///
+/// This happens because the grapheme consists of three code points:
+/// “👨” (U+1F468: Man), Zero Width Joiner (U+200D), and “🦰”
+/// (U+1F9B0: Red Hair). You can see them above in the test. With
+/// `unicode-width` enabled, the ZWJ is correctly seen as having zero
+/// width; without the feature, it is counted as a double-width character.
+///
+/// ## Terminal Support
+///
+/// Modern browsers typically do a great job at combining characters
+/// as shown above, but terminals often struggle more. As an example,
+/// Gnome Terminal version 3.38.1, shows “❤️” as a big red heart, but
+/// shows "👨‍🦰" as “👨🦰”.
+///
+/// [combining characters]: https://en.wikipedia.org/wiki/Combining_character
+/// [Unicode equivalence]: https://en.wikipedia.org/wiki/Unicode_equivalence
+/// [CJK characters]: https://en.wikipedia.org/wiki/CJK_characters
+/// [emoji modifier sequences]: https://unicode.org/emoji/charts/full-emoji-modifiers.html
+pub fn display_width(text: &str) -> usize {
+    let mut chars = text.chars();
+    let mut width = 0;
+    while let Some(ch) = chars.next() {
+        if skip_ansi_escape_sequence(ch, &mut chars) {
+            continue;
+        }
+        width += ch_width(ch);
+    }
+    width
+}
+
+/// A (text) fragment denotes the unit which we wrap into lines.
+///
+/// Fragments represent an abstract _word_ plus the _whitespace_
+/// following the word. In case the word falls at the end of the line,
+/// the whitespace is dropped and a so-called _penalty_ is inserted
+/// instead (typically `"-"` if the word was hyphenated).
+///
+/// For wrapping purposes, the precise content of the word, the
+/// whitespace, and the penalty are irrelevant. All we need to know is
+/// the displayed width of each part, which this trait provides.
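+///
+/// # Examples
+///
+/// A minimal sketch of a custom fragment: the `Dash` type below is
+/// made up for illustration and is simply one column wide, followed
+/// by one column of whitespace, with no penalty.
+///
+/// ```
+/// use textwrap::core::Fragment;
+///
+/// #[derive(Debug)]
+/// struct Dash;
+///
+/// impl Fragment for Dash {
+///     fn width(&self) -> f64 { 1.0 }
+///     fn whitespace_width(&self) -> f64 { 1.0 }
+///     fn penalty_width(&self) -> f64 { 0.0 }
+/// }
+///
+/// assert_eq!(Dash.width() + Dash.whitespace_width(), 2.0);
+/// ```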
+pub trait Fragment: std::fmt::Debug {
+    /// Displayed width of word represented by this fragment.
+    fn width(&self) -> f64;
+
+    /// Displayed width of the whitespace that must follow the word
+    /// when the word is not at the end of a line.
+    fn whitespace_width(&self) -> f64;
+
+    /// Displayed width of the penalty that must be inserted if the
+    /// word falls at the end of a line.
+    fn penalty_width(&self) -> f64;
+}
+
+/// A piece of wrappable text, including any trailing whitespace.
+///
+/// A `Word` is an example of a [`Fragment`], so it has a width,
+/// trailing whitespace, and potentially a penalty item.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct Word<'a> {
+    /// Word content.
+    pub word: &'a str,
+    /// Whitespace to insert if the word does not fall at the end of a line.
+    pub whitespace: &'a str,
+    /// Penalty string to insert if the word falls at the end of a line.
+    pub penalty: &'a str,
+    // Cached width in columns.
+    pub(crate) width: usize,
+}
+
+impl std::ops::Deref for Word<'_> {
+    type Target = str;
+
+    fn deref(&self) -> &Self::Target {
+        self.word
+    }
+}
+
+impl<'a> Word<'a> {
+    /// Construct a `Word` from a string.
+    ///
+    /// A trailing stretch of `' '` is automatically taken to be the
+    /// whitespace part of the word.
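+    ///
+    /// # Examples
+    ///
+    /// A small example of the trailing-space handling described above:
+    ///
+    /// ```
+    /// use textwrap::core::Word;
+    ///
+    /// let word = Word::from("Hello!   ");
+    /// assert_eq!(word.word, "Hello!");
+    /// assert_eq!(word.whitespace, "   ");
+    /// assert_eq!(word.penalty, "");
+    /// ```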
+    pub fn from(word: &str) -> Word<'_> {
+        let trimmed = word.trim_end_matches(' ');
+        Word {
+            word: trimmed,
+            width: display_width(trimmed),
+            whitespace: &word[trimmed.len()..],
+            penalty: "",
+        }
+    }
+
+    /// Break this word into smaller words with a width of at most
+    /// `line_width`. The whitespace and penalty from this `Word` is
+    /// added to the last piece.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use textwrap::core::Word;
+    /// assert_eq!(
+    ///     Word::from("Hello!  ").break_apart(3).collect::<Vec<_>>(),
+    ///     vec![Word::from("Hel"), Word::from("lo!  ")]
+    /// );
+    /// ```
+    pub fn break_apart<'b>(&'b self, line_width: usize) -> impl Iterator<Item = Word<'a>> + 'b {
+        let mut char_indices = self.word.char_indices();
+        let mut offset = 0;
+        let mut width = 0;
+
+        std::iter::from_fn(move || {
+            while let Some((idx, ch)) = char_indices.next() {
+                if skip_ansi_escape_sequence(ch, &mut char_indices.by_ref().map(|(_, ch)| ch)) {
+                    continue;
+                }
+
+                if width > 0 && width + ch_width(ch) > line_width {
+                    let word = Word {
+                        word: &self.word[offset..idx],
+                        width: width,
+                        whitespace: "",
+                        penalty: "",
+                    };
+                    offset = idx;
+                    width = ch_width(ch);
+                    return Some(word);
+                }
+
+                width += ch_width(ch);
+            }
+
+            if offset < self.word.len() {
+                let word = Word {
+                    word: &self.word[offset..],
+                    width: width,
+                    whitespace: self.whitespace,
+                    penalty: self.penalty,
+                };
+                offset = self.word.len();
+                return Some(word);
+            }
+
+            None
+        })
+    }
+}
+
+impl Fragment for Word<'_> {
+    #[inline]
+    fn width(&self) -> f64 {
+        self.width as f64
+    }
+
+    // We assume the whitespace consists of ' ' only. This allows us to
+    // compute the display width in constant time.
+    #[inline]
+    fn whitespace_width(&self) -> f64 {
+        self.whitespace.len() as f64
+    }
+
+    // We assume the penalty is `""` or `"-"`. This allows us to
+    // compute the display width in constant time.
+    #[inline]
+    fn penalty_width(&self) -> f64 {
+        self.penalty.len() as f64
+    }
+}
+
+/// Forcibly break words wider than `line_width` into smaller words.
+///
+/// This simply calls [`Word::break_apart`] on words that are too
+/// wide. This means that no extra `'-'` is inserted, the word is
+/// simply broken into smaller pieces.
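+///
+/// # Examples
+///
+/// A short sketch of the behavior described above:
+///
+/// ```
+/// use textwrap::core::{break_words, Word};
+///
+/// // "memorandum" is 10 columns wide and is broken into pieces of at
+/// // most 4 columns; no hyphen is added.
+/// let words = break_words(vec![Word::from("memorandum")], 4);
+/// assert_eq!(words, vec![Word::from("memo"), Word::from("rand"), Word::from("um")]);
+/// ```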
+pub fn break_words<'a, I>(words: I, line_width: usize) -> Vec<Word<'a>>
+where
+    I: IntoIterator<Item = Word<'a>>,
+{
+    let mut shortened_words = Vec::new();
+    for word in words {
+        if word.width() > line_width as f64 {
+            shortened_words.extend(word.break_apart(line_width));
+        } else {
+            shortened_words.push(word);
+        }
+    }
+    shortened_words
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[cfg(feature = "unicode-width")]
+    use unicode_width::UnicodeWidthChar;
+
+    #[test]
+    fn skip_ansi_escape_sequence_works() {
+        let blue_text = "\u{1b}[34mHello\u{1b}[0m";
+        let mut chars = blue_text.chars();
+        let ch = chars.next().unwrap();
+        assert!(skip_ansi_escape_sequence(ch, &mut chars));
+        assert_eq!(chars.next(), Some('H'));
+    }
+
+    #[test]
+    fn emojis_have_correct_width() {
+        use unic_emoji_char::is_emoji;
+
+        // Emojis in the Basic Latin (ASCII) and Latin-1 Supplement
+        // blocks all have a width of 1 column. This includes
+        // characters such as '#' and '©'.
+        for ch in '\u{1}'..'\u{FF}' {
+            if is_emoji(ch) {
+                let desc = format!("{:?} U+{:04X}", ch, ch as u32);
+
+                #[cfg(feature = "unicode-width")]
+                assert_eq!(ch.width().unwrap(), 1, "char: {}", desc);
+
+                #[cfg(not(feature = "unicode-width"))]
+                assert_eq!(ch_width(ch), 1, "char: {}", desc);
+            }
+        }
+
+        // Emojis in the remaining blocks of the Basic Multilingual
+        // Plane (BMP), in the Supplementary Multilingual Plane (SMP),
+        // and in the Supplementary Ideographic Plane (SIP), are all 1
+        // or 2 columns wide when unicode-width is used, and always 2
+        // columns wide otherwise. This includes all of our favorite
+        // emojis such as 😊.
+        for ch in '\u{FF}'..'\u{2FFFF}' {
+            if is_emoji(ch) {
+                let desc = format!("{:?} U+{:04X}", ch, ch as u32);
+
+                #[cfg(feature = "unicode-width")]
+                assert!(ch.width().unwrap() <= 2, "char: {}", desc);
+
+                #[cfg(not(feature = "unicode-width"))]
+                assert_eq!(ch_width(ch), 2, "char: {}", desc);
+            }
+        }
+
+        // The remaining planes contain almost no assigned code points
+        // and thus also no emojis.
+    }
+
+    #[test]
+    fn display_width_works() {
+        assert_eq!("Café Plain".len(), 11); // “é” is two bytes
+        assert_eq!(display_width("Café Plain"), 10);
+        assert_eq!(display_width("\u{1b}[31mCafé Rouge\u{1b}[0m"), 10);
+    }
+
+    #[test]
+    fn display_width_narrow_emojis() {
+        #[cfg(feature = "unicode-width")]
+        assert_eq!(display_width("⁉"), 1);
+
+        // The ⁉ character is above DOUBLE_WIDTH_CUTOFF.
+        #[cfg(not(feature = "unicode-width"))]
+        assert_eq!(display_width("⁉"), 2);
+    }
+
+    #[test]
+    fn display_width_narrow_emojis_variant_selector() {
+        #[cfg(feature = "unicode-width")]
+        assert_eq!(display_width("⁉\u{fe0f}"), 1);
+
+        // The variant selector-16 is also counted.
+        #[cfg(not(feature = "unicode-width"))]
+        assert_eq!(display_width("⁉\u{fe0f}"), 4);
+    }
+
+    #[test]
+    fn display_width_emojis() {
+        assert_eq!(display_width("😂😭🥺🤣✨😍🙏🥰😊🔥"), 20);
+    }
+}
diff --git a/crates/textwrap/src/fuzzing.rs b/crates/textwrap/src/fuzzing.rs
new file mode 100644
index 0000000..24d59fd
--- /dev/null
+++ b/crates/textwrap/src/fuzzing.rs
@@ -0,0 +1,23 @@
+//! Fuzzing helpers.
+
+use super::Options;
+use std::borrow::Cow;
+
+/// Exposed for fuzzing so we can check the slow path is correct.
+pub fn fill_slow_path<'a>(text: &str, options: Options<'_>) -> String {
+    super::fill_slow_path(text, options)
+}
+
+/// Exposed for fuzzing so we can check the slow path is correct.
+pub fn wrap_single_line<'a>(line: &'a str, options: &Options<'_>, lines: &mut Vec<Cow<'a, str>>) {
+    super::wrap_single_line(line, options, lines);
+}
+
+/// Exposed for fuzzing so we can check the slow path is correct.
+pub fn wrap_single_line_slow_path<'a>(
+    line: &'a str,
+    options: &Options<'_>,
+    lines: &mut Vec<Cow<'a, str>>,
+) {
+    super::wrap_single_line_slow_path(line, options, lines)
+}
diff --git a/crates/textwrap/src/indentation.rs b/crates/textwrap/src/indentation.rs
new file mode 100644
index 0000000..2f3a853
--- /dev/null
+++ b/crates/textwrap/src/indentation.rs
@@ -0,0 +1,347 @@
+//! Functions related to adding and removing indentation from lines of
+//! text.
+//!
+//! The functions here can be used to uniformly indent or dedent
+//! (unindent) word wrapped lines of text.
+
+/// Indent each line by the given prefix.
+///
+/// # Examples
+///
+/// ```
+/// use textwrap::indent;
+///
+/// assert_eq!(indent("First line.\nSecond line.\n", "  "),
+///            "  First line.\n  Second line.\n");
+/// ```
+///
+/// When indenting, trailing whitespace is stripped from the prefix.
+/// This means that empty lines remain empty afterwards:
+///
+/// ```
+/// use textwrap::indent;
+///
+/// assert_eq!(indent("First line.\n\n\nSecond line.\n", "  "),
+///            "  First line.\n\n\n  Second line.\n");
+/// ```
+///
+/// Notice how `"\n\n\n"` remained as `"\n\n\n"`.
+///
+/// This feature is useful when you want to indent text and have a
+/// space between your prefix and the text. In this case, you _don't_
+/// want a trailing space on empty lines:
+///
+/// ```
+/// use textwrap::indent;
+///
+/// assert_eq!(indent("foo = 123\n\nprint(foo)\n", "# "),
+///            "# foo = 123\n#\n# print(foo)\n");
+/// ```
+///
+/// Notice how `"\n\n"` became `"\n#\n"` instead of `"\n# \n"` which
+/// would have trailing whitespace.
+///
+/// Leading and trailing whitespace coming from the text itself is
+/// kept unchanged:
+///
+/// ```
+/// use textwrap::indent;
+///
+/// assert_eq!(indent(" \t  Foo   ", "->"), "-> \t  Foo   ");
+/// ```
+pub fn indent(s: &str, prefix: &str) -> String {
+    // We know we'll need more than s.len() bytes for the output, but
+    // without counting '\n' characters (which is somewhat slow), we
+    // don't know exactly how much. However, we can preemptively do
+    // the first doubling of the output size.
+    let mut result = String::with_capacity(2 * s.len());
+    let trimmed_prefix = prefix.trim_end();
+    for (idx, line) in s.split_terminator('\n').enumerate() {
+        if idx > 0 {
+            result.push('\n');
+        }
+        if line.trim().is_empty() {
+            result.push_str(trimmed_prefix);
+        } else {
+            result.push_str(prefix);
+        }
+        result.push_str(line);
+    }
+    if s.ends_with('\n') {
+        // split_terminator will have eaten the final '\n'.
+        result.push('\n');
+    }
+    result
+}
+
+/// Removes common leading whitespace from each line.
+///
+/// This function will look at each non-empty line and determine the
+/// maximum amount of whitespace that can be removed from all lines:
+///
+/// ```
+/// use textwrap::dedent;
+///
+/// assert_eq!(dedent("
+///     1st line
+///       2nd line
+///     3rd line
+/// "), "
+/// 1st line
+///   2nd line
+/// 3rd line
+/// ");
+/// ```
+pub fn dedent(s: &str) -> String {
+    let mut prefix = "";
+    let mut lines = s.lines();
+
+    // We first search for a non-empty line to find a prefix.
+    for line in &mut lines {
+        let mut whitespace_idx = line.len();
+        for (idx, ch) in line.char_indices() {
+            if !ch.is_whitespace() {
+                whitespace_idx = idx;
+                break;
+            }
+        }
+
+        // Check if the line had anything but whitespace
+        if whitespace_idx < line.len() {
+            prefix = &line[..whitespace_idx];
+            break;
+        }
+    }
+
+    // We then continue looking through the remaining lines to
+    // possibly shorten the prefix.
+    for line in &mut lines {
+        let mut whitespace_idx = line.len();
+        for ((idx, a), b) in line.char_indices().zip(prefix.chars()) {
+            if a != b {
+                whitespace_idx = idx;
+                break;
+            }
+        }
+
+        // Check if the line had anything but whitespace and if we
+        // have found a shorter prefix
+        if whitespace_idx < line.len() && whitespace_idx < prefix.len() {
+            prefix = &line[..whitespace_idx];
+        }
+    }
+
+    // We now go over the lines a second time to build the result.
+    let mut result = String::new();
+    for line in s.lines() {
+        if line.starts_with(prefix) && line.chars().any(|c| !c.is_whitespace()) {
+            let (_, tail) = line.split_at(prefix.len());
+            result.push_str(tail);
+        }
+        result.push('\n');
+    }
+
+    if result.ends_with('\n') && !s.ends_with('\n') {
+        let new_len = result.len() - 1;
+        result.truncate(new_len);
+    }
+
+    result
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn indent_empty() {
+        assert_eq!(indent("\n", "  "), "\n");
+    }
+
+    #[test]
+    #[rustfmt::skip]
+    fn indent_nonempty() {
+        let text = [
+            "  foo\n",
+            "bar\n",
+            "  baz\n",
+        ].join("");
+        let expected = [
+            "//   foo\n",
+            "// bar\n",
+            "//   baz\n",
+        ].join("");
+        assert_eq!(indent(&text, "// "), expected);
+    }
+
+    #[test]
+    #[rustfmt::skip]
+    fn indent_empty_line() {
+        let text = [
+            "  foo",
+            "bar",
+            "",
+            "  baz",
+        ].join("\n");
+        let expected = [
+            "//   foo",
+            "// bar",
+            "//",
+            "//   baz",
+        ].join("\n");
+        assert_eq!(indent(&text, "// "), expected);
+    }
+
+    #[test]
+    fn dedent_empty() {
+        assert_eq!(dedent(""), "");
+    }
+
+    #[test]
+    #[rustfmt::skip]
+    fn dedent_multi_line() {
+        let x = [
+            "    foo",
+            "  bar",
+            "    baz",
+        ].join("\n");
+        let y = [
+            "  foo",
+            "bar",
+            "  baz"
+        ].join("\n");
+        assert_eq!(dedent(&x), y);
+    }
+
+    #[test]
+    #[rustfmt::skip]
+    fn dedent_empty_line() {
+        let x = [
+            "    foo",
+            "  bar",
+            "   ",
+            "    baz"
+        ].join("\n");
+        let y = [
+            "  foo",
+            "bar",
+            "",
+            "  baz"
+        ].join("\n");
+        assert_eq!(dedent(&x), y);
+    }
+
+    #[test]
+    #[rustfmt::skip]
+    fn dedent_blank_line() {
+        let x = [
+            "      foo",
+            "",
+            "        bar",
+            "          foo",
+            "          bar",
+            "          baz",
+        ].join("\n");
+        let y = [
+            "foo",
+            "",
+            "  bar",
+            "    foo",
+            "    bar",
+            "    baz",
+        ].join("\n");
+        assert_eq!(dedent(&x), y);
+    }
+
+    #[test]
+    #[rustfmt::skip]
+    fn dedent_whitespace_line() {
+        let x = [
+            "      foo",
+            " ",
+            "        bar",
+            "          foo",
+            "          bar",
+            "          baz",
+        ].join("\n");
+        let y = [
+            "foo",
+            "",
+            "  bar",
+            "    foo",
+            "    bar",
+            "    baz",
+        ].join("\n");
+        assert_eq!(dedent(&x), y);
+    }
+
+    #[test]
+    #[rustfmt::skip]
+    fn dedent_mixed_whitespace() {
+        let x = [
+            "\tfoo",
+            "  bar",
+        ].join("\n");
+        let y = [
+            "\tfoo",
+            "  bar",
+        ].join("\n");
+        assert_eq!(dedent(&x), y);
+    }
+
+    #[test]
+    #[rustfmt::skip]
+    fn dedent_tabbed_whitespace() {
+        let x = [
+            "\t\tfoo",
+            "\t\t\tbar",
+        ].join("\n");
+        let y = [
+            "foo",
+            "\tbar",
+        ].join("\n");
+        assert_eq!(dedent(&x), y);
+    }
+
+    #[test]
+    #[rustfmt::skip]
+    fn dedent_mixed_tabbed_whitespace() {
+        let x = [
+            "\t  \tfoo",
+            "\t  \t\tbar",
+        ].join("\n");
+        let y = [
+            "foo",
+            "\tbar",
+        ].join("\n");
+        assert_eq!(dedent(&x), y);
+    }
+
+    #[test]
+    #[rustfmt::skip]
+    fn dedent_mixed_tabbed_whitespace2() {
+        let x = [
+            "\t  \tfoo",
+            "\t    \tbar",
+        ].join("\n");
+        let y = [
+            "\tfoo",
+            "  \tbar",
+        ].join("\n");
+        assert_eq!(dedent(&x), y);
+    }
+
+    #[test]
+    #[rustfmt::skip]
+    fn dedent_preserve_no_terminating_newline() {
+        let x = [
+            "  foo",
+            "    bar",
+        ].join("\n");
+        let y = [
+            "foo",
+            "  bar",
+        ].join("\n");
+        assert_eq!(dedent(&x), y);
+    }
+}
diff --git a/crates/textwrap/src/lib.rs b/crates/textwrap/src/lib.rs
new file mode 100644
index 0000000..ed346a2
--- /dev/null
+++ b/crates/textwrap/src/lib.rs
@@ -0,0 +1,2058 @@
+//! The textwrap library provides functions for word wrapping and
+//! indenting text.
+//!
+//! # Wrapping Text
+//!
+//! Wrapping text can be very useful in command-line programs where
+//! you want to format dynamic output nicely so it looks good in a
+//! terminal. A quick example:
+//!
+//! ```
+//! # #[cfg(feature = "smawk")] {
+//! let text = "textwrap: a small library for wrapping text.";
+//! assert_eq!(textwrap::wrap(text, 18),
+//!            vec!["textwrap: a",
+//!                 "small library for",
+//!                 "wrapping text."]);
+//! # }
+//! ```
+//!
+//! The [`wrap`] function returns the individual lines; use [`fill`]
+//! if you want the lines joined with `'\n'` to form a `String`.
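+//!
+//! A sketch of the same text filled instead of wrapped; the expected
+//! string is simply the lines from the example above joined with `'\n'`:
+//!
+//! ```
+//! # #[cfg(feature = "smawk")] {
+//! let text = "textwrap: a small library for wrapping text.";
+//! assert_eq!(textwrap::fill(text, 18),
+//!            "textwrap: a\nsmall library for\nwrapping text.");
+//! # }
+//! ```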
+//!
+//! If you enable the `hyphenation` Cargo feature, you can get
+//! automatic hyphenation for a number of languages:
+//!
+//! ```
+//! #[cfg(feature = "hyphenation")] {
+//! use hyphenation::{Language, Load, Standard};
+//! use textwrap::{wrap, Options, WordSplitter};
+//!
+//! let text = "textwrap: a small library for wrapping text.";
+//! let dictionary = Standard::from_embedded(Language::EnglishUS).unwrap();
+//! let options = Options::new(18).word_splitter(WordSplitter::Hyphenation(dictionary));
+//! assert_eq!(wrap(text, &options),
+//!            vec!["textwrap: a small",
+//!                 "library for wrap-",
+//!                 "ping text."]);
+//! }
+//! ```
+//!
+//! See also the [`unfill`] and [`refill`] functions which allow you to
+//! manipulate already wrapped text.
+//!
+//! ## Wrapping Strings at Compile Time
+//!
+//! If your strings are known at compile time, please take a look at
+//! the procedural macros from the [textwrap-macros] crate.
+//!
+//! ## Displayed Width vs Byte Size
+//!
+//! To word wrap text, one must know the width of each word so one can
+//! know when to break lines. This library will by default measure the
+//! width of text using the _displayed width_, not the size in bytes.
+//! The `unicode-width` Cargo feature controls this.
+//!
+//! This is important for non-ASCII text. ASCII characters such as `a`
+//! and `!` are simple and take up one column each. This means that
+//! the displayed width is equal to the string length in bytes.
+//! However, non-ASCII characters and symbols take up more than one
+//! byte when UTF-8 encoded: `é` is `0xc3 0xa9` (two bytes) and `⚙` is
+//! `0xe2 0x9a 0x99` (three bytes) in UTF-8, respectively.
+//!
+//! This is why we take care to use the displayed width instead of the
+//! byte count when computing line lengths. All functions in this
+//! library handle Unicode characters like this when the
+//! `unicode-width` Cargo feature is enabled (it is enabled by
+//! default).
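+//!
+//! As a small illustration of the difference (a short sketch, assuming
+//! the default `unicode-width` feature is enabled):
+//!
+//! ```
+//! # #[cfg(feature = "unicode-width")] {
+//! // "é" is two bytes when UTF-8 encoded, but only one column wide:
+//! assert_eq!("é".len(), 2);
+//! assert_eq!(textwrap::core::display_width("é"), 1);
+//! # }
+//! ```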
+//!
+//! # Indentation and Dedentation
+//!
+//! The textwrap library also offers functions for adding a prefix to
+//! every line of a string and for removing leading whitespace. As an
+//! example, the [`indent`] function allows you to turn lines of text
+//! into a bullet list:
+//!
+//! ```
+//! let before = "\
+//! foo
+//! bar
+//! baz
+//! ";
+//! let after = "\
+//! * foo
+//! * bar
+//! * baz
+//! ";
+//! assert_eq!(textwrap::indent(before, "* "), after);
+//! ```
+//!
+//! Removing leading whitespace is done with [`dedent`]:
+//!
+//! ```
+//! let before = "
+//!     Some
+//!       indented
+//!         text
+//! ";
+//! let after = "
+//! Some
+//!   indented
+//!     text
+//! ";
+//! assert_eq!(textwrap::dedent(before), after);
+//! ```
+//!
+//! # Cargo Features
+//!
+//! The textwrap library can be slimmed down as needed via a number of
+//! Cargo features. This means you only pay for the features you
+//! actually use.
+//!
+//! The full dependency graph, where dashed lines indicate optional
+//! dependencies, is shown below:
+//!
+//! <img src="https://raw.githubusercontent.com/mgeisler/textwrap/master/images/textwrap-0.16.0.svg">
+//!
+//! ## Default Features
+//!
+//! These features are enabled by default:
+//!
+//! * `unicode-linebreak`: enables finding words using the
+//!   [unicode-linebreak] crate, which implements the line breaking
+//!   algorithm described in [Unicode Standard Annex
+//!   #14](https://www.unicode.org/reports/tr14/).
+//!
+//!   This feature can be disabled if you are happy to find words
+//!   separated by ASCII space characters only. People wrapping text
+//!   with emojis or East-Asian characters will most likely want
+//!   to enable this feature. See [`WordSeparator`] for details.
+//!
+//! * `unicode-width`: enables correct width computation of non-ASCII
+//!   characters via the [unicode-width] crate. Without this feature,
+//!   every [`char`] is 1 column wide, except for emojis which are 2
+//!   columns wide. See the [`core::display_width`] function for
+//!   details.
+//!
+//!   This feature can be disabled if you only need to wrap ASCII
+//!   text, or if the functions in [`core`] are used directly with
+//!   [`core::Fragment`]s for which the widths have been computed in
+//!   other ways.
+//!
+//! * `smawk`: enables linear-time wrapping of the whole paragraph via
+//!   the [smawk] crate. See the [`wrap_algorithms::wrap_optimal_fit`]
+//!   function for details on the optimal-fit algorithm.
+//!
+//!   This feature can be disabled if you only ever intend to use
+//!   [`wrap_algorithms::wrap_first_fit`].
+//!
+//! <!-- begin binary-sizes -->
+//!
+//! With Rust 1.64.0, the size impact of the above features on your
+//! binary is as follows:
+//!
+//! | Configuration                            |  Binary Size |    Delta |
+//! | :---                                     |         ---: |     ---: |
+//! | quick-and-dirty implementation           |       289 KB |     — KB |
+//! | textwrap without default features        |       305 KB |    16 KB |
+//! | textwrap with smawk                      |       317 KB |    28 KB |
+//! | textwrap with unicode-width              |       309 KB |    20 KB |
+//! | textwrap with unicode-linebreak          |       342 KB |    53 KB |
+//!
+//! <!-- end binary-sizes -->
+//!
+//! The above sizes are the stripped sizes and the binary is compiled
+//! in release mode with this profile:
+//!
+//! ```toml
+//! [profile.release]
+//! lto = true
+//! codegen-units = 1
+//! ```
+//!
+//! See the [binary-sizes demo] if you want to reproduce these
+//! results.
+//!
+//! ## Optional Features
+//!
+//! These Cargo features enable new functionality:
+//!
+//! * `terminal_size`: enables automatic detection of the terminal
+//!   width via the [terminal_size] crate. See the
+//!   [`Options::with_termwidth`] constructor for details.
+//!
+//! * `hyphenation`: enables language-sensitive hyphenation via the
+//!   [hyphenation] crate. See the [`word_splitters::WordSplitter`]
+//!   trait for details.
+//!
+//! [unicode-linebreak]: https://docs.rs/unicode-linebreak/
+//! [unicode-width]: https://docs.rs/unicode-width/
+//! [smawk]: https://docs.rs/smawk/
+//! [binary-sizes demo]: https://github.com/mgeisler/textwrap/tree/master/examples/binary-sizes
+//! [textwrap-macros]: https://docs.rs/textwrap-macros/
+//! [terminal_size]: https://docs.rs/terminal_size/
+//! [hyphenation]: https://docs.rs/hyphenation/
+
+#![doc(html_root_url = "https://docs.rs/textwrap/0.16.0")]
+#![forbid(unsafe_code)] // See https://github.com/mgeisler/textwrap/issues/210
+#![deny(missing_docs)]
+#![deny(missing_debug_implementations)]
+#![allow(clippy::redundant_field_names)]
+
+// Make `cargo test` execute the README doctests.
+#[cfg(doctest)]
+#[doc = include_str!("../README.md")]
+mod readme_doctest {}
+
+use std::borrow::Cow;
+
+mod indentation;
+pub use crate::indentation::{dedent, indent};
+
+mod word_separators;
+pub use word_separators::WordSeparator;
+
+pub mod word_splitters;
+pub use word_splitters::WordSplitter;
+
+pub mod wrap_algorithms;
+pub use wrap_algorithms::WrapAlgorithm;
+
+mod line_ending;
+pub use line_ending::LineEnding;
+
+pub mod core;
+
+// This module is only active when running fuzz tests. It provides
+// access to private helpers.
+#[cfg(fuzzing)]
+pub mod fuzzing;
+
+/// Holds configuration options for wrapping and filling text.
+#[non_exhaustive]
+#[derive(Debug, Clone)]
+pub struct Options<'a> {
+    /// The width in columns at which the text will be wrapped.
+    pub width: usize,
+    /// Line ending used for breaking lines.
+    pub line_ending: LineEnding,
+    /// Indentation used for the first line of output. See the
+    /// [`Options::initial_indent`] method.
+    pub initial_indent: &'a str,
+    /// Indentation used for subsequent lines of output. See the
+    /// [`Options::subsequent_indent`] method.
+    pub subsequent_indent: &'a str,
+    /// Allow long words to be broken if they cannot fit on a line.
+    /// When set to `false`, some lines may be longer than
+    /// `self.width`. See the [`Options::break_words`] method.
+    pub break_words: bool,
+    /// Wrapping algorithm to use, see the implementations of the
+    /// [`wrap_algorithms::WrapAlgorithm`] trait for details.
+    pub wrap_algorithm: WrapAlgorithm,
+    /// The line breaking algorithm to use, see the
+    /// [`word_separators::WordSeparator`] trait for an overview and
+    /// possible implementations.
+    pub word_separator: WordSeparator,
+    /// The method for splitting words. This can be used to prohibit
+    /// splitting words on hyphens, or it can be used to implement
+    /// language-aware machine hyphenation.
+    pub word_splitter: WordSplitter,
+}
+
+impl<'a> From<&'a Options<'a>> for Options<'a> {
+    fn from(options: &'a Options<'a>) -> Self {
+        Self {
+            width: options.width,
+            line_ending: options.line_ending,
+            initial_indent: options.initial_indent,
+            subsequent_indent: options.subsequent_indent,
+            break_words: options.break_words,
+            word_separator: options.word_separator,
+            wrap_algorithm: options.wrap_algorithm,
+            word_splitter: options.word_splitter.clone(),
+        }
+    }
+}
+
+impl<'a> From<usize> for Options<'a> {
+    fn from(width: usize) -> Self {
+        Options::new(width)
+    }
+}
+
+impl<'a> Options<'a> {
+    /// Creates a new [`Options`] with the specified width.
+    ///
+    /// The other fields are given default values as follows:
+    ///
+    /// ```
+    /// # use textwrap::{LineEnding, Options, WordSplitter, WordSeparator, WrapAlgorithm};
+    /// # let width = 80;
+    /// let options = Options::new(width);
+    /// assert_eq!(options.line_ending, LineEnding::LF);
+    /// assert_eq!(options.initial_indent, "");
+    /// assert_eq!(options.subsequent_indent, "");
+    /// assert_eq!(options.break_words, true);
+    ///
+    /// #[cfg(feature = "unicode-linebreak")]
+    /// assert_eq!(options.word_separator, WordSeparator::UnicodeBreakProperties);
+    /// #[cfg(not(feature = "unicode-linebreak"))]
+    /// assert_eq!(options.word_separator, WordSeparator::AsciiSpace);
+    ///
+    /// #[cfg(feature = "smawk")]
+    /// assert_eq!(options.wrap_algorithm, WrapAlgorithm::new_optimal_fit());
+    /// #[cfg(not(feature = "smawk"))]
+    /// assert_eq!(options.wrap_algorithm, WrapAlgorithm::FirstFit);
+    ///
+    /// assert_eq!(options.word_splitter, WordSplitter::HyphenSplitter);
+    /// ```
+    ///
+    /// Note that the default word separator and wrap algorithm
+    /// change based on the available Cargo features. The best
+    /// available algorithms are used by default.
+    pub const fn new(width: usize) -> Self {
+        Options {
+            width,
+            line_ending: LineEnding::LF,
+            initial_indent: "",
+            subsequent_indent: "",
+            break_words: true,
+            word_separator: WordSeparator::new(),
+            wrap_algorithm: WrapAlgorithm::new(),
+            word_splitter: WordSplitter::HyphenSplitter,
+        }
+    }
+
+    /// Creates a new [`Options`] with `width` set to the current
+    /// terminal width. If the terminal width cannot be determined
+    /// (typically because the standard input and output is not
+    /// connected to a terminal), a width of 80 characters will be
+    /// used. Other settings use the same defaults as
+    /// [`Options::new`].
+    ///
+    /// Equivalent to:
+    ///
+    /// ```no_run
+    /// use textwrap::{termwidth, Options};
+    ///
+    /// let options = Options::new(termwidth());
+    /// ```
+    ///
+    /// **Note:** Only available when the `terminal_size` feature is
+    /// enabled.
+    #[cfg(feature = "terminal_size")]
+    pub fn with_termwidth() -> Self {
+        Self::new(termwidth())
+    }
+
+    /// Change [`self.line_ending`]. This specifies which of the
+    /// supported line endings should be used to break the lines of the
+    /// input text.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use textwrap::{refill, LineEnding, Options};
+    ///
+    /// let options = Options::new(15).line_ending(LineEnding::CRLF);
+    /// assert_eq!(refill("This is a little example.", options),
+    ///            "This is a\r\nlittle example.");
+    /// ```
+    ///
+    /// [`self.line_ending`]: #structfield.line_ending
+    pub fn line_ending(self, line_ending: LineEnding) -> Self {
+        Options {
+            line_ending,
+            ..self
+        }
+    }
+
+    /// Change [`self.initial_indent`]. The initial indentation is
+    /// used on the very first line of output.
+    ///
+    /// # Examples
+    ///
+    /// Classic paragraph indentation can be achieved by specifying an
+    /// initial indentation and wrapping each paragraph by itself:
+    ///
+    /// ```
+    /// use textwrap::{wrap, Options};
+    ///
+    /// let options = Options::new(16).initial_indent("    ");
+    /// assert_eq!(wrap("This is a little example.", options),
+    ///            vec!["    This is a",
+    ///                 "little example."]);
+    /// ```
+    ///
+    /// [`self.initial_indent`]: #structfield.initial_indent
+    pub fn initial_indent(self, indent: &'a str) -> Self {
+        Options {
+            initial_indent: indent,
+            ..self
+        }
+    }
+
+    /// Change [`self.subsequent_indent`]. The subsequent indentation
+    /// is used on lines following the first line of output.
+    ///
+    /// # Examples
+    ///
+    /// Combining initial and subsequent indentation lets you format a
+    /// single paragraph as a bullet list:
+    ///
+    /// ```
+    /// use textwrap::{wrap, Options};
+    ///
+    /// let options = Options::new(12)
+    ///     .initial_indent("* ")
+    ///     .subsequent_indent("  ");
+    /// #[cfg(feature = "smawk")]
+    /// assert_eq!(wrap("This is a little example.", options),
+    ///            vec!["* This is",
+    ///                 "  a little",
+    ///                 "  example."]);
+    ///
+    /// // Without the `smawk` feature, the wrapping is a little different:
+    /// #[cfg(not(feature = "smawk"))]
+    /// assert_eq!(wrap("This is a little example.", options),
+    ///            vec!["* This is a",
+    ///                 "  little",
+    ///                 "  example."]);
+    /// ```
+    ///
+    /// [`self.subsequent_indent`]: #structfield.subsequent_indent
+    pub fn subsequent_indent(self, indent: &'a str) -> Self {
+        Options {
+            subsequent_indent: indent,
+            ..self
+        }
+    }
+
+    /// Change [`self.break_words`]. This controls if words longer
+    /// than `self.width` can be broken, or if they will be left
+    /// sticking out into the right margin.
+    ///
+    /// See [`Options::word_splitter`] instead if you want to control
+    /// hyphenation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use textwrap::{wrap, Options};
+    ///
+    /// let options = Options::new(4).break_words(true);
+    /// assert_eq!(wrap("This is a little example.", options),
+    ///            vec!["This",
+    ///                 "is a",
+    ///                 "litt",
+    ///                 "le",
+    ///                 "exam",
+    ///                 "ple."]);
+    /// ```
+    ///
+    /// [`self.break_words`]: #structfield.break_words
+    pub fn break_words(self, setting: bool) -> Self {
+        Options {
+            break_words: setting,
+            ..self
+        }
+    }
+
+    /// Change [`self.word_separator`].
+    ///
+    /// See [`word_separators::WordSeparator`] for details on the choices.
+    ///
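+    /// # Examples
+    ///
+    /// A brief sketch (illustrative; for plain ASCII text the result
+    /// matches the default separator): force the plain ASCII-space
+    /// separator.
+    ///
+    /// ```
+    /// use textwrap::{wrap, Options, WordSeparator};
+    ///
+    /// let options = Options::new(10).word_separator(WordSeparator::AsciiSpace);
+    /// assert_eq!(wrap("foo bar baz", &options), vec!["foo bar", "baz"]);
+    /// ```
+    ///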
+    /// [`self.word_separator`]: #structfield.word_separator
+    pub fn word_separator(self, word_separator: WordSeparator) -> Options<'a> {
+        Options {
+            width: self.width,
+            line_ending: self.line_ending,
+            initial_indent: self.initial_indent,
+            subsequent_indent: self.subsequent_indent,
+            break_words: self.break_words,
+            word_separator: word_separator,
+            wrap_algorithm: self.wrap_algorithm,
+            word_splitter: self.word_splitter,
+        }
+    }
+
+    /// Change [`self.wrap_algorithm`].
+    ///
+    /// See the [`wrap_algorithms::WrapAlgorithm`] trait for details on
+    /// the choices.
+    ///
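+    /// # Examples
+    ///
+    /// A short sketch (the input and expected lines mirror the crate's
+    /// own tests): force the greedy first-fit algorithm, which simply
+    /// accumulates words until a line is full.
+    ///
+    /// ```
+    /// use textwrap::{wrap, Options, WrapAlgorithm};
+    ///
+    /// let options = Options::new(10).wrap_algorithm(WrapAlgorithm::FirstFit);
+    /// assert_eq!(wrap("To be, or not to be, that is the question.", &options),
+    ///            vec!["To be, or", "not to be,", "that is", "the", "question."]);
+    /// ```
+    ///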
+    /// [`self.wrap_algorithm`]: #structfield.wrap_algorithm
+    pub fn wrap_algorithm(self, wrap_algorithm: WrapAlgorithm) -> Options<'a> {
+        Options {
+            width: self.width,
+            line_ending: self.line_ending,
+            initial_indent: self.initial_indent,
+            subsequent_indent: self.subsequent_indent,
+            break_words: self.break_words,
+            word_separator: self.word_separator,
+            wrap_algorithm: wrap_algorithm,
+            word_splitter: self.word_splitter,
+        }
+    }
+
+    /// Change [`self.word_splitter`]. The
+    /// [`word_splitters::WordSplitter`] is used to fit part of a word
+    /// into the current line when wrapping text.
+    ///
+    /// See [`Options::break_words`] instead if you want to control the
+    /// handling of words longer than the line width.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use textwrap::{wrap, Options, WordSplitter};
+    ///
+    /// // The default is WordSplitter::HyphenSplitter.
+    /// let options = Options::new(5);
+    /// assert_eq!(wrap("foo-bar-baz", &options),
+    ///            vec!["foo-", "bar-", "baz"]);
+    ///
+    /// // The word is now so long that break_words kicks in:
+    /// let options = Options::new(5)
+    ///     .word_splitter(WordSplitter::NoHyphenation);
+    /// assert_eq!(wrap("foo-bar-baz", &options),
+    ///            vec!["foo-b", "ar-ba", "z"]);
+    ///
+    /// // If you want no breaks at all, disable both:
+    /// let options = Options::new(5)
+    ///     .break_words(false)
+    ///     .word_splitter(WordSplitter::NoHyphenation);
+    /// assert_eq!(wrap("foo-bar-baz", &options),
+    ///            vec!["foo-bar-baz"]);
+    /// ```
+    ///
+    /// [`self.word_splitter`]: #structfield.word_splitter
+    pub fn word_splitter(self, word_splitter: WordSplitter) -> Options<'a> {
+        Options {
+            width: self.width,
+            line_ending: self.line_ending,
+            initial_indent: self.initial_indent,
+            subsequent_indent: self.subsequent_indent,
+            break_words: self.break_words,
+            word_separator: self.word_separator,
+            wrap_algorithm: self.wrap_algorithm,
+            word_splitter,
+        }
+    }
+}
+
+/// Return the current terminal width.
+///
+/// If the terminal width cannot be determined (typically because the
+/// standard output is not connected to a terminal), a default width
+/// of 80 characters will be used.
+///
+/// # Examples
+///
+/// Create an [`Options`] for wrapping at the current terminal width
+/// with a two column margin to the left and the right:
+///
+/// ```no_run
+/// use textwrap::{termwidth, Options};
+///
+/// let width = termwidth() - 4; // Two columns on each side.
+/// let options = Options::new(width)
+///     .initial_indent("  ")
+///     .subsequent_indent("  ");
+/// ```
+///
+/// **Note:** Only available when the `terminal_size` Cargo feature is
+/// enabled.
+#[cfg(feature = "terminal_size")]
+pub fn termwidth() -> usize {
+    terminal_size::terminal_size().map_or(80, |(terminal_size::Width(w), _)| w.into())
+}
+
+/// Fill a line of text at a given width.
+///
+/// The result is a [`String`], complete with newlines between each
+/// line. Use the [`wrap`] function if you need access to the
+/// individual lines.
+///
+/// The easiest way to use this function is to pass an integer for
+/// `width_or_options`:
+///
+/// ```
+/// use textwrap::fill;
+///
+/// assert_eq!(
+///     fill("Memory safety without garbage collection.", 15),
+///     "Memory safety\nwithout garbage\ncollection."
+/// );
+/// ```
+///
+/// If you need to customize the wrapping, you can pass an [`Options`]
+/// instead of a `usize`:
+///
+/// ```
+/// use textwrap::{fill, Options};
+///
+/// let options = Options::new(15)
+///     .initial_indent("- ")
+///     .subsequent_indent("  ");
+/// assert_eq!(
+///     fill("Memory safety without garbage collection.", &options),
+///     "- Memory safety\n  without\n  garbage\n  collection."
+/// );
+/// ```
+pub fn fill<'a, Opt>(text: &str, width_or_options: Opt) -> String
+where
+    Opt: Into<Options<'a>>,
+{
+    let options = width_or_options.into();
+
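+    // Fast path: if the text has no newlines, fits within the width,
+    // and needs no initial indent, trim trailing spaces and return it
+    // without running the full wrapping machinery.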
+    if text.len() < options.width && !text.contains('\n') && options.initial_indent.is_empty() {
+        String::from(text.trim_end_matches(' '))
+    } else {
+        fill_slow_path(text, options)
+    }
+}
+
+/// Slow path for fill.
+///
+/// This is taken when `text` is longer than `options.width`.
+fn fill_slow_path(text: &str, options: Options<'_>) -> String {
+    // This will avoid reallocation in simple cases (no
+    // indentation, no hyphenation).
+    let mut result = String::with_capacity(text.len());
+
+    let line_ending_str = options.line_ending.as_str();
+    for (i, line) in wrap(text, options).iter().enumerate() {
+        if i > 0 {
+            result.push_str(line_ending_str);
+        }
+        result.push_str(line);
+    }
+
+    result
+}
+
+/// Unpack a paragraph of already-wrapped text.
+///
+/// This function attempts to recover the original text from a single
+/// paragraph of text produced by the [`fill`] function. This means
+/// that it turns
+///
+/// ```text
+/// textwrap: a small
+/// library for
+/// wrapping text.
+/// ```
+///
+/// back into
+///
+/// ```text
+/// textwrap: a small library for wrapping text.
+/// ```
+///
+/// In addition, it will recognize a common prefix and a common line
+/// ending among the lines.
+///
+/// The prefix of the first line is returned in
+/// [`Options::initial_indent`] and the prefix (if any) of the
+/// other lines is returned in [`Options::subsequent_indent`].
+///
+/// The line ending is returned in [`Options::line_ending`]. If the
+/// line ending cannot be confidently detected (mixed or no line
+/// endings in the input), [`LineEnding::LF`] will be returned.
+///
+/// In addition to `' '`, the prefixes can consist of characters used
+/// for unordered lists (`'-'`, `'+'`, and `'*'`) and block quotes
+/// (`'>'`) in Markdown as well as characters often used for inline
+/// comments (`'#'` and `'/'`).
+///
+/// The text must come from a single wrapped paragraph. This means
+/// that there can be no empty lines (`"\n\n"` or `"\r\n\r\n"`) within
+/// the text. It is unspecified what happens if `unfill` is called on
+/// more than one paragraph of text.
+///
+/// # Examples
+///
+/// ```
+/// use textwrap::{LineEnding, unfill};
+///
+/// let (text, options) = unfill("\
+/// * This is an
+///   example of
+///   a list item.
+/// ");
+///
+/// assert_eq!(text, "This is an example of a list item.\n");
+/// assert_eq!(options.initial_indent, "* ");
+/// assert_eq!(options.subsequent_indent, "  ");
+/// assert_eq!(options.line_ending, LineEnding::LF);
+/// ```
+pub fn unfill(text: &str) -> (String, Options<'_>) {
+    let prefix_chars: &[_] = &[' ', '-', '+', '*', '>', '#', '/'];
+
+    let mut options = Options::new(0);
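+    // First pass: use the widest line as the width, take the first
+    // line's prefix as `initial_indent`, and shrink `subsequent_indent`
+    // down to the longest prefix shared by all later lines.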
+    for (idx, line) in text.lines().enumerate() {
+        options.width = std::cmp::max(options.width, core::display_width(line));
+        let without_prefix = line.trim_start_matches(prefix_chars);
+        let prefix = &line[..line.len() - without_prefix.len()];
+
+        if idx == 0 {
+            options.initial_indent = prefix;
+        } else if idx == 1 {
+            options.subsequent_indent = prefix;
+        } else if idx > 1 {
+            for ((idx, x), y) in prefix.char_indices().zip(options.subsequent_indent.chars()) {
+                if x != y {
+                    options.subsequent_indent = &prefix[..idx];
+                    break;
+                }
+            }
+            if prefix.len() < options.subsequent_indent.len() {
+                options.subsequent_indent = prefix;
+            }
+        }
+    }
+
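+    // Second pass: strip the detected prefixes, join the lines with
+    // single spaces, and detect the line ending used in the input.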
+    let mut unfilled = String::with_capacity(text.len());
+    let mut detected_line_ending = None;
+
+    for (idx, (line, ending)) in line_ending::NonEmptyLines(text).enumerate() {
+        if idx == 0 {
+            unfilled.push_str(&line[options.initial_indent.len()..]);
+        } else {
+            unfilled.push(' ');
+            unfilled.push_str(&line[options.subsequent_indent.len()..]);
+        }
+        match (detected_line_ending, ending) {
+            (None, Some(_)) => detected_line_ending = ending,
+            (Some(LineEnding::CRLF), Some(LineEnding::LF)) => detected_line_ending = ending,
+            _ => (),
+        }
+    }
+
+    // Add back a line ending if `text` ends with the one we detect.
+    if let Some(line_ending) = detected_line_ending {
+        if text.ends_with(line_ending.as_str()) {
+            unfilled.push_str(line_ending.as_str());
+        }
+    }
+
+    options.line_ending = detected_line_ending.unwrap_or(LineEnding::LF);
+    (unfilled, options)
+}
+
+/// Refill a paragraph of wrapped text with a new width.
+///
+/// This function will first use the [`unfill`] function to remove
+/// newlines from the text. Afterwards the text is filled again using
+/// the [`fill`] function.
+///
+/// The `new_width_or_options` argument specifies the new width and can
+/// specify other options as well — except for
+/// [`Options::initial_indent`] and [`Options::subsequent_indent`],
+/// which are deduced from `filled_text`.
+///
+/// # Examples
+///
+/// ```
+/// use textwrap::refill;
+///
+/// // Some loosely wrapped text. The "> " prefix is recognized automatically.
+/// let text = "\
+/// > Memory
+/// > safety without garbage
+/// > collection.
+/// ";
+///
+/// assert_eq!(refill(text, 20), "\
+/// > Memory safety
+/// > without garbage
+/// > collection.
+/// ");
+///
+/// assert_eq!(refill(text, 40), "\
+/// > Memory safety without garbage
+/// > collection.
+/// ");
+///
+/// assert_eq!(refill(text, 60), "\
+/// > Memory safety without garbage collection.
+/// ");
+/// ```
+///
+/// You can also reshape bullet points:
+///
+/// ```
+/// use textwrap::refill;
+///
+/// let text = "\
+/// - This is my
+///   list item.
+/// ";
+///
+/// assert_eq!(refill(text, 20), "\
+/// - This is my list
+///   item.
+/// ");
+/// ```
+pub fn refill<'a, Opt>(filled_text: &str, new_width_or_options: Opt) -> String
+where
+    Opt: Into<Options<'a>>,
+{
+    let mut new_options = new_width_or_options.into();
+    let (text, options) = unfill(filled_text);
+    // The original line ending is kept by `unfill`.
+    let stripped = text.strip_suffix(options.line_ending.as_str());
+    let new_line_ending = new_options.line_ending.as_str();
+
+    new_options.initial_indent = options.initial_indent;
+    new_options.subsequent_indent = options.subsequent_indent;
+    let mut refilled = fill(stripped.unwrap_or(&text), new_options);
+
+    // Add back right line ending if we stripped one off above.
+    if stripped.is_some() {
+        refilled.push_str(new_line_ending);
+    }
+    refilled
+}
+
+/// Wrap a line of text at a given width.
+///
+/// The result is a vector of lines, each line is of type [`Cow<'_,
+/// str>`](Cow), which means that the line will borrow from the input
+/// `&str` if possible. The lines do not have trailing whitespace,
+/// including a final `'\n'`. Please use the [`fill`] function if you
+/// need a [`String`] instead.
+///
+/// The easiest way to use this function is to pass an integer for
+/// `width_or_options`:
+///
+/// ```
+/// use textwrap::wrap;
+///
+/// let lines = wrap("Memory safety without garbage collection.", 15);
+/// assert_eq!(lines, &[
+///     "Memory safety",
+///     "without garbage",
+///     "collection.",
+/// ]);
+/// ```
+///
+/// If you need to customize the wrapping, you can pass an [`Options`]
+/// instead of a `usize`:
+///
+/// ```
+/// use textwrap::{wrap, Options};
+///
+/// let options = Options::new(15)
+///     .initial_indent("- ")
+///     .subsequent_indent("  ");
+/// let lines = wrap("Memory safety without garbage collection.", &options);
+/// assert_eq!(lines, &[
+///     "- Memory safety",
+///     "  without",
+///     "  garbage",
+///     "  collection.",
+/// ]);
+/// ```
+///
+/// # Optimal-Fit Wrapping
+///
+/// By default, `wrap` will try to ensure an even right margin by
+/// finding breaks which avoid short lines. We call this an
+/// “optimal-fit algorithm” since the line breaks are computed by
+/// considering all possible line breaks. The alternative is a
+/// “first-fit algorithm” which simply accumulates words until they no
+/// longer fit on the line.
+///
+/// As an example, using the first-fit algorithm to wrap the famous
+/// Hamlet quote “To be, or not to be: that is the question” in a
+/// narrow column with room for only 10 characters looks like this:
+///
+/// ```
+/// # use textwrap::{WrapAlgorithm::FirstFit, Options, wrap};
+/// #
+/// # let lines = wrap("To be, or not to be: that is the question",
+/// #                  Options::new(10).wrap_algorithm(FirstFit));
+/// # assert_eq!(lines.join("\n") + "\n", "\
+/// To be, or
+/// not to be:
+/// that is
+/// the
+/// question
+/// # ");
+/// ```
+///
+/// Notice how the second to last line is quite narrow because
+/// “question” was too large to fit? The greedy first-fit algorithm
+/// doesn’t look ahead, so it has no other option than to put
+/// “question” onto its own line.
+///
+/// With the optimal-fit wrapping algorithm, the previous lines are
+/// shortened slightly in order to make the word “is” go into the
+/// second last line:
+///
+/// ```
+/// # #[cfg(feature = "smawk")] {
+/// # use textwrap::{Options, WrapAlgorithm, wrap};
+/// #
+/// # let lines = wrap(
+/// #     "To be, or not to be: that is the question",
+/// #     Options::new(10).wrap_algorithm(WrapAlgorithm::new_optimal_fit())
+/// # );
+/// # assert_eq!(lines.join("\n") + "\n", "\
+/// To be,
+/// or not to
+/// be: that
+/// is the
+/// question
+/// # "); }
+/// ```
+///
+/// Please see [`WrapAlgorithm`] for details on the choices.
+///
+/// # Examples
+///
+/// The returned lines are of type `Cow<'_, str>`. If possible, the
+/// wrapped lines will borrow from the input string. As an example,
+/// with a hanging indentation, the first line can borrow from the
+/// input, but the subsequent lines become owned strings:
+///
+/// ```
+/// use std::borrow::Cow::{Borrowed, Owned};
+/// use textwrap::{wrap, Options};
+///
+/// let options = Options::new(15).subsequent_indent("....");
+/// let lines = wrap("Wrapping text all day long.", &options);
+/// let annotated = lines
+///     .iter()
+///     .map(|line| match line {
+///         Borrowed(text) => format!("[Borrowed] {}", text),
+///         Owned(text) => format!("[Owned]    {}", text),
+///     })
+///     .collect::<Vec<_>>();
+/// assert_eq!(
+///     annotated,
+///     &[
+///         "[Borrowed] Wrapping text",
+///         "[Owned]    ....all day",
+///         "[Owned]    ....long.",
+///     ]
+/// );
+/// ```
+///
+/// ## Leading and Trailing Whitespace
+///
+/// As a rule, leading whitespace (indentation) is preserved and
+/// trailing whitespace is discarded.
+///
+/// In more detail, when wrapping words into lines, words are found
+/// by splitting the input text on space characters. One or more
+/// spaces (shown here as “␣”) are attached to the end of each word:
+///
+/// ```text
+/// "Foo␣␣␣bar␣baz" -> ["Foo␣␣␣", "bar␣", "baz"]
+/// ```
+///
+/// These words are then put into lines. The interword whitespace is
+/// preserved, unless the lines are wrapped so that the `"Foo␣␣␣"`
+/// word falls at the end of a line:
+///
+/// ```
+/// use textwrap::wrap;
+///
+/// assert_eq!(wrap("Foo   bar baz", 10), vec!["Foo   bar", "baz"]);
+/// assert_eq!(wrap("Foo   bar baz", 8), vec!["Foo", "bar baz"]);
+/// ```
+///
+/// Notice how the trailing whitespace is removed in both cases: in the
+/// first example, `"bar␣"` becomes `"bar"` and in the second case
+/// `"Foo␣␣␣"` becomes `"Foo"`.
+///
+/// Leading whitespace is preserved when the following word fits on
+/// the first line. To understand this, consider how words are found
+/// in a text with leading spaces:
+///
+/// ```text
+/// "␣␣foo␣bar" -> ["␣␣", "foo␣", "bar"]
+/// ```
+///
+/// When put into lines, the indentation is preserved if `"foo"` fits
+/// on the first line, otherwise you end up with an empty line:
+///
+/// ```
+/// use textwrap::wrap;
+///
+/// assert_eq!(wrap("  foo bar", 8), vec!["  foo", "bar"]);
+/// assert_eq!(wrap("  foo bar", 4), vec!["", "foo", "bar"]);
+/// ```
+pub fn wrap<'a, Opt>(text: &str, width_or_options: Opt) -> Vec<Cow<'_, str>>
+where
+    Opt: Into<Options<'a>>,
+{
+    let options: Options = width_or_options.into();
+    let line_ending_str = options.line_ending.as_str();
+
+    let mut lines = Vec::new();
+    for line in text.split(line_ending_str) {
+        wrap_single_line(line, &options, &mut lines);
+    }
+
+    lines
+}
+
+fn wrap_single_line<'a>(line: &'a str, options: &Options<'_>, lines: &mut Vec<Cow<'a, str>>) {
+    let indent = if lines.is_empty() {
+        options.initial_indent
+    } else {
+        options.subsequent_indent
+    };
+    if line.len() < options.width && indent.is_empty() {
+        lines.push(Cow::from(line.trim_end_matches(' ')));
+    } else {
+        wrap_single_line_slow_path(line, options, lines)
+    }
+}
+
+/// Wrap a single line of text.
+///
+/// This is taken when `line` is longer than `options.width`.
+fn wrap_single_line_slow_path<'a>(
+    line: &'a str,
+    options: &Options<'_>,
+    lines: &mut Vec<Cow<'a, str>>,
+) {
+    let initial_width = options
+        .width
+        .saturating_sub(core::display_width(options.initial_indent));
+    let subsequent_width = options
+        .width
+        .saturating_sub(core::display_width(options.subsequent_indent));
+    let line_widths = [initial_width, subsequent_width];
+
+    let words = options.word_separator.find_words(line);
+    let split_words = word_splitters::split_words(words, &options.word_splitter);
+    let broken_words = if options.break_words {
+        let mut broken_words = core::break_words(split_words, line_widths[1]);
+        if !options.initial_indent.is_empty() {
+            // Without this, the first word will always go into the
+            // first line. However, since we break words based on the
+            // _second_ line width, it can be wrong to unconditionally
+            // put the first word onto the first line. An empty
+            // zero-width word fixes this.
+            broken_words.insert(0, core::Word::from(""));
+        }
+        broken_words
+    } else {
+        split_words.collect::<Vec<_>>()
+    };
+
+    let wrapped_words = options.wrap_algorithm.wrap(&broken_words, &line_widths);
+
+    let mut idx = 0;
+    for words in wrapped_words {
+        let last_word = match words.last() {
+            None => {
+                lines.push(Cow::from(""));
+                continue;
+            }
+            Some(word) => word,
+        };
+
+        // We assume here that all words are contiguous in `line`.
+        // That is, the sum of their lengths should add up to the
+        // length of `line`.
+        let len = words
+            .iter()
+            .map(|word| word.len() + word.whitespace.len())
+            .sum::<usize>()
+            - last_word.whitespace.len();
+
+        // The result is owned if we have indentation, otherwise we
+        // can simply borrow an empty string.
+        let mut result = if lines.is_empty() && !options.initial_indent.is_empty() {
+            Cow::Owned(options.initial_indent.to_owned())
+        } else if !lines.is_empty() && !options.subsequent_indent.is_empty() {
+            Cow::Owned(options.subsequent_indent.to_owned())
+        } else {
+            // We can use an empty string here since string
+            // concatenation for `Cow` preserves a borrowed value when
+            // either side is empty.
+            Cow::from("")
+        };
+
+        result += &line[idx..idx + len];
+
+        if !last_word.penalty.is_empty() {
+            result.to_mut().push_str(last_word.penalty);
+        }
+
+        lines.push(result);
+
+        // Advance by the length of `result`, plus the length of
+        // `last_word.whitespace` -- even if we had a penalty, we need
+        // to skip over the whitespace.
+        idx += len + last_word.whitespace.len();
+    }
+}
+
+/// Wrap text into columns with a given total width.
+///
+/// The `left_gap`, `middle_gap` and `right_gap` arguments specify the
+/// strings to insert before, between, and after the columns. The
+/// total width of all columns and all gaps is specified using the
+/// `total_width_or_options` argument. This argument can simply be an
+/// integer if you want to use default settings when wrapping, or it
+/// can be an [`Options`] value if you want to customize the wrapping.
+///
+/// If the columns are narrow, it is recommended to set
+/// [`Options::break_words`] to `true` to prevent words from
+/// protruding into the margins.
+///
+/// The per-column width is computed like this:
+///
+/// ```
+/// # let (left_gap, middle_gap, right_gap) = ("", "", "");
+/// # let columns = 2;
+/// # let options = textwrap::Options::new(80);
+/// let inner_width = options.width
+///     - textwrap::core::display_width(left_gap)
+///     - textwrap::core::display_width(right_gap)
+///     - textwrap::core::display_width(middle_gap) * (columns - 1);
+/// let column_width = inner_width / columns;
+/// ```
+///
+/// The `text` is wrapped using [`wrap`] and the given `options`
+/// argument, but the width is overwritten to the computed
+/// `column_width`.
+///
+/// # Panics
+///
+/// Panics if `columns` is zero.
+///
+/// # Examples
+///
+/// ```
+/// use textwrap::wrap_columns;
+///
+/// let text = "\
+/// This is an example text, which is wrapped into three columns. \
+/// Notice how the final column can be shorter than the others.";
+///
+/// #[cfg(feature = "smawk")]
+/// assert_eq!(wrap_columns(text, 3, 50, "| ", " | ", " |"),
+///            vec!["| This is       | into three    | column can be  |",
+///                 "| an example    | columns.      | shorter than   |",
+///                 "| text, which   | Notice how    | the others.    |",
+///                 "| is wrapped    | the final     |                |"]);
+///
+/// // Without the `smawk` feature, the middle column is a little more uneven:
+/// #[cfg(not(feature = "smawk"))]
+/// assert_eq!(wrap_columns(text, 3, 50, "| ", " | ", " |"),
+///            vec!["| This is an    | three         | column can be  |",
+///                 "| example text, | columns.      | shorter than   |",
+///                 "| which is      | Notice how    | the others.    |",
+///                 "| wrapped into  | the final     |                |"]);
+pub fn wrap_columns<'a, Opt>(
+    text: &str,
+    columns: usize,
+    total_width_or_options: Opt,
+    left_gap: &str,
+    middle_gap: &str,
+    right_gap: &str,
+) -> Vec<String>
+where
+    Opt: Into<Options<'a>>,
+{
+    assert!(columns > 0);
+
+    let mut options: Options = total_width_or_options.into();
+
+    let inner_width = options
+        .width
+        .saturating_sub(core::display_width(left_gap))
+        .saturating_sub(core::display_width(right_gap))
+        .saturating_sub(core::display_width(middle_gap) * (columns - 1));
+
+    let column_width = std::cmp::max(inner_width / columns, 1);
+    options.width = column_width;
+    let last_column_padding = " ".repeat(inner_width % column_width);
+    let wrapped_lines = wrap(text, options);
+    let lines_per_column =
+        wrapped_lines.len() / columns + usize::from(wrapped_lines.len() % columns > 0);
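+    // The wrapped lines are distributed column-major: the first
+    // `lines_per_column` lines fill the left column, the next chunk
+    // fills the second column, and so on.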
+    let mut lines = Vec::new();
+    for line_no in 0..lines_per_column {
+        let mut line = String::from(left_gap);
+        for column_no in 0..columns {
+            match wrapped_lines.get(line_no + column_no * lines_per_column) {
+                Some(column_line) => {
+                    line.push_str(column_line);
+                    line.push_str(&" ".repeat(column_width - core::display_width(column_line)));
+                }
+                None => {
+                    line.push_str(&" ".repeat(column_width));
+                }
+            }
+            if column_no == columns - 1 {
+                line.push_str(&last_column_padding);
+            } else {
+                line.push_str(middle_gap);
+            }
+        }
+        line.push_str(right_gap);
+        lines.push(line);
+    }
+
+    lines
+}
+
+/// Fill `text` in-place without reallocating the input string.
+///
+/// This function works by modifying the input string: some `' '`
+/// characters will be replaced by `'\n'` characters. The rest of the
+/// text remains untouched.
+///
+/// Since we can only replace existing whitespace in the input with
+/// `'\n'` (there is no space for `"\r\n"`), we cannot do hyphenation
+/// nor can we split words longer than the line width. We also need to
+/// use `AsciiSpace` as the word separator since we need `' '`
+/// characters between words in order to replace some of them with a
+/// `'\n'`. Indentation is also ruled out. In other words,
+/// `fill_inplace(width)` behaves as if you had called [`fill`] with
+/// these options:
+///
+/// ```
+/// # use textwrap::{core, LineEnding, Options, WordSplitter, WordSeparator, WrapAlgorithm};
+/// # let width = 80;
+/// Options::new(width)
+///     .break_words(false)
+///     .line_ending(LineEnding::LF)
+///     .word_separator(WordSeparator::AsciiSpace)
+///     .wrap_algorithm(WrapAlgorithm::FirstFit)
+///     .word_splitter(WordSplitter::NoHyphenation);
+/// ```
+///
+/// The wrap algorithm is [`WrapAlgorithm::FirstFit`] since this
+/// is the fastest algorithm — and the main reason to use
+/// `fill_inplace` is to get the string broken into newlines as fast
+/// as possible.
+///
+/// A last difference is that (unlike [`fill`]) `fill_inplace` can
+/// leave trailing whitespace on lines. This is because we wrap by
+/// inserting a `'\n'` at the final whitespace in the input string:
+///
+/// ```
+/// let mut text = String::from("Hello   World!");
+/// textwrap::fill_inplace(&mut text, 10);
+/// assert_eq!(text, "Hello  \nWorld!");
+/// ```
+///
+/// If we didn't do this, the word `World!` would end up being
+/// indented. You can avoid this if you make sure that your input text
+/// has no double spaces.
+///
+/// # Performance
+///
+/// In benchmarks, `fill_inplace` is about twice as fast as [`fill`].
+/// Please see the [`linear`
+/// benchmark](https://github.com/mgeisler/textwrap/blob/master/benchmarks/linear.rs)
+/// for details.
+pub fn fill_inplace(text: &mut String, width: usize) {
+    let mut indices = Vec::new();
+
+    let mut offset = 0;
+    for line in text.split('\n') {
+        let words = WordSeparator::AsciiSpace
+            .find_words(line)
+            .collect::<Vec<_>>();
+        let wrapped_words = wrap_algorithms::wrap_first_fit(&words, &[width as f64]);
+
+        let mut line_offset = offset;
+        for words in &wrapped_words[..wrapped_words.len() - 1] {
+            let line_len = words
+                .iter()
+                .map(|word| word.len() + word.whitespace.len())
+                .sum::<usize>();
+
+            line_offset += line_len;
+            // We've advanced past all ' ' characters -- want to move
+            // one ' ' backwards and insert our '\n' there.
+            indices.push(line_offset - 1);
+        }
+
+        // Advance past entire line, plus the '\n' which was removed
+        // by the split call above.
+        offset += line.len() + 1;
+    }
+
+    let mut bytes = std::mem::take(text).into_bytes();
+    for idx in indices {
+        bytes[idx] = b'\n';
+    }
+    *text = String::from_utf8(bytes).unwrap();
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[cfg(feature = "hyphenation")]
+    use hyphenation::{Language, Load, Standard};
+
+    #[test]
+    fn options_agree_with_usize() {
+        let opt_usize = Options::from(42_usize);
+        let opt_options = Options::new(42);
+
+        assert_eq!(opt_usize.width, opt_options.width);
+        assert_eq!(opt_usize.initial_indent, opt_options.initial_indent);
+        assert_eq!(opt_usize.subsequent_indent, opt_options.subsequent_indent);
+        assert_eq!(opt_usize.break_words, opt_options.break_words);
+        assert_eq!(
+            opt_usize.word_splitter.split_points("hello-world"),
+            opt_options.word_splitter.split_points("hello-world")
+        );
+    }
+
+    #[test]
+    fn no_wrap() {
+        assert_eq!(wrap("foo", 10), vec!["foo"]);
+    }
+
+    #[test]
+    fn wrap_simple() {
+        assert_eq!(wrap("foo bar baz", 5), vec!["foo", "bar", "baz"]);
+    }
+
+    #[test]
+    fn to_be_or_not() {
+        assert_eq!(
+            wrap(
+                "To be, or not to be, that is the question.",
+                Options::new(10).wrap_algorithm(WrapAlgorithm::FirstFit)
+            ),
+            vec!["To be, or", "not to be,", "that is", "the", "question."]
+        );
+    }
+
+    #[test]
+    fn multiple_words_on_first_line() {
+        assert_eq!(wrap("foo bar baz", 10), vec!["foo bar", "baz"]);
+    }
+
+    #[test]
+    fn long_word() {
+        assert_eq!(wrap("foo", 0), vec!["f", "o", "o"]);
+    }
+
+    #[test]
+    fn long_words() {
+        assert_eq!(wrap("foo bar", 0), vec!["f", "o", "o", "b", "a", "r"]);
+    }
+
+    #[test]
+    fn max_width() {
+        assert_eq!(wrap("foo bar", usize::MAX), vec!["foo bar"]);
+
+        let text = "Hello there! This is some English text. \
+                    It should not be wrapped given the extents below.";
+        assert_eq!(wrap(text, usize::MAX), vec![text]);
+    }
+
+    #[test]
+    fn leading_whitespace() {
+        assert_eq!(wrap("  foo bar", 6), vec!["  foo", "bar"]);
+    }
+
+    #[test]
+    fn leading_whitespace_empty_first_line() {
+        // If there is no space for the first word, the first line
+        // will be empty. This is because the string is split into
+        // words like [" ", "foobar ", "baz"], which puts "foobar " on
+        // the second line. We never output trailing whitespace
+        assert_eq!(wrap(" foobar baz", 6), vec!["", "foobar", "baz"]);
+    }
+
+    #[test]
+    fn trailing_whitespace() {
+        // Whitespace is only significant inside a line. After a line
+        // gets too long and is broken, the first word starts in
+        // column zero and is not indented.
+        assert_eq!(wrap("foo     bar     baz  ", 5), vec!["foo", "bar", "baz"]);
+    }
+
+    #[test]
+    fn issue_99() {
+        // We did not reset the in_whitespace flag correctly and did
+        // not handle single-character words after a line break.
+        assert_eq!(
+            wrap("aaabbbccc x yyyzzzwww", 9),
+            vec!["aaabbbccc", "x", "yyyzzzwww"]
+        );
+    }
+
+    #[test]
+    fn issue_129() {
+        // The dash is an em-dash which takes up four bytes. We used
+        // to panic since we tried to index into the character.
+        let options = Options::new(1).word_separator(WordSeparator::AsciiSpace);
+        assert_eq!(wrap("x – x", options), vec!["x", "–", "x"]);
+    }
+
+    #[test]
+    fn wide_character_handling() {
+        assert_eq!(wrap("Hello, World!", 15), vec!["Hello, World!"]);
+        assert_eq!(
+            wrap(
+                "Hello, World!",
+                Options::new(15).word_separator(WordSeparator::AsciiSpace)
+            ),
+            vec!["Hello,", "World!"]
+        );
+
+        // Wide characters are allowed to break if the
+        // unicode-linebreak feature is enabled.
+        #[cfg(feature = "unicode-linebreak")]
+        assert_eq!(
+            wrap(
+                "Hello, World!",
+                Options::new(15).word_separator(WordSeparator::UnicodeBreakProperties),
+            ),
+            vec!["Hello, W", "orld!"]
+        );
+    }
+
+    #[test]
+    fn empty_line_is_indented() {
+        // Previously, indentation was not applied to empty lines.
+        // However, this is somewhat inconsistent and undesirable if
+        // the indentation is something like a border ("| ") which you
+        // want to apply to all lines, empty or not.
+        let options = Options::new(10).initial_indent("!!!");
+        assert_eq!(fill("", &options), "!!!");
+    }
+
+    #[test]
+    fn indent_single_line() {
+        let options = Options::new(10).initial_indent(">>>"); // No trailing space
+        assert_eq!(fill("foo", &options), ">>>foo");
+    }
+
+    #[test]
+    fn indent_first_emoji() {
+        let options = Options::new(10).initial_indent("👉👉");
+        assert_eq!(
+            wrap("x x x x x x x x x x x x x", &options),
+            vec!["👉👉x x x", "x x x x x", "x x x x x"]
+        );
+    }
+
+    #[test]
+    fn indent_multiple_lines() {
+        let options = Options::new(6).initial_indent("* ").subsequent_indent("  ");
+        assert_eq!(
+            wrap("foo bar baz", &options),
+            vec!["* foo", "  bar", "  baz"]
+        );
+    }
+
+    #[test]
+    fn only_initial_indent_multiple_lines() {
+        let options = Options::new(10).initial_indent("  ");
+        assert_eq!(wrap("foo\nbar\nbaz", &options), vec!["  foo", "bar", "baz"]);
+    }
+
+    #[test]
+    fn only_subsequent_indent_multiple_lines() {
+        let options = Options::new(10).subsequent_indent("  ");
+        assert_eq!(
+            wrap("foo\nbar\nbaz", &options),
+            vec!["foo", "  bar", "  baz"]
+        );
+    }
+
+    #[test]
+    fn indent_break_words() {
+        let options = Options::new(5).initial_indent("* ").subsequent_indent("  ");
+        assert_eq!(wrap("foobarbaz", &options), vec!["* foo", "  bar", "  baz"]);
+    }
+
+    #[test]
+    fn initial_indent_break_words() {
+        // This is a corner-case showing how the long word is broken
+        // according to the width of the subsequent lines. The first
+        // fragment of the word no longer fits on the first line,
+        // which ends up being pure indentation.
+        let options = Options::new(5).initial_indent("-->");
+        assert_eq!(wrap("foobarbaz", &options), vec!["-->", "fooba", "rbaz"]);
+    }
+
+    #[test]
+    fn hyphens() {
+        assert_eq!(wrap("foo-bar", 5), vec!["foo-", "bar"]);
+    }
+
+    #[test]
+    fn trailing_hyphen() {
+        let options = Options::new(5).break_words(false);
+        assert_eq!(wrap("foobar-", &options), vec!["foobar-"]);
+    }
+
+    #[test]
+    fn multiple_hyphens() {
+        assert_eq!(wrap("foo-bar-baz", 5), vec!["foo-", "bar-", "baz"]);
+    }
+
+    #[test]
+    fn hyphens_flag() {
+        let options = Options::new(5).break_words(false);
+        assert_eq!(
+            wrap("The --foo-bar flag.", &options),
+            vec!["The", "--foo-", "bar", "flag."]
+        );
+    }
+
+    #[test]
+    fn repeated_hyphens() {
+        let options = Options::new(4).break_words(false);
+        assert_eq!(wrap("foo--bar", &options), vec!["foo--bar"]);
+    }
+
+    #[test]
+    fn hyphens_alphanumeric() {
+        assert_eq!(wrap("Na2-CH4", 5), vec!["Na2-", "CH4"]);
+    }
+
+    #[test]
+    fn hyphens_non_alphanumeric() {
+        let options = Options::new(5).break_words(false);
+        assert_eq!(wrap("foo(-)bar", &options), vec!["foo(-)bar"]);
+    }
+
+    #[test]
+    fn multiple_splits() {
+        assert_eq!(wrap("foo-bar-baz", 9), vec!["foo-bar-", "baz"]);
+    }
+
+    #[test]
+    fn forced_split() {
+        let options = Options::new(5).break_words(false);
+        assert_eq!(wrap("foobar-baz", &options), vec!["foobar-", "baz"]);
+    }
+
+    #[test]
+    fn multiple_unbroken_words_issue_193() {
+        let options = Options::new(3).break_words(false);
+        assert_eq!(
+            wrap("small large tiny", &options),
+            vec!["small", "large", "tiny"]
+        );
+        assert_eq!(
+            wrap("small  large   tiny", &options),
+            vec!["small", "large", "tiny"]
+        );
+    }
+
+    #[test]
+    fn very_narrow_lines_issue_193() {
+        let options = Options::new(1).break_words(false);
+        assert_eq!(wrap("fooo x y", &options), vec!["fooo", "x", "y"]);
+        assert_eq!(wrap("fooo   x     y", &options), vec!["fooo", "x", "y"]);
+    }
+
+    #[test]
+    fn simple_hyphens() {
+        let options = Options::new(8).word_splitter(WordSplitter::HyphenSplitter);
+        assert_eq!(wrap("foo bar-baz", &options), vec!["foo bar-", "baz"]);
+    }
+
+    #[test]
+    fn no_hyphenation() {
+        let options = Options::new(8).word_splitter(WordSplitter::NoHyphenation);
+        assert_eq!(wrap("foo bar-baz", &options), vec!["foo", "bar-baz"]);
+    }
+
+    #[test]
+    #[cfg(feature = "hyphenation")]
+    fn auto_hyphenation_double_hyphenation() {
+        let dictionary = Standard::from_embedded(Language::EnglishUS).unwrap();
+        let options = Options::new(10);
+        assert_eq!(
+            wrap("Internationalization", &options),
+            vec!["Internatio", "nalization"]
+        );
+
+        let options = Options::new(10).word_splitter(WordSplitter::Hyphenation(dictionary));
+        assert_eq!(
+            wrap("Internationalization", &options),
+            vec!["Interna-", "tionaliza-", "tion"]
+        );
+    }
+
+    #[test]
+    #[cfg(feature = "hyphenation")]
+    fn auto_hyphenation_issue_158() {
+        let dictionary = Standard::from_embedded(Language::EnglishUS).unwrap();
+        let options = Options::new(10);
+        assert_eq!(
+            wrap("participation is the key to success", &options),
+            vec!["participat", "ion is", "the key to", "success"]
+        );
+
+        let options = Options::new(10).word_splitter(WordSplitter::Hyphenation(dictionary));
+        assert_eq!(
+            wrap("participation is the key to success", &options),
+            vec!["partici-", "pation is", "the key to", "success"]
+        );
+    }
+
+    #[test]
+    #[cfg(feature = "hyphenation")]
+    fn split_len_hyphenation() {
+        // Test that hyphenation takes the width of the whitespace
+        // into account.
+        let dictionary = Standard::from_embedded(Language::EnglishUS).unwrap();
+        let options = Options::new(15).word_splitter(WordSplitter::Hyphenation(dictionary));
+        assert_eq!(
+            wrap("garbage   collection", &options),
+            vec!["garbage   col-", "lection"]
+        );
+    }
+
+    #[test]
+    #[cfg(feature = "hyphenation")]
+    fn borrowed_lines() {
+        // Lines that end with an extra hyphen are owned, the final
+        // line is borrowed.
+        use std::borrow::Cow::{Borrowed, Owned};
+        let dictionary = Standard::from_embedded(Language::EnglishUS).unwrap();
+        let options = Options::new(10).word_splitter(WordSplitter::Hyphenation(dictionary));
+        let lines = wrap("Internationalization", &options);
+        assert_eq!(lines, vec!["Interna-", "tionaliza-", "tion"]);
+        if let Borrowed(s) = lines[0] {
+            assert!(false, "should not have been borrowed: {:?}", s);
+        }
+        if let Borrowed(s) = lines[1] {
+            assert!(false, "should not have been borrowed: {:?}", s);
+        }
+        if let Owned(ref s) = lines[2] {
+            assert!(false, "should not have been owned: {:?}", s);
+        }
+    }
+
+    #[test]
+    #[cfg(feature = "hyphenation")]
+    fn auto_hyphenation_with_hyphen() {
+        let dictionary = Standard::from_embedded(Language::EnglishUS).unwrap();
+        let options = Options::new(8).break_words(false);
+        assert_eq!(
+            wrap("over-caffinated", &options),
+            vec!["over-", "caffinated"]
+        );
+
+        let options = options.word_splitter(WordSplitter::Hyphenation(dictionary));
+        assert_eq!(
+            wrap("over-caffinated", &options),
+            vec!["over-", "caffi-", "nated"]
+        );
+    }
+
+    #[test]
+    fn break_words() {
+        assert_eq!(wrap("foobarbaz", 3), vec!["foo", "bar", "baz"]);
+    }
+
+    #[test]
+    fn break_words_wide_characters() {
+        // Even the poor man's version of `ch_width` counts these
+        // characters as wide.
+        let options = Options::new(5).word_separator(WordSeparator::AsciiSpace);
+        assert_eq!(wrap("Hello", options), vec!["He", "ll", "o"]);
+    }
+
+    #[test]
+    fn break_words_zero_width() {
+        assert_eq!(wrap("foobar", 0), vec!["f", "o", "o", "b", "a", "r"]);
+    }
+
+    #[test]
+    fn break_long_first_word() {
+        assert_eq!(wrap("testx y", 4), vec!["test", "x y"]);
+    }
+
+    #[test]
+    fn break_words_line_breaks() {
+        assert_eq!(fill("ab\ncdefghijkl", 5), "ab\ncdefg\nhijkl");
+        assert_eq!(fill("abcdefgh\nijkl", 5), "abcde\nfgh\nijkl");
+    }
+
+    #[test]
+    fn break_words_empty_lines() {
+        assert_eq!(
+            fill("foo\nbar", &Options::new(2).break_words(false)),
+            "foo\nbar"
+        );
+    }
+
+    #[test]
+    fn preserve_line_breaks() {
+        assert_eq!(fill("", 80), "");
+        assert_eq!(fill("\n", 80), "\n");
+        assert_eq!(fill("\n\n\n", 80), "\n\n\n");
+        assert_eq!(fill("test\n", 80), "test\n");
+        assert_eq!(fill("test\n\na\n\n", 80), "test\n\na\n\n");
+        assert_eq!(
+            fill(
+                "1 3 5 7\n1 3 5 7",
+                Options::new(7).wrap_algorithm(WrapAlgorithm::FirstFit)
+            ),
+            "1 3 5 7\n1 3 5 7"
+        );
+        assert_eq!(
+            fill(
+                "1 3 5 7\n1 3 5 7",
+                Options::new(5).wrap_algorithm(WrapAlgorithm::FirstFit)
+            ),
+            "1 3 5\n7\n1 3 5\n7"
+        );
+    }
+
+    #[test]
+    fn preserve_line_breaks_with_whitespace() {
+        assert_eq!(fill("  ", 80), "");
+        assert_eq!(fill("  \n  ", 80), "\n");
+        assert_eq!(fill("  \n \n  \n ", 80), "\n\n\n");
+    }
+
+    #[test]
+    fn non_breaking_space() {
+        let options = Options::new(5).break_words(false);
+        assert_eq!(fill("foo\u{00A0}bar\u{00A0}baz", &options), "foo\u{00A0}bar\u{00A0}baz");
+    }
+
+    #[test]
+    fn non_breaking_hyphen() {
+        let options = Options::new(5).break_words(false);
+        assert_eq!(fill("foo‑bar‑baz", &options), "foo‑bar‑baz");
+    }
+
+    #[test]
+    fn fill_simple() {
+        assert_eq!(fill("foo bar baz", 10), "foo bar\nbaz");
+    }
+
+    #[test]
+    fn fill_colored_text() {
+        // The words are much longer than 6 bytes, but they remain
+        // intact after filling the text.
+        let green_hello = "\u{1b}[0m\u{1b}[32mHello\u{1b}[0m";
+        let blue_world = "\u{1b}[0m\u{1b}[34mWorld!\u{1b}[0m";
+        assert_eq!(
+            fill(&(String::from(green_hello) + " " + blue_world), 6),
+            String::from(green_hello) + "\n" + blue_world
+        );
+    }
+
+    #[test]
+    fn fill_unicode_boundary() {
+        // https://github.com/mgeisler/textwrap/issues/390
+        fill("\u{1b}!Ͽ", 10);
+    }
+
+    #[test]
+    fn fill_inplace_empty() {
+        let mut text = String::from("");
+        fill_inplace(&mut text, 80);
+        assert_eq!(text, "");
+    }
+
+    #[test]
+    fn fill_inplace_simple() {
+        let mut text = String::from("foo bar baz");
+        fill_inplace(&mut text, 10);
+        assert_eq!(text, "foo bar\nbaz");
+    }
+
+    #[test]
+    fn fill_inplace_multiple_lines() {
+        let mut text = String::from("Some text to wrap over multiple lines");
+        fill_inplace(&mut text, 12);
+        assert_eq!(text, "Some text to\nwrap over\nmultiple\nlines");
+    }
+
+    #[test]
+    fn fill_inplace_long_word() {
+        let mut text = String::from("Internationalization is hard");
+        fill_inplace(&mut text, 10);
+        assert_eq!(text, "Internationalization\nis hard");
+    }
+
+    #[test]
+    fn fill_inplace_no_hyphen_splitting() {
+        let mut text = String::from("A well-chosen example");
+        fill_inplace(&mut text, 10);
+        assert_eq!(text, "A\nwell-chosen\nexample");
+    }
+
+    #[test]
+    fn fill_inplace_newlines() {
+        let mut text = String::from("foo bar\n\nbaz\n\n\n");
+        fill_inplace(&mut text, 10);
+        assert_eq!(text, "foo bar\n\nbaz\n\n\n");
+    }
+
+    #[test]
+    fn fill_inplace_newlines_reset_line_width() {
+        let mut text = String::from("1 3 5\n1 3 5 7 9\n1 3 5 7 9 1 3");
+        fill_inplace(&mut text, 10);
+        assert_eq!(text, "1 3 5\n1 3 5 7 9\n1 3 5 7 9\n1 3");
+    }
+
+    #[test]
+    fn fill_inplace_leading_whitespace() {
+        let mut text = String::from("  foo bar baz");
+        fill_inplace(&mut text, 10);
+        assert_eq!(text, "  foo bar\nbaz");
+    }
+
+    #[test]
+    fn fill_inplace_trailing_whitespace() {
+        let mut text = String::from("foo bar baz  ");
+        fill_inplace(&mut text, 10);
+        assert_eq!(text, "foo bar\nbaz  ");
+    }
+
+    #[test]
+    fn fill_inplace_interior_whitespace() {
+        // To avoid an unwanted indentation of "baz", it is important
+        // to replace the final ' ' with '\n'.
+        let mut text = String::from("foo  bar    baz");
+        fill_inplace(&mut text, 10);
+        assert_eq!(text, "foo  bar   \nbaz");
+    }
+
+    #[test]
+    fn unfill_simple() {
+        let (text, options) = unfill("foo\nbar");
+        assert_eq!(text, "foo bar");
+        assert_eq!(options.width, 3);
+        assert_eq!(options.line_ending, LineEnding::LF);
+    }
+
+    #[test]
+    fn unfill_no_new_line() {
+        let (text, options) = unfill("foo bar");
+        assert_eq!(text, "foo bar");
+        assert_eq!(options.width, 7);
+        assert_eq!(options.line_ending, LineEnding::LF);
+    }
+
+    #[test]
+    fn unfill_simple_crlf() {
+        let (text, options) = unfill("foo\r\nbar");
+        assert_eq!(text, "foo bar");
+        assert_eq!(options.width, 3);
+        assert_eq!(options.line_ending, LineEnding::CRLF);
+    }
+
+    #[test]
+    fn unfill_mixed_new_lines() {
+        let (text, options) = unfill("foo\r\nbar\nbaz");
+        assert_eq!(text, "foo bar baz");
+        assert_eq!(options.width, 3);
+        assert_eq!(options.line_ending, LineEnding::LF);
+    }
+
+    #[test]
+    fn unfill_trailing_newlines() {
+        let (text, options) = unfill("foo\nbar\n\n\n");
+        assert_eq!(text, "foo bar\n");
+        assert_eq!(options.width, 3);
+    }
+
+    #[test]
+    fn unfill_mixed_trailing_newlines() {
+        let (text, options) = unfill("foo\r\nbar\n\r\n\n");
+        assert_eq!(text, "foo bar\n");
+        assert_eq!(options.width, 3);
+        assert_eq!(options.line_ending, LineEnding::LF);
+    }
+
+    #[test]
+    fn unfill_trailing_crlf() {
+        let (text, options) = unfill("foo bar\r\n");
+        assert_eq!(text, "foo bar\r\n");
+        assert_eq!(options.width, 7);
+        assert_eq!(options.line_ending, LineEnding::CRLF);
+    }
+
+    #[test]
+    fn unfill_initial_indent() {
+        let (text, options) = unfill("  foo\nbar\nbaz");
+        assert_eq!(text, "foo bar baz");
+        assert_eq!(options.width, 5);
+        assert_eq!(options.initial_indent, "  ");
+    }
+
+    #[test]
+    fn unfill_differing_indents() {
+        let (text, options) = unfill("  foo\n    bar\n  baz");
+        assert_eq!(text, "foo   bar baz");
+        assert_eq!(options.width, 7);
+        assert_eq!(options.initial_indent, "  ");
+        assert_eq!(options.subsequent_indent, "  ");
+    }
+
+    #[test]
+    fn unfill_list_item() {
+        let (text, options) = unfill("* foo\n  bar\n  baz");
+        assert_eq!(text, "foo bar baz");
+        assert_eq!(options.width, 5);
+        assert_eq!(options.initial_indent, "* ");
+        assert_eq!(options.subsequent_indent, "  ");
+    }
+
+    #[test]
+    fn unfill_multiple_char_prefix() {
+        let (text, options) = unfill("    // foo bar\n    // baz\n    // quux");
+        assert_eq!(text, "foo bar baz quux");
+        assert_eq!(options.width, 14);
+        assert_eq!(options.initial_indent, "    // ");
+        assert_eq!(options.subsequent_indent, "    // ");
+    }
+
+    #[test]
+    fn unfill_block_quote() {
+        let (text, options) = unfill("> foo\n> bar\n> baz");
+        assert_eq!(text, "foo bar baz");
+        assert_eq!(options.width, 5);
+        assert_eq!(options.initial_indent, "> ");
+        assert_eq!(options.subsequent_indent, "> ");
+    }
+
+    #[test]
+    fn unfill_only_prefixes_issue_466() {
+        // Test that we don't crash if the first line has only prefix
+        // chars *and* the second line is shorter than the first line.
+        let (text, options) = unfill("######\nfoo");
+        assert_eq!(text, " foo");
+        assert_eq!(options.width, 6);
+        assert_eq!(options.initial_indent, "######");
+        assert_eq!(options.subsequent_indent, "");
+    }
+
+    #[test]
+    fn unfill_trailing_newlines_issue_466() {
+        // Test that we don't crash on a '\r' following a string of
+        // '\n'. The problem was that we removed both kinds of
+        // characters in one code path, but not in the other.
+        let (text, options) = unfill("foo\n##\n\n\r");
+        // The \n\n changes subsequent_indent to "".
+        assert_eq!(text, "foo ## \r");
+        assert_eq!(options.width, 3);
+        assert_eq!(options.initial_indent, "");
+        assert_eq!(options.subsequent_indent, "");
+    }
+
+    #[test]
+    fn unfill_whitespace() {
+        assert_eq!(unfill("foo   bar").0, "foo   bar");
+    }
+
+    #[test]
+    fn refill_convert_lf_to_crlf() {
+        let options = Options::new(5).line_ending(LineEnding::CRLF);
+        assert_eq!(refill("foo\nbar\n", options), "foo\r\nbar\r\n",);
+    }
+
+    #[test]
+    fn refill_convert_crlf_to_lf() {
+        let options = Options::new(5).line_ending(LineEnding::LF);
+        assert_eq!(refill("foo\r\nbar\r\n", options), "foo\nbar\n",);
+    }
+
+    #[test]
+    fn refill_convert_mixed_newlines() {
+        let options = Options::new(5).line_ending(LineEnding::CRLF);
+        assert_eq!(refill("foo\r\nbar\n", options), "foo\r\nbar\r\n",);
+    }
+
+    #[test]
+    fn refill_defaults_to_lf() {
+        assert_eq!(refill("foo bar baz", 5), "foo\nbar\nbaz");
+    }
+
+    #[test]
+    fn wrap_columns_empty_text() {
+        assert_eq!(wrap_columns("", 1, 10, "| ", "", " |"), vec!["|        |"]);
+    }
+
+    #[test]
+    fn wrap_columns_single_column() {
+        assert_eq!(
+            wrap_columns("Foo", 3, 30, "| ", " | ", " |"),
+            vec!["| Foo    |        |          |"]
+        );
+    }
+
+    #[test]
+    fn wrap_columns_uneven_columns() {
+        // The gaps take up a total of 5 columns, so the columns are
+        // (21 - 5)/4 = 4 columns wide:
+        assert_eq!(
+            wrap_columns("Foo Bar Baz Quux", 4, 21, "|", "|", "|"),
+            vec!["|Foo |Bar |Baz |Quux|"]
+        );
+        // As the total width increases, the last column absorbs the
+        // excess width:
+        assert_eq!(
+            wrap_columns("Foo Bar Baz Quux", 4, 24, "|", "|", "|"),
+            vec!["|Foo |Bar |Baz |Quux   |"]
+        );
+        // Finally, when the width is 25, the columns can be resized
+        // to a width of (25 - 5)/4 = 5 columns:
+        assert_eq!(
+            wrap_columns("Foo Bar Baz Quux", 4, 25, "|", "|", "|"),
+            vec!["|Foo  |Bar  |Baz  |Quux |"]
+        );
+    }
+
+    #[test]
+    #[cfg(feature = "unicode-width")]
+    fn wrap_columns_with_emojis() {
+        assert_eq!(
+            wrap_columns(
+                "Words and a few emojis 😍 wrapped in ⓶ columns",
+                2,
+                30,
+                "✨ ",
+                " ⚽ ",
+                " 👀"
+            ),
+            vec![
+                "✨ Words      ⚽ wrapped in 👀",
+                "✨ and a few  ⚽ ⓶ columns  👀",
+                "✨ emojis 😍  ⚽            👀"
+            ]
+        );
+    }
+
+    #[test]
+    fn wrap_columns_big_gaps() {
+        // The column width shrinks to 1 because the gaps take up all
+        // the space.
+        assert_eq!(
+            wrap_columns("xyz", 2, 10, "----> ", " !!! ", " <----"),
+            vec![
+                "----> x !!! z <----", //
+                "----> y !!!   <----"
+            ]
+        );
+    }
+
+    #[test]
+    #[should_panic]
+    fn wrap_columns_panic_with_zero_columns() {
+        wrap_columns("", 0, 10, "", "", "");
+    }
+}
diff --git a/crates/textwrap/src/line_ending.rs b/crates/textwrap/src/line_ending.rs
new file mode 100644
index 0000000..0514fe5
--- /dev/null
+++ b/crates/textwrap/src/line_ending.rs
@@ -0,0 +1,88 @@
+//! Line ending detection and conversion.
+
+use std::fmt::Debug;
+
+/// Supported line endings. Like in the Rust standard library, two line
+/// endings are supported: `\r\n` and `\n`.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum LineEnding {
+    /// _Carriage return and line feed_ – a line ending sequence
+    /// historically used in Windows. Corresponds to the sequence
+    /// of ASCII control characters `0x0D 0x0A` or `\r\n`.
+    CRLF,
+    /// _Line feed_ – a line ending historically used in Unix.
+    /// Corresponds to the ASCII control character `0x0A` or `\n`.
+    LF,
+}
+
+impl LineEnding {
+    /// Turns this [`LineEnding`] value into its ASCII representation.
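+    ///
+    /// A small usage sketch (relying on the crate-root re-export
+    /// `textwrap::LineEnding`):
+    ///
+    /// ```
+    /// use textwrap::LineEnding;
+    ///
+    /// assert_eq!(LineEnding::CRLF.as_str(), "\r\n");
+    /// assert_eq!(LineEnding::LF.as_str(), "\n");
+    /// ```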
+    #[inline]
+    pub const fn as_str(&self) -> &'static str {
+        match self {
+            Self::CRLF => "\r\n",
+            Self::LF => "\n",
+        }
+    }
+}
+
+/// An iterator over the lines of a string, as tuples of string slice
+/// and [`LineEnding`] value; it only emits non-empty lines (i.e. having
+/// some content before the terminating `\r\n` or `\n`).
+///
+/// This struct is used internally by the library.
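+///
+/// For example (an illustrative sketch mirroring the tests below),
+/// iterating over `"LF\nCRLF\r\n\r\n\nunterminated"` yields
+/// `("LF", Some(LineEnding::LF))`, `("CRLF", Some(LineEnding::CRLF))`
+/// and `("unterminated", None)`.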
+#[derive(Debug, Clone, Copy)]
+pub(crate) struct NonEmptyLines<'a>(pub &'a str);
+
+impl<'a> Iterator for NonEmptyLines<'a> {
+    type Item = (&'a str, Option<LineEnding>);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        while let Some(lf) = self.0.find('\n') {
+            if lf == 0 || (lf == 1 && self.0.as_bytes()[lf - 1] == b'\r') {
+                self.0 = &self.0[(lf + 1)..];
+                continue;
+            }
+            let trimmed = match self.0.as_bytes()[lf - 1] {
+                b'\r' => (&self.0[..(lf - 1)], Some(LineEnding::CRLF)),
+                _ => (&self.0[..lf], Some(LineEnding::LF)),
+            };
+            self.0 = &self.0[(lf + 1)..];
+            return Some(trimmed);
+        }
+        if self.0.is_empty() {
+            None
+        } else {
+            let line = std::mem::take(&mut self.0);
+            Some((line, None))
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn non_empty_lines_full_case() {
+        assert_eq!(
+            NonEmptyLines("LF\nCRLF\r\n\r\n\nunterminated")
+                .collect::<Vec<(&str, Option<LineEnding>)>>(),
+            vec![
+                ("LF", Some(LineEnding::LF)),
+                ("CRLF", Some(LineEnding::CRLF)),
+                ("unterminated", None),
+            ]
+        );
+    }
+
+    #[test]
+    fn non_empty_lines_new_lines_only() {
+        assert_eq!(NonEmptyLines("\r\n\n\n\r\n").next(), None);
+    }
+
+    #[test]
+    fn non_empty_lines_no_input() {
+        assert_eq!(NonEmptyLines("").next(), None);
+    }
+}
diff --git a/crates/textwrap/src/word_separators.rs b/crates/textwrap/src/word_separators.rs
new file mode 100644
index 0000000..dc74e5e
--- /dev/null
+++ b/crates/textwrap/src/word_separators.rs
@@ -0,0 +1,481 @@
+//! Functionality for finding words.
+//!
+//! In order to wrap text, we need to know where the legal break
+//! points are, i.e., where the words of the text are. This means that
+//! we need to define what a "word" is.
+//!
+//! A simple approach is to split the text on whitespace, but
+//! this does not work for East-Asian languages such as Chinese or
+//! Japanese where there are no spaces between words. Breaking a long
+//! sequence of emojis is another example where line breaks might be
+//! wanted even if there is no whitespace to be found.
+//!
+//! The [`WordSeparator`] enum is responsible for determining where
+//! the words are in a line of text. Please refer to the enum and
+//! its variants for more information.
+
+#[cfg(feature = "unicode-linebreak")]
+use crate::core::skip_ansi_escape_sequence;
+use crate::core::Word;
+
+/// Describes where words occur in a line of text.
+///
+/// The simplest approach is to say that words are separated by one or
+/// more ASCII spaces (`' '`). This works for Western languages
+/// without emojis. A more complex approach is to use the Unicode line
+/// breaking algorithm, which finds break points in non-ASCII text.
+///
+/// The line breaks occur between words; please see
+/// [`WordSplitter`](crate::WordSplitter) for options on how to handle
+/// hyphenation of individual words.
+///
+/// # Examples
+///
+/// ```
+/// use textwrap::core::Word;
+/// use textwrap::WordSeparator::AsciiSpace;
+///
+/// let words = AsciiSpace.find_words("Hello World!").collect::<Vec<_>>();
+/// assert_eq!(words, vec![Word::from("Hello "), Word::from("World!")]);
+/// ```
+#[derive(Clone, Copy)]
+pub enum WordSeparator {
+    /// Find words by splitting on runs of `' '` characters.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use textwrap::core::Word;
+    /// use textwrap::WordSeparator::AsciiSpace;
+    ///
+    /// let words = AsciiSpace.find_words("Hello   World!").collect::<Vec<_>>();
+    /// assert_eq!(words, vec![Word::from("Hello   "),
+    ///                        Word::from("World!")]);
+    /// ```
+    AsciiSpace,
+
+    /// Split `line` into words using Unicode break properties.
+    ///
+    /// This word separator uses the Unicode line breaking algorithm
+    /// described in [Unicode Standard Annex
+    /// #14](https://www.unicode.org/reports/tr14/) to find legal places
+    /// to break lines. There is a small difference in that the U+002D
+    /// (Hyphen-Minus) and U+00AD (Soft Hyphen) don’t create a line break:
+    /// to allow a line break at a hyphen, use
+    /// [`WordSplitter::HyphenSplitter`](crate::WordSplitter::HyphenSplitter).
+    /// Soft hyphens are not currently supported.
+    ///
+    /// # Examples
+    ///
+    /// Unlike [`WordSeparator::AsciiSpace`], the Unicode line
+    /// breaking algorithm will find line break opportunities between
+    /// some characters with no intervening whitespace:
+    ///
+    /// ```
+    /// #[cfg(feature = "unicode-linebreak")] {
+    /// use textwrap::core::Word;
+    /// use textwrap::WordSeparator::UnicodeBreakProperties;
+    ///
+    /// assert_eq!(UnicodeBreakProperties.find_words("Emojis: 😂😍").collect::<Vec<_>>(),
+    ///            vec![Word::from("Emojis: "),
+    ///                 Word::from("😂"),
+    ///                 Word::from("😍")]);
+    ///
+    /// assert_eq!(UnicodeBreakProperties.find_words("CJK: 你好").collect::<Vec<_>>(),
+    ///            vec![Word::from("CJK: "),
+    ///                 Word::from("你"),
+    ///                 Word::from("好")]);
+    /// }
+    /// ```
+    ///
+    /// A U+2060 (Word Joiner) character can be inserted if you want to
+    /// manually override the defaults and keep the characters together:
+    ///
+    /// ```
+    /// #[cfg(feature = "unicode-linebreak")] {
+    /// use textwrap::core::Word;
+    /// use textwrap::WordSeparator::UnicodeBreakProperties;
+    ///
+    /// assert_eq!(UnicodeBreakProperties.find_words("Emojis: 😂\u{2060}😍").collect::<Vec<_>>(),
+    ///            vec![Word::from("Emojis: "),
+    ///                 Word::from("😂\u{2060}😍")]);
+    /// }
+    /// ```
+    ///
+    /// The Unicode line breaking algorithm will also automatically
+    /// suppress line breaks around certain punctuation characters:
+    ///
+    /// ```
+    /// #[cfg(feature = "unicode-linebreak")] {
+    /// use textwrap::core::Word;
+    /// use textwrap::WordSeparator::UnicodeBreakProperties;
+    ///
+    /// assert_eq!(UnicodeBreakProperties.find_words("[ foo ] bar !").collect::<Vec<_>>(),
+    ///            vec![Word::from("[ foo ] "),
+    ///                 Word::from("bar !")]);
+    /// }
+    /// ```
+    #[cfg(feature = "unicode-linebreak")]
+    UnicodeBreakProperties,
+
+    /// Find words using a custom word separator
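+    ///
+    /// # Examples
+    ///
+    /// A sketch of plugging in your own separator function:
+    ///
+    /// ```
+    /// use textwrap::core::Word;
+    /// use textwrap::WordSeparator;
+    ///
+    /// // Split on single ASCII spaces, keeping each space attached
+    /// // to the preceding word.
+    /// fn word_separator(line: &str) -> Box<dyn Iterator<Item = Word<'_>> + '_> {
+    ///     Box::new(line.split_inclusive(' ').map(Word::from))
+    /// }
+    ///
+    /// let words = WordSeparator::Custom(word_separator)
+    ///     .find_words("foo bar")
+    ///     .collect::<Vec<_>>();
+    /// assert_eq!(words, vec![Word::from("foo "), Word::from("bar")]);
+    /// ```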
+    Custom(fn(line: &str) -> Box<dyn Iterator<Item = Word<'_>> + '_>),
+}
+
+impl PartialEq for WordSeparator {
+    /// Compare two word separators.
+    ///
+    /// ```
+    /// use textwrap::WordSeparator;
+    ///
+    /// assert_eq!(WordSeparator::AsciiSpace, WordSeparator::AsciiSpace);
+    /// #[cfg(feature = "unicode-linebreak")] {
+    ///     assert_eq!(WordSeparator::UnicodeBreakProperties,
+    ///                WordSeparator::UnicodeBreakProperties);
+    /// }
+    /// ```
+    ///
+    /// Note that `WordSeparator::Custom` values never compare equal:
+    ///
+    /// ```
+    /// use textwrap::WordSeparator;
+    /// use textwrap::core::Word;
+    /// fn word_separator(line: &str) -> Box<dyn Iterator<Item = Word<'_>> + '_> {
+    ///     Box::new(line.split_inclusive(' ').map(Word::from))
+    /// }
+    /// assert_ne!(WordSeparator::Custom(word_separator),
+    ///            WordSeparator::Custom(word_separator));
+    /// ```
+    fn eq(&self, other: &Self) -> bool {
+        match (self, other) {
+            (WordSeparator::AsciiSpace, WordSeparator::AsciiSpace) => true,
+            #[cfg(feature = "unicode-linebreak")]
+            (WordSeparator::UnicodeBreakProperties, WordSeparator::UnicodeBreakProperties) => true,
+            (_, _) => false,
+        }
+    }
+}
+
+impl std::fmt::Debug for WordSeparator {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            WordSeparator::AsciiSpace => f.write_str("AsciiSpace"),
+            #[cfg(feature = "unicode-linebreak")]
+            WordSeparator::UnicodeBreakProperties => f.write_str("UnicodeBreakProperties"),
+            WordSeparator::Custom(_) => f.write_str("Custom(...)"),
+        }
+    }
+}
+
+impl WordSeparator {
+    /// Create a new word separator.
+    ///
+    /// The best available algorithm is used by default, i.e.,
+    /// [`WordSeparator::UnicodeBreakProperties`] if available,
+    /// otherwise [`WordSeparator::AsciiSpace`].
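+    ///
+    /// A minimal sketch of picking up the default:
+    ///
+    /// ```
+    /// use textwrap::WordSeparator;
+    ///
+    /// // With the `unicode-linebreak` feature this is
+    /// // `UnicodeBreakProperties`, otherwise `AsciiSpace`.
+    /// let _separator = WordSeparator::new();
+    /// ```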
+    pub const fn new() -> Self {
+        #[cfg(feature = "unicode-linebreak")]
+        {
+            WordSeparator::UnicodeBreakProperties
+        }
+
+        #[cfg(not(feature = "unicode-linebreak"))]
+        {
+            WordSeparator::AsciiSpace
+        }
+    }
+
+    // This function should really return impl Iterator<Item = Word>, but
+    // this isn't possible until Rust supports higher-kinded types:
+    // https://github.com/rust-lang/rfcs/blob/master/text/1522-conservative-impl-trait.md
+    /// Find all words in `line`.
+    pub fn find_words<'a>(&self, line: &'a str) -> Box<dyn Iterator<Item = Word<'a>> + 'a> {
+        match self {
+            WordSeparator::AsciiSpace => find_words_ascii_space(line),
+            #[cfg(feature = "unicode-linebreak")]
+            WordSeparator::UnicodeBreakProperties => find_words_unicode_break_properties(line),
+            WordSeparator::Custom(func) => func(line),
+        }
+    }
+}
+
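+// Illustrative sketch of the splitting below (matching the
+// `AsciiSpace` doc example above): "Hello   World!" is returned as
+// ["Hello   ", "World!"], with trailing spaces kept on each word.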
+fn find_words_ascii_space<'a>(line: &'a str) -> Box<dyn Iterator<Item = Word<'a>> + 'a> {
+    let mut start = 0;
+    let mut in_whitespace = false;
+    let mut char_indices = line.char_indices();
+
+    Box::new(std::iter::from_fn(move || {
+        for (idx, ch) in char_indices.by_ref() {
+            if in_whitespace && ch != ' ' {
+                let word = Word::from(&line[start..idx]);
+                start = idx;
+                in_whitespace = ch == ' ';
+                return Some(word);
+            }
+
+            in_whitespace = ch == ' ';
+        }
+
+        if start < line.len() {
+            let word = Word::from(&line[start..]);
+            start = line.len();
+            return Some(word);
+        }
+
+        None
+    }))
+}
+
+// Strip all ANSI escape sequences from `text`.
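+//
+// Illustrative sketch: "\u{1b}[32mgreen\u{1b}[0m" becomes "green".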
+#[cfg(feature = "unicode-linebreak")]
+fn strip_ansi_escape_sequences(text: &str) -> String {
+    let mut result = String::with_capacity(text.len());
+
+    let mut chars = text.chars();
+    while let Some(ch) = chars.next() {
+        if skip_ansi_escape_sequence(ch, &mut chars) {
+            continue;
+        }
+        result.push(ch);
+    }
+
+    result
+}
+
+/// Soft hyphen, also known as a “shy hyphen”. Should show up as ‘-’
+/// if a line is broken at this point, and otherwise be invisible.
+/// Textwrap does not currently support breaking words at soft
+/// hyphens.
+#[cfg(feature = "unicode-linebreak")]
+const SHY: char = '\u{00ad}';
+
+/// Find words in `line`. ANSI escape sequences in `line` are ignored.
+#[cfg(feature = "unicode-linebreak")]
+fn find_words_unicode_break_properties<'a>(
+    line: &'a str,
+) -> Box<dyn Iterator<Item = Word<'a>> + 'a> {
+    // Construct an iterator over (original index, stripped index)
+    // tuples. We find the Unicode linebreaks on a stripped string,
+    // but we need the original indices so we can form words based on
+    // the original string.
+    let mut last_stripped_idx = 0;
+    let mut char_indices = line.char_indices();
+    let mut idx_map = std::iter::from_fn(move || match char_indices.next() {
+        Some((orig_idx, ch)) => {
+            let stripped_idx = last_stripped_idx;
+            if !skip_ansi_escape_sequence(ch, &mut char_indices.by_ref().map(|(_, ch)| ch)) {
+                last_stripped_idx += ch.len_utf8();
+            }
+            Some((orig_idx, stripped_idx))
+        }
+        None => None,
+    });
+
+    let stripped = strip_ansi_escape_sequences(line);
+    let mut opportunities = unicode_linebreak::linebreaks(&stripped)
+        .filter(|(idx, _)| {
+            #[allow(clippy::match_like_matches_macro)]
+            match &stripped[..*idx].chars().next_back() {
+                // We suppress breaks at ‘-’ since we want to control
+                // this via the WordSplitter.
+                Some('-') => false,
+                // Soft hyphens are currently not supported since we
+                // require all `Word` fragments to be continuous in
+                // the input string.
+                Some(SHY) => false,
+                // Other breaks should be fine!
+                _ => true,
+            }
+        })
+        .collect::<Vec<_>>()
+        .into_iter();
+
+    // Remove the final break opportunity; we will add it below using
+    // `&line[start..]`. This ensures that we correctly include a
+    // trailing ANSI escape sequence.
+    opportunities.next_back();
+
+    let mut start = 0;
+    Box::new(std::iter::from_fn(move || {
+        for (idx, _) in opportunities.by_ref() {
+            if let Some((orig_idx, _)) = idx_map.find(|&(_, stripped_idx)| stripped_idx == idx) {
+                let word = Word::from(&line[start..orig_idx]);
+                start = orig_idx;
+                return Some(word);
+            }
+        }
+
+        if start < line.len() {
+            let word = Word::from(&line[start..]);
+            start = line.len();
+            return Some(word);
+        }
+
+        None
+    }))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::WordSeparator::*;
+    use super::*;
+
+    // Like assert_eq!, but the left expression is an iterator.
+    macro_rules! assert_iter_eq {
+        ($left:expr, $right:expr) => {
+            assert_eq!($left.collect::<Vec<_>>(), $right);
+        };
+    }
+
+    fn to_words(words: Vec<&str>) -> Vec<Word<'_>> {
+        words.into_iter().map(Word::from).collect()
+    }
+
+    macro_rules! test_find_words {
+        ($ascii_name:ident,
+         $unicode_name:ident,
+         $([ $line:expr, $ascii_words:expr, $unicode_words:expr ]),+) => {
+            #[test]
+            fn $ascii_name() {
+                $(
+                    let expected_words = to_words($ascii_words.to_vec());
+                    let actual_words = WordSeparator::AsciiSpace
+                        .find_words($line)
+                        .collect::<Vec<_>>();
+                    assert_eq!(actual_words, expected_words, "Line: {:?}", $line);
+                )+
+            }
+
+            #[test]
+            #[cfg(feature = "unicode-linebreak")]
+            fn $unicode_name() {
+                $(
+                    let expected_words = to_words($unicode_words.to_vec());
+                    let actual_words = WordSeparator::UnicodeBreakProperties
+                        .find_words($line)
+                        .collect::<Vec<_>>();
+                    assert_eq!(actual_words, expected_words, "Line: {:?}", $line);
+                )+
+            }
+        };
+    }
+
+    test_find_words!(ascii_space_empty, unicode_empty, ["", [], []]);
+
+    test_find_words!(
+        ascii_single_word,
+        unicode_single_word,
+        ["foo", ["foo"], ["foo"]]
+    );
+
+    test_find_words!(
+        ascii_two_words,
+        unicode_two_words,
+        ["foo bar", ["foo ", "bar"], ["foo ", "bar"]]
+    );
+
+    test_find_words!(
+        ascii_multiple_words,
+        unicode_multiple_words,
+        ["foo bar", ["foo ", "bar"], ["foo ", "bar"]],
+        ["x y z", ["x ", "y ", "z"], ["x ", "y ", "z"]]
+    );
+
+    test_find_words!(
+        ascii_only_whitespace,
+        unicode_only_whitespace,
+        [" ", [" "], [" "]],
+        ["    ", ["    "], ["    "]]
+    );
+
+    test_find_words!(
+        ascii_inter_word_whitespace,
+        unicode_inter_word_whitespace,
+        ["foo   bar", ["foo   ", "bar"], ["foo   ", "bar"]]
+    );
+
+    test_find_words!(
+        ascii_trailing_whitespace,
+        unicode_trailing_whitespace,
+        ["foo   ", ["foo   "], ["foo   "]]
+    );
+
+    test_find_words!(
+        ascii_leading_whitespace,
+        unicode_leading_whitespace,
+        ["   foo", ["   ", "foo"], ["   ", "foo"]]
+    );
+
+    test_find_words!(
+        ascii_multi_column_char,
+        unicode_multi_column_char,
+        ["\u{1f920}", ["\u{1f920}"], ["\u{1f920}"]] // cowboy emoji 🤠
+    );
+
+    test_find_words!(
+        ascii_hyphens,
+        unicode_hyphens,
+        ["foo-bar", ["foo-bar"], ["foo-bar"]],
+        ["foo- bar", ["foo- ", "bar"], ["foo- ", "bar"]],
+        ["foo - bar", ["foo ", "- ", "bar"], ["foo ", "- ", "bar"]],
+        ["foo -bar", ["foo ", "-bar"], ["foo ", "-bar"]]
+    );
+
+    test_find_words!(
+        ascii_newline,
+        unicode_newline,
+        ["foo\nbar", ["foo\nbar"], ["foo\n", "bar"]]
+    );
+
+    test_find_words!(
+        ascii_tab,
+        unicode_tab,
+        ["foo\tbar", ["foo\tbar"], ["foo\t", "bar"]]
+    );
+
+    test_find_words!(
+        ascii_non_breaking_space,
+        unicode_non_breaking_space,
+        ["foo\u{00A0}bar", ["foo\u{00A0}bar"], ["foo\u{00A0}bar"]]
+    );
+
+    #[test]
+    #[cfg(unix)]
+    fn find_words_colored_text() {
+        use termion::color::{Blue, Fg, Green, Reset};
+
+        let green_hello = format!("{}Hello{} ", Fg(Green), Fg(Reset));
+        let blue_world = format!("{}World!{}", Fg(Blue), Fg(Reset));
+        assert_iter_eq!(
+            AsciiSpace.find_words(&format!("{}{}", green_hello, blue_world)),
+            vec![Word::from(&green_hello), Word::from(&blue_world)]
+        );
+
+        #[cfg(feature = "unicode-linebreak")]
+        assert_iter_eq!(
+            UnicodeBreakProperties.find_words(&format!("{}{}", green_hello, blue_world)),
+            vec![Word::from(&green_hello), Word::from(&blue_world)]
+        );
+    }
+
+    #[test]
+    fn find_words_color_inside_word() {
+        let text = "foo\u{1b}[0m\u{1b}[32mbar\u{1b}[0mbaz";
+        assert_iter_eq!(AsciiSpace.find_words(text), vec![Word::from(text)]);
+
+        #[cfg(feature = "unicode-linebreak")]
+        assert_iter_eq!(
+            UnicodeBreakProperties.find_words(text),
+            vec![Word::from(text)]
+        );
+    }
+
+    #[test]
+    fn word_separator_new() {
+        #[cfg(feature = "unicode-linebreak")]
+        assert!(matches!(WordSeparator::new(), UnicodeBreakProperties));
+
+        #[cfg(not(feature = "unicode-linebreak"))]
+        assert!(matches!(WordSeparator::new(), AsciiSpace));
+    }
+}
diff --git a/crates/textwrap/src/word_splitters.rs b/crates/textwrap/src/word_splitters.rs
new file mode 100644
index 0000000..69e246f
--- /dev/null
+++ b/crates/textwrap/src/word_splitters.rs
@@ -0,0 +1,314 @@
+//! Word splitting functionality.
+//!
+//! To wrap text into lines, long words sometimes need to be split
+//! across lines. The [`WordSplitter`] enum defines this
+//! functionality.
+
+use crate::core::{display_width, Word};
+
+/// The `WordSplitter` enum describes where words can be split.
+///
+/// If the textwrap crate has been compiled with the `hyphenation`
+/// Cargo feature enabled, you will find a
+/// [`WordSplitter::Hyphenation`] variant. Use this variant for
+/// language-aware hyphenation:
+///
+/// ```
+/// #[cfg(feature = "hyphenation")] {
+///     use hyphenation::{Language, Load, Standard};
+///     use textwrap::{wrap, Options, WordSplitter};
+///
+///     let text = "Oxidation is the loss of electrons.";
+///     let dictionary = Standard::from_embedded(Language::EnglishUS).unwrap();
+///     let options = Options::new(8).word_splitter(WordSplitter::Hyphenation(dictionary));
+///     assert_eq!(wrap(text, &options), vec!["Oxida-",
+///                                           "tion is",
+///                                           "the loss",
+///                                           "of elec-",
+///                                           "trons."]);
+/// }
+/// ```
+///
+/// Please see the documentation for the [hyphenation] crate for more
+/// details.
+///
+/// [hyphenation]: https://docs.rs/hyphenation/
+#[derive(Clone)]
+pub enum WordSplitter {
+    /// Use this as the [`Options.word_splitter`] to avoid any kind of
+    /// hyphenation:
+    ///
+    /// ```
+    /// use textwrap::{wrap, Options, WordSplitter};
+    ///
+    /// let options = Options::new(8).word_splitter(WordSplitter::NoHyphenation);
+    /// assert_eq!(wrap("foo bar-baz", &options),
+    ///            vec!["foo", "bar-baz"]);
+    /// ```
+    ///
+    /// [`Options.word_splitter`]: super::Options::word_splitter
+    NoHyphenation,
+
+    /// `HyphenSplitter` is the default `WordSplitter` used by
+    /// [`Options::new`](super::Options::new). It will split words on
+    /// existing hyphens in the word.
+    ///
+    /// It will only use hyphens that are surrounded by alphanumeric
+    /// characters, which prevents a word like `"--foo-bar"` from
+    /// being split into `"--"` and `"foo-bar"`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use textwrap::WordSplitter;
+    ///
+    /// assert_eq!(WordSplitter::HyphenSplitter.split_points("--foo-bar"),
+    ///            vec![6]);
+    /// ```
+    HyphenSplitter,
+
+    /// Use a custom function as the word splitter.
+    ///
+    /// This variant lets you implement a custom word splitter using
+    /// your own function.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use textwrap::WordSplitter;
+    ///
+    /// fn split_at_underscore(word: &str) -> Vec<usize> {
+    ///     word.match_indices('_').map(|(idx, _)| idx + 1).collect()
+    /// }
+    ///
+    /// let word_splitter = WordSplitter::Custom(split_at_underscore);
+    /// assert_eq!(word_splitter.split_points("a_long_identifier"),
+    ///            vec![2, 7]);
+    /// ```
+    Custom(fn(word: &str) -> Vec<usize>),
+
+    /// A hyphenation dictionary can be used to do language-specific
+    /// hyphenation using patterns from the [hyphenation] crate.
+    ///
+    /// **Note:** Only available when the `hyphenation` Cargo feature is
+    /// enabled.
+    ///
+    /// [hyphenation]: https://docs.rs/hyphenation/
+    #[cfg(feature = "hyphenation")]
+    Hyphenation(hyphenation::Standard),
+}
+
+impl std::fmt::Debug for WordSplitter {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            WordSplitter::NoHyphenation => f.write_str("NoHyphenation"),
+            WordSplitter::HyphenSplitter => f.write_str("HyphenSplitter"),
+            WordSplitter::Custom(_) => f.write_str("Custom(...)"),
+            #[cfg(feature = "hyphenation")]
+            WordSplitter::Hyphenation(dict) => write!(f, "Hyphenation({})", dict.language()),
+        }
+    }
+}
+
+impl PartialEq<WordSplitter> for WordSplitter {
+    fn eq(&self, other: &WordSplitter) -> bool {
+        match (self, other) {
+            (WordSplitter::NoHyphenation, WordSplitter::NoHyphenation) => true,
+            (WordSplitter::HyphenSplitter, WordSplitter::HyphenSplitter) => true,
+            #[cfg(feature = "hyphenation")]
+            (WordSplitter::Hyphenation(this_dict), WordSplitter::Hyphenation(other_dict)) => {
+                this_dict.language() == other_dict.language()
+            }
+            (_, _) => false,
+        }
+    }
+}
+
+impl WordSplitter {
+    /// Return all possible indices where `word` can be split.
+    ///
+    /// The indices are in the range `0..word.len()`. They point to
+    /// the index _after_ the split point, i.e., after `-` if
+    /// splitting on hyphens. This way, `word.split_at(idx)` will
+    /// break the word into two well-formed pieces.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use textwrap::WordSplitter;
+    /// assert_eq!(WordSplitter::NoHyphenation.split_points("cannot-be-split"), vec![]);
+    /// assert_eq!(WordSplitter::HyphenSplitter.split_points("can-be-split"), vec![4, 7]);
+    /// assert_eq!(WordSplitter::Custom(|word| vec![word.len()/2]).split_points("middle"), vec![3]);
+    /// ```
+    pub fn split_points(&self, word: &str) -> Vec<usize> {
+        match self {
+            WordSplitter::NoHyphenation => Vec::new(),
+            WordSplitter::HyphenSplitter => {
+                let mut splits = Vec::new();
+
+                for (idx, _) in word.match_indices('-') {
+                    // We only use hyphens that are surrounded by alphanumeric
+                    // characters. This is to avoid splitting on repeated hyphens,
+                    // such as those found in --foo-bar.
+                    let prev = word[..idx].chars().next_back();
+                    let next = word[idx + 1..].chars().next();
+
+                    if prev.filter(|ch| ch.is_alphanumeric()).is_some()
+                        && next.filter(|ch| ch.is_alphanumeric()).is_some()
+                    {
+                        splits.push(idx + 1); // +1 due to width of '-'.
+                    }
+                }
+
+                splits
+            }
+            WordSplitter::Custom(splitter_func) => splitter_func(word),
+            #[cfg(feature = "hyphenation")]
+            WordSplitter::Hyphenation(dictionary) => {
+                use hyphenation::Hyphenator;
+                dictionary.hyphenate(word).breaks
+            }
+        }
+    }
+}
+
+/// Split words into smaller words according to the split points given
+/// by `word_splitter`.
+///
+/// Note that we split all words, regardless of their length. This is
+/// to more cleanly separate the business of splitting (including
+/// automatic hyphenation) from the business of word wrapping.
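+///
+/// For example (an illustrative sketch mirroring the tests below),
+/// splitting `Word::from("foo-bar")` with `WordSplitter::HyphenSplitter`
+/// yields `Word::from("foo-")` followed by `Word::from("bar")`.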
+pub fn split_words<'a, I>(
+    words: I,
+    word_splitter: &'a WordSplitter,
+) -> impl Iterator<Item = Word<'a>>
+where
+    I: IntoIterator<Item = Word<'a>>,
+{
+    words.into_iter().flat_map(move |word| {
+        let mut prev = 0;
+        let mut split_points = word_splitter.split_points(&word).into_iter();
+        std::iter::from_fn(move || {
+            if let Some(idx) = split_points.next() {
+                let need_hyphen = !word[..idx].ends_with('-');
+                let w = Word {
+                    word: &word.word[prev..idx],
+                    width: display_width(&word[prev..idx]),
+                    whitespace: "",
+                    penalty: if need_hyphen { "-" } else { "" },
+                };
+                prev = idx;
+                return Some(w);
+            }
+
+            if prev < word.word.len() || prev == 0 {
+                let w = Word {
+                    word: &word.word[prev..],
+                    width: display_width(&word[prev..]),
+                    whitespace: word.whitespace,
+                    penalty: word.penalty,
+                };
+                prev = word.word.len() + 1;
+                return Some(w);
+            }
+
+            None
+        })
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Like assert_eq!, but the left expression is an iterator.
+    macro_rules! assert_iter_eq {
+        ($left:expr, $right:expr) => {
+            assert_eq!($left.collect::<Vec<_>>(), $right);
+        };
+    }
+
+    #[test]
+    fn split_words_no_words() {
+        assert_iter_eq!(split_words(vec![], &WordSplitter::HyphenSplitter), vec![]);
+    }
+
+    #[test]
+    fn split_words_empty_word() {
+        assert_iter_eq!(
+            split_words(vec![Word::from("   ")], &WordSplitter::HyphenSplitter),
+            vec![Word::from("   ")]
+        );
+    }
+
+    #[test]
+    fn split_words_single_word() {
+        assert_iter_eq!(
+            split_words(vec![Word::from("foobar")], &WordSplitter::HyphenSplitter),
+            vec![Word::from("foobar")]
+        );
+    }
+
+    #[test]
+    fn split_words_hyphen_splitter() {
+        assert_iter_eq!(
+            split_words(vec![Word::from("foo-bar")], &WordSplitter::HyphenSplitter),
+            vec![Word::from("foo-"), Word::from("bar")]
+        );
+    }
+
+    #[test]
+    fn split_words_no_hyphenation() {
+        assert_iter_eq!(
+            split_words(vec![Word::from("foo-bar")], &WordSplitter::NoHyphenation),
+            vec![Word::from("foo-bar")]
+        );
+    }
+
+    #[test]
+    fn split_words_adds_penalty() {
+        let fixed_split_point = |_: &str| vec![3];
+
+        assert_iter_eq!(
+            split_words(
+                vec![Word::from("foobar")].into_iter(),
+                &WordSplitter::Custom(fixed_split_point)
+            ),
+            vec![
+                Word {
+                    word: "foo",
+                    width: 3,
+                    whitespace: "",
+                    penalty: "-"
+                },
+                Word {
+                    word: "bar",
+                    width: 3,
+                    whitespace: "",
+                    penalty: ""
+                }
+            ]
+        );
+
+        assert_iter_eq!(
+            split_words(
+                vec![Word::from("fo-bar")].into_iter(),
+                &WordSplitter::Custom(fixed_split_point)
+            ),
+            vec![
+                Word {
+                    word: "fo-",
+                    width: 3,
+                    whitespace: "",
+                    penalty: ""
+                },
+                Word {
+                    word: "bar",
+                    width: 3,
+                    whitespace: "",
+                    penalty: ""
+                }
+            ]
+        );
+    }
+}
diff --git a/crates/textwrap/src/wrap_algorithms.rs b/crates/textwrap/src/wrap_algorithms.rs
new file mode 100644
index 0000000..eef9b33
--- /dev/null
+++ b/crates/textwrap/src/wrap_algorithms.rs
@@ -0,0 +1,411 @@
+//! Word wrapping algorithms.
+//!
+//! After a text has been broken into words (or [`Fragment`]s), one
+//! now has to decide how to break the fragments into lines. The
+//! simplest algorithm for this is implemented by [`wrap_first_fit`]:
+//! it uses no look-ahead and simply adds fragments to the line as
+//! long as they fit. However, this can lead to poor line breaks if a
+//! large fragment almost-but-not-quite fits on a line. When that
+//! happens, the fragment is moved to the next line and it will leave
+//! behind a large gap. A more advanced algorithm, implemented by
+//! [`wrap_optimal_fit`], will take this into account. The optimal-fit
+//! algorithm considers all possible line breaks and will attempt to
+//! minimize the gaps left behind by overly short lines.
+//!
+//! While both algorithms run in linear time, the first-fit algorithm
+//! is about 4 times faster than the optimal-fit algorithm.
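+//!
+//! A small sketch of the first-fit behaviour through the high-level
+//! [`crate::wrap`] function (the expected lines mirror the
+//! [`wrap_first_fit`] example below):
+//!
+//! ```
+//! use textwrap::{wrap, Options, WrapAlgorithm};
+//!
+//! let text = "These few words will unfortunately not wrap nicely.";
+//! let options = Options::new(15).wrap_algorithm(WrapAlgorithm::FirstFit);
+//! assert_eq!(wrap(text, options),
+//!            vec!["These few words",
+//!                 "will",
+//!                 "unfortunately",
+//!                 "not wrap",
+//!                 "nicely."]);
+//! ```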
+
+#[cfg(feature = "smawk")]
+mod optimal_fit;
+#[cfg(feature = "smawk")]
+pub use optimal_fit::{wrap_optimal_fit, OverflowError, Penalties};
+
+use crate::core::{Fragment, Word};
+
+/// Describes how to wrap words into lines.
+///
+/// The simplest approach is to wrap words one word at a time and
+/// accept the first way of wrapping which fits
+/// ([`WrapAlgorithm::FirstFit`]). If the `smawk` Cargo feature is
+/// enabled, a more complex algorithm is available which will look at
+/// an entire paragraph at a time in order to find optimal line breaks
+/// ([`WrapAlgorithm::OptimalFit`]).
+#[derive(Clone, Copy)]
+pub enum WrapAlgorithm {
+    /// Wrap words using a fast and simple algorithm.
+    ///
+    /// This algorithm uses no look-ahead when finding line breaks.
+    /// Implemented by [`wrap_first_fit`], please see that function for
+    /// details and examples.
+    FirstFit,
+
+    /// Wrap words using an advanced algorithm with look-ahead.
+    ///
+    /// This wrapping algorithm considers the entire paragraph to find
+    /// optimal line breaks. When wrapping text, "penalties" are
+    /// assigned to line breaks based on the gaps left at the end of
+    /// lines. See [`Penalties`] for details.
+    ///
+    /// The underlying wrapping algorithm is implemented by
+    /// [`wrap_optimal_fit`], please see that function for examples.
+    ///
+    /// **Note:** Only available when the `smawk` Cargo feature is
+    /// enabled.
+    #[cfg(feature = "smawk")]
+    OptimalFit(Penalties),
+
+    /// Custom wrapping function.
+    ///
+    /// Use this if you want to implement your own wrapping algorithm.
+    /// The function can freely decide how to turn a slice of
+    /// [`Word`]s into lines.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use textwrap::core::Word;
+    /// use textwrap::{wrap, Options, WrapAlgorithm};
+    ///
+    /// fn stair<'a, 'b>(words: &'b [Word<'a>], _: &'b [usize]) -> Vec<&'b [Word<'a>]> {
+    ///     let mut lines = Vec::new();
+    ///     let mut step = 1;
+    ///     let mut start_idx = 0;
+    ///     while start_idx + step <= words.len() {
+    ///       lines.push(&words[start_idx .. start_idx+step]);
+    ///       start_idx += step;
+    ///       step += 1;
+    ///     }
+    ///     lines
+    /// }
+    ///
+    /// let options = Options::new(10).wrap_algorithm(WrapAlgorithm::Custom(stair));
+    /// assert_eq!(wrap("First, second, third, fourth, fifth, sixth", options),
+    ///            vec!["First,",
+    ///                 "second, third,",
+    ///                 "fourth, fifth, sixth"]);
+    /// ```
+    Custom(for<'a, 'b> fn(words: &'b [Word<'a>], line_widths: &'b [usize]) -> Vec<&'b [Word<'a>]>),
+}
+
+impl PartialEq for WrapAlgorithm {
+    /// Compare two wrap algorithms.
+    ///
+    /// ```
+    /// use textwrap::WrapAlgorithm;
+    ///
+    /// assert_eq!(WrapAlgorithm::FirstFit, WrapAlgorithm::FirstFit);
+    /// #[cfg(feature = "smawk")] {
+    ///     assert_eq!(WrapAlgorithm::new_optimal_fit(), WrapAlgorithm::new_optimal_fit());
+    /// }
+    /// ```
+    ///
+    /// Note that `WrapAlgorithm::Custom` values never compare equal:
+    ///
+    /// ```
+    /// use textwrap::WrapAlgorithm;
+    ///
+    /// assert_ne!(WrapAlgorithm::Custom(|words, line_widths| vec![words]),
+    ///            WrapAlgorithm::Custom(|words, line_widths| vec![words]));
+    /// ```
+    fn eq(&self, other: &Self) -> bool {
+        match (self, other) {
+            (WrapAlgorithm::FirstFit, WrapAlgorithm::FirstFit) => true,
+            #[cfg(feature = "smawk")]
+            (WrapAlgorithm::OptimalFit(a), WrapAlgorithm::OptimalFit(b)) => a == b,
+            (_, _) => false,
+        }
+    }
+}
+
+impl std::fmt::Debug for WrapAlgorithm {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            WrapAlgorithm::FirstFit => f.write_str("FirstFit"),
+            #[cfg(feature = "smawk")]
+            WrapAlgorithm::OptimalFit(penalties) => write!(f, "OptimalFit({:?})", penalties),
+            WrapAlgorithm::Custom(_) => f.write_str("Custom(...)"),
+        }
+    }
+}
+
+impl WrapAlgorithm {
+    /// Create new wrap algorithm.
+    ///
+    /// The best wrapping algorithm is used by default, i.e.,
+    /// [`WrapAlgorithm::OptimalFit`] if available, otherwise
+    /// [`WrapAlgorithm::FirstFit`].
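+    ///
+    /// A minimal sketch of picking up the default:
+    ///
+    /// ```
+    /// use textwrap::WrapAlgorithm;
+    ///
+    /// // With the `smawk` feature this equals `new_optimal_fit()`,
+    /// // otherwise `FirstFit`.
+    /// let _algorithm = WrapAlgorithm::new();
+    /// ```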
+    pub const fn new() -> Self {
+        #[cfg(not(feature = "smawk"))]
+        {
+            WrapAlgorithm::FirstFit
+        }
+
+        #[cfg(feature = "smawk")]
+        {
+            WrapAlgorithm::new_optimal_fit()
+        }
+    }
+
+    /// New [`WrapAlgorithm::OptimalFit`] with default penalties. This
+    /// works well for monospace text.
+    ///
+    /// **Note:** Only available when the `smawk` Cargo feature is
+    /// enabled.
+    #[cfg(feature = "smawk")]
+    pub const fn new_optimal_fit() -> Self {
+        WrapAlgorithm::OptimalFit(Penalties::new())
+    }
+
+    /// Wrap words according to line widths.
+    ///
+    /// The `line_widths` slice gives the target line width for each
+    /// line (the last slice element is repeated as necessary). This
+    /// can be used to implement hanging indentation.
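+    ///
+    /// An illustrative sketch with per-line widths (the result
+    /// follows from the first-fit rule, which is the algorithm used
+    /// here):
+    ///
+    /// ```
+    /// use textwrap::core::Word;
+    /// use textwrap::WrapAlgorithm;
+    ///
+    /// // The first line is limited to 4 columns, all later lines to 10.
+    /// let words = [Word::from("foo "), Word::from("bar "), Word::from("baz")];
+    /// let widths = [4, 10];
+    /// let lines = WrapAlgorithm::FirstFit.wrap(&words, &widths);
+    /// assert_eq!(lines, vec![&words[..1], &words[1..]]);
+    /// ```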
+    #[inline]
+    pub fn wrap<'a, 'b>(
+        &self,
+        words: &'b [Word<'a>],
+        line_widths: &'b [usize],
+    ) -> Vec<&'b [Word<'a>]> {
+        // Every integer up to 2u64.pow(f64::MANTISSA_DIGITS) = 2**53
+        // = 9_007_199_254_740_992 can be represented without loss by
+        // an f64. Larger line widths will be rounded to the nearest
+        // representable number.
+        let f64_line_widths = line_widths.iter().map(|w| *w as f64).collect::<Vec<_>>();
+
+        match self {
+            WrapAlgorithm::FirstFit => wrap_first_fit(words, &f64_line_widths),
+
+            #[cfg(feature = "smawk")]
+            WrapAlgorithm::OptimalFit(penalties) => {
+                // The computation cannot overflow when the line
+                // widths are restricted to usize.
+                wrap_optimal_fit(words, &f64_line_widths, penalties).unwrap()
+            }
+
+            WrapAlgorithm::Custom(func) => func(words, line_widths),
+        }
+    }
+}
+
+impl Default for WrapAlgorithm {
+    fn default() -> Self {
+        WrapAlgorithm::new()
+    }
+}
+
+/// Wrap abstract fragments into lines with a first-fit algorithm.
+///
+/// The `line_widths` slice gives the target line width for each line
+/// (the last slice element is repeated as necessary). This can be
+/// used to implement hanging indentation.
+///
+/// The fragments must already have been split into the desired
+/// widths; this function will not (and cannot) attempt to split them
+/// further when arranging them into lines.
+///
+/// # First-Fit Algorithm
+///
+/// This implements a simple “greedy” algorithm: accumulate fragments
+/// one by one and when a fragment no longer fits, start a new line.
+/// There is no look-ahead; we simply take the first fit of the
+/// fragments we find.
+///
+/// While fast and predictable, this algorithm can produce poor line
+/// breaks when a long fragment is moved to a new line, leaving behind
+/// a large gap:
+///
+/// ```
+/// use textwrap::core::Word;
+/// use textwrap::wrap_algorithms::wrap_first_fit;
+/// use textwrap::WordSeparator;
+///
+/// // Helper to convert wrapped lines to a Vec<String>.
+/// fn lines_to_strings(lines: Vec<&[Word<'_>]>) -> Vec<String> {
+///     lines.iter().map(|line| {
+///         line.iter().map(|word| &**word).collect::<Vec<_>>().join(" ")
+///     }).collect::<Vec<_>>()
+/// }
+///
+/// let text = "These few words will unfortunately not wrap nicely.";
+/// let words = WordSeparator::AsciiSpace.find_words(text).collect::<Vec<_>>();
+/// assert_eq!(lines_to_strings(wrap_first_fit(&words, &[15.0])),
+///            vec!["These few words",
+///                 "will",  // <-- short line
+///                 "unfortunately",
+///                 "not wrap",
+///                 "nicely."]);
+///
+/// // We can avoid the short line if we look ahead:
+/// #[cfg(feature = "smawk")]
+/// use textwrap::wrap_algorithms::{wrap_optimal_fit, Penalties};
+/// #[cfg(feature = "smawk")]
+/// assert_eq!(lines_to_strings(wrap_optimal_fit(&words, &[15.0], &Penalties::new()).unwrap()),
+///            vec!["These few",
+///                 "words will",
+///                 "unfortunately",
+///                 "not wrap",
+///                 "nicely."]);
+/// ```
+///
+/// The [`wrap_optimal_fit`] function was used above to get better
+/// line breaks. It uses an advanced algorithm which tries to avoid
+/// short lines. This function is about 4 times faster than
+/// [`wrap_optimal_fit`].
+///
+/// # Examples
+///
+/// Imagine you're building a house and you have a number of tasks
+/// you need to execute: pour the foundation, complete the framing,
+/// install the plumbing, run electrical cabling, put in insulation.
+///
+/// The construction workers can only work during daytime, so they
+/// need to pack up everything at night. Because they need to secure
+/// their tools and move machines back to the garage, this process
+/// takes much more time than the time it would take them to simply
+/// switch to another task.
+///
+/// You would like to make a list of tasks to execute every day based
+/// on your estimates. You can model this with a program like this:
+///
+/// ```
+/// use textwrap::core::{Fragment, Word};
+/// use textwrap::wrap_algorithms::wrap_first_fit;
+///
+/// #[derive(Debug)]
+/// struct Task<'a> {
+///     name: &'a str,
+///     hours: f64,   // Time needed to complete task.
+///     sweep: f64,   // Time needed for a quick sweep after task during the day.
+///     cleanup: f64, // Time needed for full cleanup if day ends with this task.
+/// }
+///
+/// impl Fragment for Task<'_> {
+///     fn width(&self) -> f64 { self.hours }
+///     fn whitespace_width(&self) -> f64 { self.sweep }
+///     fn penalty_width(&self) -> f64 { self.cleanup }
+/// }
+///
+/// // The morning tasks
+/// let tasks = vec![
+///     Task { name: "Foundation",  hours: 4.0, sweep: 2.0, cleanup: 3.0 },
+///     Task { name: "Framing",     hours: 3.0, sweep: 1.0, cleanup: 2.0 },
+///     Task { name: "Plumbing",    hours: 2.0, sweep: 2.0, cleanup: 2.0 },
+///     Task { name: "Electrical",  hours: 2.0, sweep: 1.0, cleanup: 2.0 },
+///     Task { name: "Insulation",  hours: 2.0, sweep: 1.0, cleanup: 2.0 },
+///     Task { name: "Drywall",     hours: 3.0, sweep: 1.0, cleanup: 2.0 },
+///     Task { name: "Floors",      hours: 3.0, sweep: 1.0, cleanup: 2.0 },
+///     Task { name: "Countertops", hours: 1.0, sweep: 1.0, cleanup: 2.0 },
+///     Task { name: "Bathrooms",   hours: 2.0, sweep: 1.0, cleanup: 2.0 },
+/// ];
+///
+/// // Fill tasks into days, taking `day_length` into account. The
+/// // output shows the hours worked per day along with the names of
+/// // the tasks for that day.
+/// fn assign_days<'a>(tasks: &[Task<'a>], day_length: f64) -> Vec<(f64, Vec<&'a str>)> {
+///     let mut days = Vec::new();
+///     // Assign tasks to days. The assignment is a vector of slices,
+///     // with a slice per day.
+///     let assigned_days: Vec<&[Task<'a>]> = wrap_first_fit(&tasks, &[day_length]);
+///     for day in assigned_days.iter() {
+///         let last = day.last().unwrap();
+///         let work_hours: f64 = day.iter().map(|t| t.hours + t.sweep).sum();
+///         let names = day.iter().map(|t| t.name).collect::<Vec<_>>();
+///         days.push((work_hours - last.sweep + last.cleanup, names));
+///     }
+///     days
+/// }
+///
+/// // With a single crew working 8 hours a day:
+/// assert_eq!(
+///     assign_days(&tasks, 8.0),
+///     [
+///         (7.0, vec!["Foundation"]),
+///         (8.0, vec!["Framing", "Plumbing"]),
+///         (7.0, vec!["Electrical", "Insulation"]),
+///         (5.0, vec!["Drywall"]),
+///         (7.0, vec!["Floors", "Countertops"]),
+///         (4.0, vec!["Bathrooms"]),
+///     ]
+/// );
+///
+/// // With two crews working in shifts, 16 hours a day:
+/// assert_eq!(
+///     assign_days(&tasks, 16.0),
+///     [
+///         (14.0, vec!["Foundation", "Framing", "Plumbing"]),
+///         (15.0, vec!["Electrical", "Insulation", "Drywall", "Floors"]),
+///         (6.0, vec!["Countertops", "Bathrooms"]),
+///     ]
+/// );
+/// ```
+///
+/// Apologies to anyone who actually knows how to build a house and
+/// knows how long each step takes :-)
+pub fn wrap_first_fit<'a, 'b, T: Fragment>(
+    fragments: &'a [T],
+    line_widths: &'b [f64],
+) -> Vec<&'a [T]> {
+    // The final line width is used for all remaining lines.
+    let default_line_width = line_widths.last().copied().unwrap_or(0.0);
+    let mut lines = Vec::new();
+    let mut start = 0;
+    let mut width = 0.0;
+
+    for (idx, fragment) in fragments.iter().enumerate() {
+        let line_width = line_widths
+            .get(lines.len())
+            .copied()
+            .unwrap_or(default_line_width);
+        if width + fragment.width() + fragment.penalty_width() > line_width && idx > start {
+            lines.push(&fragments[start..idx]);
+            start = idx;
+            width = 0.0;
+        }
+        width += fragment.width() + fragment.whitespace_width();
+    }
+    lines.push(&fragments[start..]);
+    lines
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[derive(Debug, PartialEq)]
+    struct Word(f64);
+
+    #[rustfmt::skip]
+    impl Fragment for Word {
+        fn width(&self) -> f64 { self.0 }
+        fn whitespace_width(&self) -> f64 { 1.0 }
+        fn penalty_width(&self) -> f64 { 0.0 }
+    }
+
+    #[test]
+    fn wrap_string_longer_than_f64() {
+        let words = vec![
+            Word(1e307),
+            Word(2e307),
+            Word(3e307),
+            Word(4e307),
+            Word(5e307),
+            Word(6e307),
+        ];
+        // Wrap at just under f64::MAX (~18e307). The tiny
+        // whitespace_widths disappear because of loss of precision.
+        assert_eq!(
+            wrap_first_fit(&words, &[15e307]),
+            &[
+                vec![
+                    Word(1e307),
+                    Word(2e307),
+                    Word(3e307),
+                    Word(4e307),
+                    Word(5e307)
+                ],
+                vec![Word(6e307)]
+            ]
+        );
+    }
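+
+    #[test]
+    fn wrap_with_per_line_widths() {
+        // Illustrative test added while editing, not part of the
+        // upstream test suite. It shows that `line_widths` can give
+        // each line its own width and that the last width is reused
+        // for all remaining lines; it relies on the `Word` helper
+        // defined above.
+        let words = vec![Word(3.0), Word(3.0), Word(3.0), Word(3.0)];
+        // The first line has room for one word, later lines for two.
+        assert_eq!(
+            wrap_first_fit(&words, &[3.0, 7.0]),
+            vec![&words[0..1], &words[1..3], &words[3..4]]
+        );
+    }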
+}
diff --git a/crates/textwrap/src/wrap_algorithms/optimal_fit.rs b/crates/textwrap/src/wrap_algorithms/optimal_fit.rs
new file mode 100644
index 0000000..ef2f333
--- /dev/null
+++ b/crates/textwrap/src/wrap_algorithms/optimal_fit.rs
@@ -0,0 +1,433 @@
+use std::cell::RefCell;
+
+use crate::core::Fragment;
+
+/// Penalties for
+/// [`WrapAlgorithm::OptimalFit`](crate::WrapAlgorithm::OptimalFit)
+/// and [`wrap_optimal_fit`].
+///
+/// The wrapping algorithm in [`wrap_optimal_fit`] considers the
+/// entire paragraph to find optimal line breaks. When wrapping text,
+/// "penalties" are assigned to line breaks based on the gaps left at
+/// the end of lines. The penalties are given by this struct, with
+/// [`Penalties::default`] assigning penalties that work well for
+/// monospace text.
+///
+/// If you are wrapping proportional text, you are advised to assign
+/// your own penalties according to your font size. See the individual
+/// penalties below for details.
+///
+/// **Note:** Only available when the `smawk` Cargo feature is
+/// enabled.
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub struct Penalties {
+    /// Per-line penalty. This is added for every line, which makes it
+    /// expensive to output more lines than the minimum required.
+    pub nline_penalty: usize,
+
+    /// Per-character cost for lines that overflow the target line width.
+    ///
+    /// With a default value of 50², every single character costs as
+    /// much as leaving a gap of 50 characters behind. This is because
+    /// we assign a cost of `gap * gap` to a short line. When
+    /// wrapping monospace text, we can overflow the line by 1
+    /// character in extreme cases:
+    ///
+    /// ```
+    /// use textwrap::core::Word;
+    /// use textwrap::wrap_algorithms::{wrap_optimal_fit, Penalties};
+    ///
+    /// let short = "foo ";
+    /// let long = "x".repeat(50);
+    /// let length = (short.len() + long.len()) as f64;
+    /// let fragments = vec![Word::from(short), Word::from(&long)];
+    /// let penalties = Penalties::new();
+    ///
+    /// // Perfect fit, both words are on a single line with no overflow.
+    /// let wrapped = wrap_optimal_fit(&fragments, &[length], &penalties).unwrap();
+    /// assert_eq!(wrapped, vec![&[Word::from(short), Word::from(&long)]]);
+    ///
+    /// // The words no longer fit, yet we get a single line back. While
+    /// // the cost of overflow (`1 * 2500`) is the same as the cost of the
+    /// // gap (`50 * 50 = 2500`), the tie is broken by `nline_penalty`
+    /// // which makes it cheaper to overflow than to use two lines.
+    /// let wrapped = wrap_optimal_fit(&fragments, &[length - 1.0], &penalties).unwrap();
+    /// assert_eq!(wrapped, vec![&[Word::from(short), Word::from(&long)]]);
+    ///
+    /// // The cost of overflow would be 2 * 2500, whereas the cost of
+    /// // the gap is only `49 * 49 + nline_penalty = 2401 + 1000 =
+    /// // 3401`. We therefore get two lines.
+    /// let wrapped = wrap_optimal_fit(&fragments, &[length - 2.0], &penalties).unwrap();
+    /// assert_eq!(wrapped, vec![&[Word::from(short)],
+    ///                          &[Word::from(&long)]]);
+    /// ```
+    ///
+    /// This only happens if the overflowing word is 50 characters
+    /// long _and_ if the word overflows the line by exactly one
+    /// character. If it overflows by more than one character, the
+    /// overflow penalty will quickly outgrow the cost of the gap, as
+    /// seen above.
+    pub overflow_penalty: usize,
+
+    /// When should a single word on the last line be considered
+    /// "too short"?
+    ///
+    /// If the last line of the text consists of a single word and if
+    /// this word is shorter than `1 / short_last_line_fraction` of
+    /// the line width, then the final line will be considered "short"
+    /// and `short_last_line_penalty` is added as an extra penalty.
+    ///
+    /// The effect of this is to avoid a final line consisting of a
+    /// single small word. For example, with a
+    /// `short_last_line_penalty` of 25 (the default), a gap of up to
+    /// 5 columns will be seen as more desirable than having a final
+    /// short line.
+    ///
+    /// ## Examples
+    ///
+    /// ```
+    /// use textwrap::{wrap, wrap_algorithms, Options, WrapAlgorithm};
+    ///
+    /// let text = "This is a demo of the short last line penalty.";
+    ///
+    /// // The first-fit algorithm leaves a single short word on the last line:
+    /// assert_eq!(wrap(text, Options::new(37).wrap_algorithm(WrapAlgorithm::FirstFit)),
+    ///            vec!["This is a demo of the short last line",
+    ///                 "penalty."]);
+    ///
+    /// #[cfg(feature = "smawk")] {
+    /// let mut penalties = wrap_algorithms::Penalties::new();
+    ///
+    /// // Since "penalty." is shorter than 25% of the line width, the
+    /// // optimal-fit algorithm adds a penalty of 25. This is enough
+    /// // to move "line " down:
+    /// assert_eq!(wrap(text, Options::new(37).wrap_algorithm(WrapAlgorithm::OptimalFit(penalties))),
+    ///            vec!["This is a demo of the short last",
+    ///                 "line penalty."]);
+    ///
+    /// // We can change the meaning of "short" lines. Here, only words
+    /// // shorter than 1/10th of the line width will be considered short:
+    /// penalties.short_last_line_fraction = 10;
+    /// assert_eq!(wrap(text, Options::new(37).wrap_algorithm(WrapAlgorithm::OptimalFit(penalties))),
+    ///            vec!["This is a demo of the short last line",
+    ///                 "penalty."]);
+    ///
+    /// // If desired, the penalty can also be disabled:
+    /// penalties.short_last_line_fraction = 4;
+    /// penalties.short_last_line_penalty = 0;
+    /// assert_eq!(wrap(text, Options::new(37).wrap_algorithm(WrapAlgorithm::OptimalFit(penalties))),
+    ///            vec!["This is a demo of the short last line",
+    ///                 "penalty."]);
+    /// }
+    /// ```
+    pub short_last_line_fraction: usize,
+
+    /// Penalty for a last line with a single short word.
+    ///
+    /// Set this to zero if you do not want to penalize short last lines.
+    pub short_last_line_penalty: usize,
+
+    /// Penalty for lines ending with a hyphen.
+    pub hyphen_penalty: usize,
+}
+
+impl Penalties {
+    /// Default penalties for monospace text.
+    ///
+    /// The penalties here work well for monospace text. This is
+    /// because they expect the gaps at the end of lines to be roughly
+    /// in the range `0..100`. If the gaps are larger, the
+    /// `overflow_penalty` and `hyphen_penalty` become insignificant.
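+    ///
+    /// A custom value can start from these defaults and override
+    /// individual fields with struct update syntax. This is an
+    /// illustrative sketch added while editing, not part of the
+    /// upstream documentation:
+    ///
+    /// ```
+    /// use textwrap::wrap_algorithms::Penalties;
+    ///
+    /// // Keep the defaults, but never penalize a short last line.
+    /// let penalties = Penalties { short_last_line_penalty: 0, ..Penalties::new() };
+    /// assert_eq!(penalties.nline_penalty, 1000);
+    /// ```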
+    pub const fn new() -> Self {
+        Penalties {
+            nline_penalty: 1000,
+            overflow_penalty: 50 * 50,
+            short_last_line_fraction: 4,
+            short_last_line_penalty: 25,
+            hyphen_penalty: 25,
+        }
+    }
+}
+
+impl Default for Penalties {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// Cache for line numbers. This is necessary to avoid an O(n**2)
+/// behavior when computing line numbers in [`wrap_optimal_fit`].
+struct LineNumbers {
+    line_numbers: RefCell<Vec<usize>>,
+}
+
+impl LineNumbers {
+    fn new(size: usize) -> Self {
+        let mut line_numbers = Vec::with_capacity(size);
+        line_numbers.push(0);
+        LineNumbers {
+            line_numbers: RefCell::new(line_numbers),
+        }
+    }
+
+    fn get<T>(&self, i: usize, minima: &[(usize, T)]) -> usize {
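+        // Lazily extend the memoized table up to index `i`: the line
+        // number for position `pos` is one more than the line number
+        // of its optimal predecessor break, `minima[pos].0`.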
+        while self.line_numbers.borrow_mut().len() < i + 1 {
+            let pos = self.line_numbers.borrow().len();
+            let line_number = 1 + self.get(minima[pos].0, minima);
+            self.line_numbers.borrow_mut().push(line_number);
+        }
+
+        self.line_numbers.borrow()[i]
+    }
+}
+
+/// Overflow error during the [`wrap_optimal_fit`] computation.
+#[derive(Debug, PartialEq, Eq)]
+pub struct OverflowError;
+
+impl std::fmt::Display for OverflowError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "wrap_optimal_fit cost computation overflowed")
+    }
+}
+
+impl std::error::Error for OverflowError {}
+
+/// Wrap abstract fragments into lines with an optimal-fit algorithm.
+///
+/// The `line_widths` slice gives the target line width for each line
+/// (the last slice element is repeated as necessary). This can be
+/// used to implement hanging indentation.
+///
+/// The fragments must already have been split into the desired
+/// widths; this function will not (and cannot) attempt to split them
+/// further when arranging them into lines.
+///
+/// # Optimal-Fit Algorithm
+///
+/// The algorithm considers all possible break points and picks the
+/// breaks which minimize the gaps at the end of each line. More
+/// precisely, the algorithm assigns a cost or penalty to each break
+/// point, determined by `cost = gap * gap` where `gap = target_width -
+/// line_width`. Shorter lines are thus penalized more heavily since
+/// they leave behind a larger gap.
+///
+/// We can illustrate this with the text “To be, or not to be: that is
+/// the question”. We will be wrapping it in a narrow column with room
+/// for only 10 characters. The [greedy
+/// algorithm](super::wrap_first_fit) will produce these lines, each
+/// annotated with the corresponding penalty:
+///
+/// ```text
+/// "To be, or"   1² =  1
+/// "not to be:"  0² =  0
+/// "that is"     3² =  9
+/// "the"         7² = 49
+/// "question"    2² =  4
+/// ```
+///
+/// We see that line four with “the” leaves a gap of 7 columns, which
+/// gives it a penalty of 49. The sum of the penalties is 63.
+///
+/// There are 10 words, which means that there are `2_u32.pow(9)` or
+/// 512 different ways to typeset it. We can compute
+/// the sum of the penalties for each possible line break and search
+/// for the one with the lowest sum:
+///
+/// ```text
+/// "To be,"     4² = 16
+/// "or not to"  1² =  1
+/// "be: that"   2² =  4
+/// "is the"     4² = 16
+/// "question"   2² =  4
+/// ```
+///
+/// The sum of the penalties is 41, which is better than what the
+/// greedy algorithm produced.
+///
+/// Searching through all possible combinations would normally be
+/// prohibitively slow. However, it turns out that the problem can be
+/// formulated as the task of finding column minima in a cost matrix.
+/// This matrix has a special form (totally monotone) which lets us
+/// use a [linear-time algorithm called
+/// SMAWK](https://lib.rs/crates/smawk) to find the optimal break
+/// points.
+///
+/// This means that the time complexity remains O(_n_) where _n_ is
+/// the number of words. Compared to
+/// [`wrap_first_fit`](super::wrap_first_fit), this function is about
+/// 4 times slower.
+///
+/// The optimization of per-line costs over the entire paragraph is
+/// inspired by the line breaking algorithm used in TeX, as described
+/// in the 1981 article [_Breaking Paragraphs into
+/// Lines_](http://www.eprg.org/G53DOC/pdfs/knuth-plass-breaking.pdf)
+/// by Knuth and Plass. The implementation here is based on [Python
+/// code by David
+/// Eppstein](https://github.com/jfinkels/PADS/blob/master/pads/wrap.py).
+///
+/// # Errors
+///
+/// In case of an overflow during the cost computation, an `Err` is
+/// returned. Overflows happen when fragments or lines have infinite
+/// widths (`f64::INFINITY`) or if the widths are so large that the
+/// gaps at the end of lines have sizes larger than `f64::MAX.sqrt()`
+/// (approximately 1e154):
+///
+/// ```
+/// use textwrap::core::Fragment;
+/// use textwrap::wrap_algorithms::{wrap_optimal_fit, OverflowError, Penalties};
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Word(f64);
+///
+/// impl Fragment for Word {
+///     fn width(&self) -> f64 { self.0 }
+///     fn whitespace_width(&self) -> f64 { 1.0 }
+///     fn penalty_width(&self) -> f64 { 0.0 }
+/// }
+///
+/// // Wrapping overflows because 1e155 * 1e155 = 1e310, which is
+/// // larger than f64::MAX:
+/// assert_eq!(wrap_optimal_fit(&[Word(0.0), Word(0.0)], &[1e155], &Penalties::default()),
+///            Err(OverflowError));
+/// ```
+///
+/// When using fragment widths and line widths which fit inside a
+/// `u64`, overflows cannot happen. This means that fragments derived
+/// from a `&str` cannot cause overflows.
+///
+/// **Note:** Only available when the `smawk` Cargo feature is
+/// enabled.
+pub fn wrap_optimal_fit<'a, 'b, T: Fragment>(
+    fragments: &'a [T],
+    line_widths: &'b [f64],
+    penalties: &'b Penalties,
+) -> Result<Vec<&'a [T]>, OverflowError> {
+    // The final line width is used for all remaining lines.
+    let default_line_width = line_widths.last().copied().unwrap_or(0.0);
+    let mut widths = Vec::with_capacity(fragments.len() + 1);
+    let mut width = 0.0;
+    widths.push(width);
+    for fragment in fragments {
+        width += fragment.width() + fragment.whitespace_width();
+        widths.push(width);
+    }
+
+    let line_numbers = LineNumbers::new(fragments.len());
+
+    let minima = smawk::online_column_minima(0.0, widths.len(), |minima, i, j| {
+        // Line number for fragment `i`.
+        let line_number = line_numbers.get(i, minima);
+        let line_width = line_widths
+            .get(line_number)
+            .copied()
+            .unwrap_or(default_line_width);
+        let target_width = line_width.max(1.0);
+
+        // Compute the width of a line spanning fragments[i..j] in
+        // constant time. We need to adjust widths[j] by subtracting
+        // the whitespace of fragments[j-1] and then adding its penalty.
+        let line_width = widths[j] - widths[i] - fragments[j - 1].whitespace_width()
+            + fragments[j - 1].penalty_width();
+
+        // We compute the cost of the line containing fragments[i..j].
+        // We start with minima[i].1, which is the optimal cost for
+        // breaking before fragments[i].
+        //
+        // First, every extra line costs nline_penalty.
+        let mut cost = minima[i].1 + penalties.nline_penalty as f64;
+
+        // Next, we add a penalty depending on the line length.
+        if line_width > target_width {
+            // Lines that overflow get a hefty penalty.
+            let overflow = line_width - target_width;
+            cost += overflow * penalties.overflow_penalty as f64;
+        } else if j < fragments.len() {
+            // Other lines (except for the last line) get a milder
+            // penalty which depends on the size of the gap.
+            let gap = target_width - line_width;
+            cost += gap * gap;
+        } else if i + 1 == j
+            && line_width < target_width / penalties.short_last_line_fraction as f64
+        {
+            // The last line can have any size gap, but we do add a
+            // penalty if the line is very short (typically because it
+            // contains just a single word).
+            cost += penalties.short_last_line_penalty as f64;
+        }
+
+        // Finally, we discourage hyphens.
+        if fragments[j - 1].penalty_width() > 0.0 {
+            // TODO: this should use a penalty value from the fragment
+            // instead.
+            cost += penalties.hyphen_penalty as f64;
+        }
+
+        cost
+    });
+
+    for (_, cost) in &minima {
+        if cost.is_infinite() {
+            return Err(OverflowError);
+        }
+    }
+
+    let mut lines = Vec::with_capacity(line_numbers.get(fragments.len(), &minima));
+    let mut pos = fragments.len();
+    loop {
+        let prev = minima[pos].0;
+        lines.push(&fragments[prev..pos]);
+        pos = prev;
+        if pos == 0 {
+            break;
+        }
+    }
+
+    lines.reverse();
+    Ok(lines)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[derive(Debug, PartialEq)]
+    struct Word(f64);
+
+    #[rustfmt::skip]
+    impl Fragment for Word {
+        fn width(&self) -> f64 { self.0 }
+        fn whitespace_width(&self) -> f64 { 1.0 }
+        fn penalty_width(&self) -> f64 { 0.0 }
+    }
+
+    #[test]
+    fn wrap_fragments_with_infinite_widths() {
+        let words = vec![Word(f64::INFINITY)];
+        assert_eq!(
+            wrap_optimal_fit(&words, &[0.0], &Penalties::default()),
+            Err(OverflowError)
+        );
+    }
+
+    #[test]
+    fn wrap_fragments_with_huge_widths() {
+        let words = vec![Word(1e200), Word(1e250), Word(1e300)];
+        assert_eq!(
+            wrap_optimal_fit(&words, &[1e300], &Penalties::default()),
+            Err(OverflowError)
+        );
+    }
+
+    #[test]
+    fn wrap_fragments_with_large_widths() {
+        // With a line width of 1e100, the gaps at the end of the
+        // lines are around 1e100 at most, so the `gap * gap` cost
+        // (around 1e200 at most) fits comfortably in a f64.
+        let words = vec![Word(1e25), Word(1e50), Word(1e75)];
+        assert_eq!(
+            wrap_optimal_fit(&words, &[1e100], &Penalties::default()),
+            Ok(vec![&vec![Word(1e25), Word(1e50), Word(1e75)][..]])
+        );
+    }
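+
+    #[test]
+    fn wrap_single_line_when_everything_fits() {
+        // Illustrative test added while editing, not part of the
+        // upstream test suite: with a generous line width the
+        // algorithm keeps all fragments on one line, since every
+        // extra break would add `nline_penalty` to the total cost.
+        let words = vec![Word(1.0), Word(2.0), Word(3.0)];
+        assert_eq!(
+            wrap_optimal_fit(&words, &[10.0], &Penalties::default()),
+            Ok(vec![&words[..]])
+        );
+    }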
+}
diff --git a/crates/textwrap/tests/indent.rs b/crates/textwrap/tests/indent.rs
new file mode 100644
index 0000000..9dd5ad2
--- /dev/null
+++ b/crates/textwrap/tests/indent.rs
@@ -0,0 +1,88 @@
+/// Test cases ported over from the Python standard library.
+use textwrap::{dedent, indent};
+
+const ROUNDTRIP_CASES: [&str; 3] = [
+    // basic test case
+    "Hi.\nThis is a test.\nTesting.",
+    // include a blank line
+    "Hi.\nThis is a test.\n\nTesting.",
+    // include leading and trailing blank lines
+    "\nHi.\nThis is a test.\nTesting.\n",
+];
+
+const WINDOWS_CASES: [&str; 2] = [
+    // use windows line endings
+    "Hi.\r\nThis is a test.\r\nTesting.",
+    // pathological case
+    "Hi.\r\nThis is a test.\n\r\nTesting.\r\n\n",
+];
+
+#[test]
+fn test_indent_nomargin_default() {
+    // indent should do nothing if 'prefix' is empty.
+    for text in ROUNDTRIP_CASES.iter() {
+        assert_eq!(&indent(text, ""), text);
+    }
+    for text in WINDOWS_CASES.iter() {
+        assert_eq!(&indent(text, ""), text);
+    }
+}
+
+#[test]
+fn test_roundtrip_spaces() {
+    // A whitespace prefix should roundtrip with dedent
+    for text in ROUNDTRIP_CASES.iter() {
+        assert_eq!(&dedent(&indent(text, "    ")), text);
+    }
+}
+
+#[test]
+fn test_roundtrip_tabs() {
+    // A whitespace prefix should roundtrip with dedent
+    for text in ROUNDTRIP_CASES.iter() {
+        assert_eq!(&dedent(&indent(text, "\t\t")), text);
+    }
+}
+
+#[test]
+fn test_roundtrip_mixed() {
+    // A whitespace prefix should roundtrip with dedent
+    for text in ROUNDTRIP_CASES.iter() {
+        assert_eq!(&dedent(&indent(text, " \t  \t ")), text);
+    }
+}
+
+#[test]
+fn test_indent_default() {
+    // Test default indenting of lines that are not whitespace only
+    let prefix = "  ";
+    let expected = [
+        // Basic test case
+        "  Hi.\n  This is a test.\n  Testing.",
+        // Include a blank line
+        "  Hi.\n  This is a test.\n\n  Testing.",
+        // Include leading and trailing blank lines
+        "\n  Hi.\n  This is a test.\n  Testing.\n",
+    ];
+    for (text, expect) in ROUNDTRIP_CASES.iter().zip(expected.iter()) {
+        assert_eq!(&indent(text, prefix), expect)
+    }
+    let expected = [
+        // Use Windows line endings
+        "  Hi.\r\n  This is a test.\r\n  Testing.",
+        // Pathological case
+        "  Hi.\r\n  This is a test.\n\r\n  Testing.\r\n\n",
+    ];
+    for (text, expect) in WINDOWS_CASES.iter().zip(expected.iter()) {
+        assert_eq!(&indent(text, prefix), expect)
+    }
+}
+
+#[test]
+fn indented_text_should_have_the_same_number_of_lines_as_the_original_text() {
+    let texts = ["foo\nbar", "foo\nbar\n", "foo\nbar\nbaz"];
+    for original in texts.iter() {
+        let indented = indent(original, "");
+        assert_eq!(&indented, original);
+    }
+}
diff --git a/crates/textwrap/tests/version-numbers.rs b/crates/textwrap/tests/version-numbers.rs
new file mode 100644
index 0000000..3f429b1
--- /dev/null
+++ b/crates/textwrap/tests/version-numbers.rs
@@ -0,0 +1,22 @@
+#[test]
+fn test_readme_deps() {
+    version_sync::assert_markdown_deps_updated!("README.md");
+}
+
+#[test]
+fn test_changelog() {
+    version_sync::assert_contains_regex!(
+        "CHANGELOG.md",
+        r"^## Version {version} \(20\d\d-\d\d-\d\d\)"
+    );
+}
+
+#[test]
+fn test_html_root_url() {
+    version_sync::assert_html_root_url_updated!("src/lib.rs");
+}
+
+#[test]
+fn test_dependency_graph() {
+    version_sync::assert_contains_regex!("src/lib.rs", "master/images/textwrap-{version}.svg");
+}
diff --git a/crates/thiserror/.cargo-checksum.json b/crates/thiserror/.cargo-checksum.json
new file mode 100644
index 0000000..f8b53fa
--- /dev/null
+++ b/crates/thiserror/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"35169a8135290207040ce0411701afa68516e677e39b3da37d990842106a37b1","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"106c5a937767d49503e1fc5eae1b924f57f15decd8583720a3c652483e348a64","build.rs":"68e8862d9a6fa607647c01232662a3d9a44462aec390a6fe98beccd59fa58f7d","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/aserror.rs":"cf3c98abb2b9e06afa3c4aba0df14938417c3e330315863437561cbb3573888b","src/display.rs":"0adeeeb524c6bee06de179d54e82a43dc12d2c5b7f69f6fd268ba4611ebf5233","src/lib.rs":"40767118e757634c47a80518ae63bf73b8c4efa517039c70224ea24a57a92a24","src/provide.rs":"8007e22953bacfcc57bb7d12a03fbeb0acece5d2ec889cf55522a4e071d26df3","tests/compiletest.rs":"022a8e400ef813d7ea1875b944549cee5125f6a995dc33e93b48cba3e1b57bd1","tests/test_backtrace.rs":"a8f038490fb881463c0e8c36557617c47cf2d181f16c00525d4d139c7964fade","tests/test_deprecated.rs":"7b80a10f090a3982da017556d3d71398abcead59afd8278c7b9d9b1f7b66c7b3","tests/test_display.rs":"28e0f938fe0f6354529c35722eff04830451e27718145c27522c9acf7f8a6639","tests/test_error.rs":"d06dca3c38f22d7ce4e27dadd6c0f78e5cefe3a2ebbc5fe44abc9ddd5ee1985f","tests/test_expr.rs":"6d6d686b7f31e063c76647b24df347acfceba6897ce3a5f54b2851cde387579b","tests/test_from.rs":"36bd22be7b048cd187a19076aeac1456040f20a0b677b01c6003998b63439ea1","tests/test_generics.rs":"adc61f0d5fe8d53796848d44fb0373be5eab19a1eeb6a7172bc6f0dd7b91199c","tests/test_lints.rs":"c17d79d77edfcdd4b8f6dcdcd1c70ad065cfbc747e1a618ac6343315d0b59ea4","tests/test_option.rs":"ac30d929c019d6c54d1c1792b09e43c18dc0e4123b82051ff9e5db5e63c15e43","tests/test_path.rs":"ef5452c7e828a0179f5ace7e19f95b9762aa887caf10244adbfe36ded712c090","tests/test_source.rs":"f2f04f11bf8a709eddb1c68f113cda0c2be87e56800d6b9d991bedd545b4642f","tests/test_transparent.rs":"cd8d5be14d00d610a1782104bea6c013618501dab5c3625178ecfcf66e31f939","tests/ui/bad-field-attr.rs":"c5b567e3091969a01061843fb2d95c5e1aa3fa81edfeecdf416a84a6fba40aa8","tests/ui/bad-field-attr.stderr":"78f576d5ec66464a77f1cdf0f5bb7dcdf18f7f04f1165983a6239ec59d908ea3","tests/ui/concat-display.rs":"3995bd6b3bdd67df7bb16499775d89600c0dd20895633fe807396a64c117078d","tests/ui/concat-display.stderr":"256dfde61ee689ebe51588b135e2e030bdf95ba5adef1cb59f588c797bbdeef2","tests/ui/duplicate-enum-source.rs":"bfe28ce18042d446a76c7411aa233598211ce1157fdd3cb87bff3b3fa7c33131","tests/ui/duplicate-enum-source.stderr":"3d32fead420b27b4497be49080bc3b78f7f0ba339ead3de6c94e5dc20302c18f","tests/ui/duplicate-fmt.rs":"af53b66445bcce076a114376747f176b42c060a156563a41ccb638ae14c451fd","tests/ui/duplicate-fmt.stderr":"998bb121ce6f1595fd99529a7a1b06451b6bf476924337dce5524a83a7a5f1a1","tests/ui/duplicate-struct-source.rs":"f3d5f8e3d6fccfcdbb630db291353709583a920c6bf46f9f9de9966b67ea4c0f","tests/ui/duplicate-struct-source.stderr":"fb761d76668ac42357cf37b03c0abdbae5de0a828034990850291c9cb6ab766d","tests/ui/duplicate-transparent.rs":"41a9447e85f1a47027023442acde55c3d8610ec46d91b39bd43a42d7a004d747","tests/ui/duplicate-transparent.stderr":"4975abad43e973df158f18098d9bcb9dc39f8e75d3e733ed5d6620d1ee065c11","tests/ui/from-backtrace-backtrace.rs":"1fd51c5a1f7f6b6ee676d9fc798b6276ef2ce75def33d07f0e4b7bbde3291859","tests/ui/from-backtrace-backtrace.stderr":"f9774e9dad51374501ef4a55fa2dacece4d1c70e29ca18761394bdb80a9a74da","tests/ui/from-not-source.rs":"744a55aeffe11066830159ac023c33aaa5576e313b341fa24440ee13dfe3ac98","tests/ui/f
rom-not-source.stderr":"525038e8b841707b927434cca4549168f73bd305faca17552a0d1fffa542ccc4","tests/ui/lifetime.rs":"e72e0391695e47fcd07edbf3819f114e468e2097086ec687781c7c8d6b4b7da7","tests/ui/lifetime.stderr":"d889a23f71324afe95dafc5f9d15337fbdbc9977cb8924f0cafe3a3becf4ced7","tests/ui/missing-fmt.rs":"bc9e2830e54c2474ff6c27a766ed3dee88d29e40f93f30e8d64d63233866c17d","tests/ui/missing-fmt.stderr":"9a20ccee9b660fe31a5b3199307b48580bb8305cb9ce33d97d3fc767a0cfc614","tests/ui/no-display.rs":"962245372272d23e9833311c15e73221b3c7da822a2ff90189613af56ffb5c2e","tests/ui/no-display.stderr":"9e2161baf5f66ab22370322f2e66e7633bf04b1ec07ef656e904b984bcc45d09","tests/ui/source-enum-not-error.rs":"7c57c63b3ec37bc456738acea2e1038de5b0f32fe7e83984037d7ad1ed921737","tests/ui/source-enum-not-error.stderr":"feac587e2436fd7bed73e7265dceb31b495587f1a8eea5c5fefd9da66b912dac","tests/ui/source-struct-not-error.rs":"09fb7713637242dca9192585a6daeb8d732dc1c1d0fa522b74f1c98618e6d949","tests/ui/source-struct-not-error.stderr":"66fb5fa85d59f11d8b5f7ec99469a843c51943b0010e554bdf56376a0614a2ca","tests/ui/transparent-display.rs":"b3c59583eb64b0b5a246444456d03cf52d51bcdc08885023600dbb44fd87e5f2","tests/ui/transparent-display.stderr":"16d538914e0d92026bde4b4bec75660217da9ecc6b621d12d2eb81d33ed1d1da","tests/ui/transparent-enum-many.rs":"2a40a764fb4683bff57973eec61507a6c00f7d4d7a32da6e7bd0190c2e445434","tests/ui/transparent-enum-many.stderr":"f1d78c1d6d8edbef153420db4fb9ca3dc6076fa043b5b1bc0cd291daa417a3ea","tests/ui/transparent-enum-source.rs":"18f606a98ac0a53f08dc56f5f923b9cbe75d25ed34479c777b48dac305d5968c","tests/ui/transparent-enum-source.stderr":"1b2e0ac53951034575d43ec0396c4e2b3cfb272db2aef8d6baa13a7e1632cc84","tests/ui/transparent-struct-many.rs":"72c6b6c1a44c203d3bc68989b2f1ec092531ef75b745432824c3776c290326f6","tests/ui/transparent-struct-many.stderr":"7bd0536dbb54a0ce7d4a8e66ca7624a1b132d8a1d1e4fecca642ec77494ac01c","tests/ui/transparent-struct-source.rs":"863fa691ed7d27e8767da58d9ee11fd40d6642274b36338ca1074c07964ea2b3","tests/ui/transparent-struct-source.stderr":"267dab65929e67d32347fb467a00b43af931f8205d727d7671938580217fc70e","tests/ui/unexpected-field-fmt.rs":"29fba7b4d81c642ec8e47cfe053aa515acf9080a86d65e685363a48993becfe3","tests/ui/unexpected-field-fmt.stderr":"20731c4a08af04bed3ff513903adadd690b6bc532b15604557e7f25575a8338f","tests/ui/unexpected-struct-source.rs":"c6cbe882d622635c216feb8290b1bd536ce0ec4feee16bc087667a21b3641d5c","tests/ui/unexpected-struct-source.stderr":"7c8227513478f6cc09e8a28be337c8a0e758a06ca5978d774c91bd43c4a54043","tests/ui/union.rs":"331adff27cebd8b95b03b6742cc8247331fda1f961e1590ed39c8d39f50cf1d8","tests/ui/union.stderr":"5f67ad29753d6fb14bc03aef7d4a1f660ee7796e469c037efbf8b13456934ad3"},"package":"1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4"}
\ No newline at end of file
diff --git a/crates/thiserror/Android.bp b/crates/thiserror/Android.bp
new file mode 100644
index 0000000..dabc411
--- /dev/null
+++ b/crates/thiserror/Android.bp
@@ -0,0 +1,32 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_thiserror_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_thiserror_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libthiserror",
+    host_supported: true,
+    crate_name: "thiserror",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.0.49",
+    crate_root: "src/lib.rs",
+    edition: "2021",
+    proc_macros: ["libthiserror_impl"],
+    apex_available: [
+        "//apex_available:anyapex",
+        "//apex_available:platform",
+    ],
+    product_available: true,
+    vendor_available: true,
+    min_sdk_version: "29",
+}
diff --git a/crates/thiserror/Cargo.lock b/crates/thiserror/Cargo.lock
new file mode 100644
index 0000000..bfa7a91
--- /dev/null
+++ b/crates/thiserror/Cargo.lock
@@ -0,0 +1,334 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "anyhow"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
+
+[[package]]
+name = "dissimilar"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59f8e79d1fbf76bdfbde321e902714bf6c49df88a7dda6fc682fc2979226962d"
+
+[[package]]
+name = "equivalent"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
+
+[[package]]
+name = "glob"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
+
+[[package]]
+name = "hashbrown"
+version = "0.14.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
+
+[[package]]
+name = "indexmap"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c"
+dependencies = [
+ "equivalent",
+ "hashbrown",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "ref-cast"
+version = "1.0.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccf0a6f84d5f1d581da8b41b47ec8600871962f2a528115b542b362d4b744931"
+dependencies = [
+ "ref-cast-impl",
+]
+
+[[package]]
+name = "ref-cast-impl"
+version = "1.0.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "rustversion"
+version = "1.0.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
+
+[[package]]
+name = "ryu"
+version = "1.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
+
+[[package]]
+name = "serde"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.127"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad"
+dependencies = [
+ "itoa",
+ "memchr",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "serde_spanned"
+version = "0.6.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "thiserror"
+version = "1.0.49"
+dependencies = [
+ "anyhow",
+ "ref-cast",
+ "rustversion",
+ "thiserror-impl",
+ "trybuild",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.49"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "toml"
+version = "0.8.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e"
+dependencies = [
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "toml_edit",
+]
+
+[[package]]
+name = "toml_datetime"
+version = "0.6.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "toml_edit"
+version = "0.22.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d"
+dependencies = [
+ "indexmap",
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "winnow",
+]
+
+[[package]]
+name = "trybuild"
+version = "1.0.99"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "207aa50d36c4be8d8c6ea829478be44a372c6a77669937bb39c698e52f1491e8"
+dependencies = [
+ "dissimilar",
+ "glob",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "termcolor",
+ "toml",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
+dependencies = [
+ "windows-sys",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
+
+[[package]]
+name = "winnow"
+version = "0.6.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f"
+dependencies = [
+ "memchr",
+]
diff --git a/crates/thiserror/Cargo.toml b/crates/thiserror/Cargo.toml
new file mode 100644
index 0000000..53b073d
--- /dev/null
+++ b/crates/thiserror/Cargo.toml
@@ -0,0 +1,48 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.56"
+name = "thiserror"
+version = "1.0.49"
+authors = ["David Tolnay <dtolnay@gmail.com>"]
+description = "derive(Error)"
+documentation = "https://docs.rs/thiserror"
+readme = "README.md"
+keywords = [
+    "error",
+    "error-handling",
+    "derive",
+]
+categories = ["rust-patterns"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/dtolnay/thiserror"
+
+[package.metadata.docs.rs]
+rustdoc-args = ["--generate-link-to-definition"]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies.thiserror-impl]
+version = "=1.0.49"
+
+[dev-dependencies.anyhow]
+version = "1.0.73"
+
+[dev-dependencies.ref-cast]
+version = "1.0.18"
+
+[dev-dependencies.rustversion]
+version = "1.0.13"
+
+[dev-dependencies.trybuild]
+version = "1.0.81"
+features = ["diff"]
diff --git a/crates/thiserror/LICENSE b/crates/thiserror/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/thiserror/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/thiserror/LICENSE-APACHE b/crates/thiserror/LICENSE-APACHE
new file mode 100644
index 0000000..1b5ec8b
--- /dev/null
+++ b/crates/thiserror/LICENSE-APACHE
@@ -0,0 +1,176 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/crates/thiserror/LICENSE-MIT b/crates/thiserror/LICENSE-MIT
new file mode 100644
index 0000000..31aa793
--- /dev/null
+++ b/crates/thiserror/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/thiserror/METADATA b/crates/thiserror/METADATA
new file mode 100644
index 0000000..77a19a5
--- /dev/null
+++ b/crates/thiserror/METADATA
@@ -0,0 +1,23 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/thiserror
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
+name: "thiserror"
+description: "derive(Error)"
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/thiserror"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/thiserror/thiserror-1.0.40.crate"
+  }
+  version: "1.0.40"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2023
+    month: 3
+    day: 23
+  }
+}
diff --git a/crates/thiserror/MODULE_LICENSE_APACHE2 b/crates/thiserror/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/thiserror/MODULE_LICENSE_APACHE2
diff --git a/crates/thiserror/NOTICE b/crates/thiserror/NOTICE
new file mode 120000
index 0000000..7a694c9
--- /dev/null
+++ b/crates/thiserror/NOTICE
@@ -0,0 +1 @@
+LICENSE
\ No newline at end of file
diff --git a/crates/thiserror/README.md b/crates/thiserror/README.md
new file mode 100644
index 0000000..9de063c
--- /dev/null
+++ b/crates/thiserror/README.md
@@ -0,0 +1,222 @@
+derive(Error)
+=============
+
+[<img alt="github" src="https://img.shields.io/badge/github-dtolnay/thiserror-8da0cb?style=for-the-badge&labelColor=555555&logo=github" height="20">](https://github.com/dtolnay/thiserror)
+[<img alt="crates.io" src="https://img.shields.io/crates/v/thiserror.svg?style=for-the-badge&color=fc8d62&logo=rust" height="20">](https://crates.io/crates/thiserror)
+[<img alt="docs.rs" src="https://img.shields.io/badge/docs.rs-thiserror-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs" height="20">](https://docs.rs/thiserror)
+[<img alt="build status" src="https://img.shields.io/github/actions/workflow/status/dtolnay/thiserror/ci.yml?branch=master&style=for-the-badge" height="20">](https://github.com/dtolnay/thiserror/actions?query=branch%3Amaster)
+
+This library provides a convenient derive macro for the standard library's
+[`std::error::Error`] trait.
+
+[`std::error::Error`]: https://doc.rust-lang.org/std/error/trait.Error.html
+
+```toml
+[dependencies]
+thiserror = "1.0"
+```
+
+*Compiler support: requires rustc 1.56+*
+
+<br>
+
+## Example
+
+```rust
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum DataStoreError {
+    #[error("data store disconnected")]
+    Disconnect(#[from] io::Error),
+    #[error("the data for key `{0}` is not available")]
+    Redaction(String),
+    #[error("invalid header (expected {expected:?}, found {found:?})")]
+    InvalidHeader {
+        expected: String,
+        found: String,
+    },
+    #[error("unknown data store error")]
+    Unknown,
+}
+```
+
+<br>
+
+## Details
+
+- Thiserror deliberately does not appear in your public API. You get the same
+  thing as if you had written an implementation of `std::error::Error` by hand,
+  and switching from handwritten impls to thiserror or vice versa is not a
+  breaking change.
+
+- Errors may be enums, structs with named fields, tuple structs, or unit
+  structs.
+
+- A `Display` impl is generated for your error if you provide `#[error("...")]`
+  messages on the struct or each variant of your enum, as shown above in the
+  example.
+
+  The messages support a shorthand for interpolating fields from the error.
+
+    - `#[error("{var}")]`&ensp;⟶&ensp;`write!("{}", self.var)`
+    - `#[error("{0}")]`&ensp;⟶&ensp;`write!("{}", self.0)`
+    - `#[error("{var:?}")]`&ensp;⟶&ensp;`write!("{:?}", self.var)`
+    - `#[error("{0:?}")]`&ensp;⟶&ensp;`write!("{:?}", self.0)`
+
+  These shorthands can be used together with any additional format args, which
+  may be arbitrary expressions. For example:
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub enum Error {
+      #[error("invalid rdo_lookahead_frames {0} (expected < {})", i32::MAX)]
+      InvalidLookahead(u32),
+  }
+  ```
+
+  If one of the additional expression arguments needs to refer to a field of the
+  struct or enum, then refer to named fields as `.var` and tuple fields as `.0`.
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub enum Error {
+      #[error("first letter must be lowercase but was {:?}", first_char(.0))]
+      WrongCase(String),
+      #[error("invalid index {idx}, expected at least {} and at most {}", .limits.lo, .limits.hi)]
+      OutOfBounds { idx: usize, limits: Limits },
+  }
+  ```
+
+- A `From` impl is generated for each variant containing a `#[from]` attribute.
+
+  Note that the variant must not contain any other fields beyond the source
+  error and possibly a backtrace. A backtrace is captured from within the `From`
+  impl if there is a field for it.
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub enum MyError {
+      Io {
+          #[from]
+          source: io::Error,
+          backtrace: Backtrace,
+      },
+  }
+  ```
+
+- The Error trait's `source()` method is implemented to return whichever field
+  has a `#[source]` attribute or is named `source`, if any. This is for
+  identifying the underlying lower level error that caused your error.
+
+  The `#[from]` attribute always implies that the same field is `#[source]`, so
+  you don't ever need to specify both attributes.
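+
+  A small sketch (the `ReadError` enum here is illustrative, not from the
+  example above) showing that a `#[from]` field doubles as the `source()`:
+
+  ```rust
+  use std::error::Error as _;
+  use std::io;
+  use thiserror::Error;
+
+  #[derive(Error, Debug)]
+  pub enum ReadError {
+      #[error("failed to read the stream")]
+      Io(#[from] io::Error),
+  }
+
+  fn demo() {
+      let err = ReadError::from(io::Error::new(io::ErrorKind::Other, "oh no"));
+      // No separate #[source] attribute needed: the #[from] field is the source.
+      assert!(err.source().unwrap().is::<io::Error>());
+  }
+  ```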
+
+  Any error type that implements `std::error::Error` or dereferences to `dyn
+  std::error::Error` will work as a source.
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub struct MyError {
+      msg: String,
+      #[source]  // optional if field name is `source`
+      source: anyhow::Error,
+  }
+  ```
+
+- The Error trait's `provide()` method is implemented to provide whichever field
+  has a type named `Backtrace`, if any, as a `std::backtrace::Backtrace`.
+
+  ```rust
+  use std::backtrace::Backtrace;
+
+  #[derive(Error, Debug)]
+  pub struct MyError {
+      msg: String,
+      backtrace: Backtrace,  // automatically detected
+  }
+  ```
+
+- If a field is both a source (named `source`, or has `#[source]` or `#[from]`
+  attribute) *and* is marked `#[backtrace]`, then the Error trait's `provide()`
+  method is forwarded to the source's `provide` so that both layers of the error
+  share the same backtrace.
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub enum MyError {
+      Io {
+          #[backtrace]
+          source: io::Error,
+      },
+  }
+  ```
+
+- Errors may use `#[error(transparent)]` to forward the source and Display methods
+  straight through to an underlying error without adding an additional message.
+  This would be appropriate for enums that need an "anything else" variant.
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub enum MyError {
+      ...
+
+      #[error(transparent)]
+      Other(#[from] anyhow::Error),  // source and Display delegate to anyhow::Error
+  }
+  ```
+
+  Another use case is hiding implementation details of an error representation
+  behind an opaque error type, so that the representation is able to evolve
+  without breaking the crate's public API.
+
+  ```rust
+  // PublicError is public, but opaque and easy to keep compatible.
+  #[derive(Error, Debug)]
+  #[error(transparent)]
+  pub struct PublicError(#[from] ErrorRepr);
+
+  impl PublicError {
+      // Accessors for anything we do want to expose publicly.
+  }
+
+  // Private and free to change across minor versions of the crate.
+  #[derive(Error, Debug)]
+  enum ErrorRepr {
+      ...
+  }
+  ```
+
+- See also the [`anyhow`] library for a convenient single error type to use in
+  application code.
+
+  [`anyhow`]: https://github.com/dtolnay/anyhow
+
+<br>
+
+## Comparison to anyhow
+
+Use thiserror if you care about designing your own dedicated error type(s) so
+that the caller receives exactly the information that you choose in the event of
+failure. This most often applies to library-like code. Use [Anyhow] if you don't
+care what error type your functions return; you just want it to be easy. This is
+common in application-like code.
+
+[Anyhow]: https://github.com/dtolnay/anyhow
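+
+As a quick sketch of the split (function and type names here are illustrative
+only):
+
+```rust
+use thiserror::Error;
+
+// Library-like code: callers match on a dedicated, documented error type.
+#[derive(Error, Debug)]
+#[error("unsupported scheme `{0}`")]
+pub struct SchemeError(pub String);
+
+pub fn check_scheme(url: &str) -> Result<(), SchemeError> {
+    if url.starts_with("https://") {
+        Ok(())
+    } else {
+        Err(SchemeError(url.to_owned()))
+    }
+}
+
+// Application-like code: anyhow erases the concrete type and `?` just works.
+fn main() -> anyhow::Result<()> {
+    check_scheme("https://example.com")?;
+    Ok(())
+}
+```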
+
+<br>
+
+#### License
+
+<sup>
+Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
+2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
+</sup>
+
+<br>
+
+<sub>
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
+</sub>
diff --git a/crates/thiserror/TEST_MAPPING b/crates/thiserror/TEST_MAPPING
new file mode 100644
index 0000000..6b4c996
--- /dev/null
+++ b/crates/thiserror/TEST_MAPPING
@@ -0,0 +1,68 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/anyhow"
+    },
+    {
+      "path": "external/rust/crates/jni"
+    },
+    {
+      "path": "external/rust/crates/serde-xml-rs"
+    },
+    {
+      "path": "external/uwb/src"
+    },
+    {
+      "path": "packages/modules/DnsResolver"
+    },
+    {
+      "path": "packages/modules/Virtualization/apkdmverity"
+    },
+    {
+      "path": "packages/modules/Virtualization/authfs"
+    },
+    {
+      "path": "packages/modules/Virtualization/avmd"
+    },
+    {
+      "path": "packages/modules/Virtualization/encryptedstore"
+    },
+    {
+      "path": "packages/modules/Virtualization/libs/apexutil"
+    },
+    {
+      "path": "packages/modules/Virtualization/libs/devicemapper"
+    },
+    {
+      "path": "packages/modules/Virtualization/microdroid_manager"
+    },
+    {
+      "path": "packages/modules/Virtualization/virtualizationmanager"
+    },
+    {
+      "path": "packages/modules/Virtualization/vm"
+    },
+    {
+      "path": "packages/modules/Virtualization/zipfuse"
+    },
+    {
+      "path": "system/keymint/hal"
+    },
+    {
+      "path": "system/security/diced"
+    },
+    {
+      "path": "system/security/keystore2"
+    },
+    {
+      "path": "system/security/keystore2/legacykeystore"
+    },
+    {
+      "path": "system/security/keystore2/selinux"
+    },
+    {
+      "path": "system/security/keystore2/src/crypto"
+    }
+  ]
+}
diff --git a/crates/thiserror/build.rs b/crates/thiserror/build.rs
new file mode 100644
index 0000000..7983a2b
--- /dev/null
+++ b/crates/thiserror/build.rs
@@ -0,0 +1,95 @@
+use std::env;
+use std::fs;
+use std::path::Path;
+use std::process::{Command, ExitStatus, Stdio};
+use std::str;
+
+// This code exercises the surface area that we expect of the Error generic
+// member access API. If the current toolchain is able to compile it, then
+// thiserror is able to provide backtrace support.
+const PROBE: &str = r#"
+    #![feature(error_generic_member_access)]
+
+    use std::error::{Error, Request};
+    use std::fmt::{self, Debug, Display};
+
+    struct MyError(Thing);
+    struct Thing;
+
+    impl Debug for MyError {
+        fn fmt(&self, _formatter: &mut fmt::Formatter) -> fmt::Result {
+            unimplemented!()
+        }
+    }
+
+    impl Display for MyError {
+        fn fmt(&self, _formatter: &mut fmt::Formatter) -> fmt::Result {
+            unimplemented!()
+        }
+    }
+
+    impl Error for MyError {
+        fn provide<'a>(&'a self, request: &mut Request<'a>) {
+            request.provide_ref(&self.0);
+        }
+    }
+"#;
+
+fn main() {
+    match compile_probe() {
+        Some(status) if status.success() => println!("cargo:rustc-cfg=error_generic_member_access"),
+        _ => {}
+    }
+}
+
+fn compile_probe() -> Option<ExitStatus> {
+    if env::var_os("RUSTC_STAGE").is_some() {
+        // We are running inside rustc bootstrap. This is a highly non-standard
+        // environment with issues such as:
+        //
+        //     https://github.com/rust-lang/cargo/issues/11138
+        //     https://github.com/rust-lang/rust/issues/114839
+        //
+        // Let's just not use nightly features here.
+        return None;
+    }
+
+    let rustc = env::var_os("RUSTC")?;
+    let out_dir = env::var_os("OUT_DIR")?;
+    let probefile = Path::new(&out_dir).join("probe.rs");
+    fs::write(&probefile, PROBE).ok()?;
+
+    // Make sure to pick up Cargo rustc configuration.
+    let mut cmd = if let Some(wrapper) = env::var_os("RUSTC_WRAPPER") {
+        let mut cmd = Command::new(wrapper);
+        // The wrapper's first argument is supposed to be the path to rustc.
+        cmd.arg(rustc);
+        cmd
+    } else {
+        Command::new(rustc)
+    };
+
+    cmd.stderr(Stdio::null())
+        .arg("--edition=2018")
+        .arg("--crate-name=thiserror_build")
+        .arg("--crate-type=lib")
+        .arg("--emit=metadata")
+        .arg("--out-dir")
+        .arg(out_dir)
+        .arg(probefile);
+
+    if let Some(target) = env::var_os("TARGET") {
+        cmd.arg("--target").arg(target);
+    }
+
+    // If Cargo wants to set RUSTFLAGS, use that.
+    if let Ok(rustflags) = env::var("CARGO_ENCODED_RUSTFLAGS") {
+        if !rustflags.is_empty() {
+            for arg in rustflags.split('\x1f') {
+                cmd.arg(arg);
+            }
+        }
+    }
+
+    cmd.status().ok()
+}
diff --git a/crates/thiserror/cargo_embargo.json b/crates/thiserror/cargo_embargo.json
new file mode 100644
index 0000000..3e8023e
--- /dev/null
+++ b/crates/thiserror/cargo_embargo.json
@@ -0,0 +1,8 @@
+{
+  "apex_available": [
+    "//apex_available:anyapex",
+    "//apex_available:platform"
+  ],
+  "min_sdk_version": "29",
+  "run_cargo": false
+}
diff --git a/crates/thiserror/rust-toolchain.toml b/crates/thiserror/rust-toolchain.toml
new file mode 100644
index 0000000..20fe888
--- /dev/null
+++ b/crates/thiserror/rust-toolchain.toml
@@ -0,0 +1,2 @@
+[toolchain]
+components = ["rust-src"]
diff --git a/crates/thiserror/src/aserror.rs b/crates/thiserror/src/aserror.rs
new file mode 100644
index 0000000..54fc6f1
--- /dev/null
+++ b/crates/thiserror/src/aserror.rs
@@ -0,0 +1,50 @@
+use std::error::Error;
+use std::panic::UnwindSafe;
+
+#[doc(hidden)]
+pub trait AsDynError<'a>: Sealed {
+    fn as_dyn_error(&self) -> &(dyn Error + 'a);
+}
+
+impl<'a, T: Error + 'a> AsDynError<'a> for T {
+    #[inline]
+    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
+        self
+    }
+}
+
+impl<'a> AsDynError<'a> for dyn Error + 'a {
+    #[inline]
+    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
+        self
+    }
+}
+
+impl<'a> AsDynError<'a> for dyn Error + Send + 'a {
+    #[inline]
+    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
+        self
+    }
+}
+
+impl<'a> AsDynError<'a> for dyn Error + Send + Sync + 'a {
+    #[inline]
+    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
+        self
+    }
+}
+
+impl<'a> AsDynError<'a> for dyn Error + Send + Sync + UnwindSafe + 'a {
+    #[inline]
+    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
+        self
+    }
+}
+
+#[doc(hidden)]
+pub trait Sealed {}
+impl<'a, T: Error + 'a> Sealed for T {}
+impl<'a> Sealed for dyn Error + 'a {}
+impl<'a> Sealed for dyn Error + Send + 'a {}
+impl<'a> Sealed for dyn Error + Send + Sync + 'a {}
+impl<'a> Sealed for dyn Error + Send + Sync + UnwindSafe + 'a {}
diff --git a/crates/thiserror/src/display.rs b/crates/thiserror/src/display.rs
new file mode 100644
index 0000000..27098f1
--- /dev/null
+++ b/crates/thiserror/src/display.rs
@@ -0,0 +1,40 @@
+use std::fmt::Display;
+use std::path::{self, Path, PathBuf};
+
+#[doc(hidden)]
+pub trait AsDisplay<'a> {
+    // TODO: convert to generic associated type.
+    // https://github.com/dtolnay/thiserror/pull/253
+    type Target: Display;
+
+    fn as_display(&'a self) -> Self::Target;
+}
+
+impl<'a, T> AsDisplay<'a> for &T
+where
+    T: Display + 'a,
+{
+    type Target = &'a T;
+
+    fn as_display(&'a self) -> Self::Target {
+        *self
+    }
+}
+
+impl<'a> AsDisplay<'a> for Path {
+    type Target = path::Display<'a>;
+
+    #[inline]
+    fn as_display(&'a self) -> Self::Target {
+        self.display()
+    }
+}
+
+impl<'a> AsDisplay<'a> for PathBuf {
+    type Target = path::Display<'a>;
+
+    #[inline]
+    fn as_display(&'a self) -> Self::Target {
+        self.display()
+    }
+}
diff --git a/crates/thiserror/src/lib.rs b/crates/thiserror/src/lib.rs
new file mode 100644
index 0000000..3242c1f
--- /dev/null
+++ b/crates/thiserror/src/lib.rs
@@ -0,0 +1,257 @@
+//! [![github]](https://github.com/dtolnay/thiserror)&ensp;[![crates-io]](https://crates.io/crates/thiserror)&ensp;[![docs-rs]](https://docs.rs/thiserror)
+//!
+//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
+//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
+//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs
+//!
+//! <br>
+//!
+//! This library provides a convenient derive macro for the standard library's
+//! [`std::error::Error`] trait.
+//!
+//! [`std::error::Error`]: https://doc.rust-lang.org/std/error/trait.Error.html
+//!
+//! <br>
+//!
+//! # Example
+//!
+//! ```rust
+//! # use std::io;
+//! use thiserror::Error;
+//!
+//! #[derive(Error, Debug)]
+//! pub enum DataStoreError {
+//!     #[error("data store disconnected")]
+//!     Disconnect(#[from] io::Error),
+//!     #[error("the data for key `{0}` is not available")]
+//!     Redaction(String),
+//!     #[error("invalid header (expected {expected:?}, found {found:?})")]
+//!     InvalidHeader {
+//!         expected: String,
+//!         found: String,
+//!     },
+//!     #[error("unknown data store error")]
+//!     Unknown,
+//! }
+//! ```
+//!
+//! <br>
+//!
+//! # Details
+//!
+//! - Thiserror deliberately does not appear in your public API. You get the
+//!   same thing as if you had written an implementation of `std::error::Error`
+//!   by hand, and switching from handwritten impls to thiserror or vice versa
+//!   is not a breaking change.
+//!
+//! - Errors may be enums, structs with named fields, tuple structs, or unit
+//!   structs.
+//!
+//! - A `Display` impl is generated for your error if you provide
+//!   `#[error("...")]` messages on the struct or each variant of your enum, as
+//!   shown above in the example.
+//!
+//!   The messages support a shorthand for interpolating fields from the error.
+//!
+//!     - `#[error("{var}")]`&ensp;⟶&ensp;`write!("{}", self.var)`
+//!     - `#[error("{0}")]`&ensp;⟶&ensp;`write!("{}", self.0)`
+//!     - `#[error("{var:?}")]`&ensp;⟶&ensp;`write!("{:?}", self.var)`
+//!     - `#[error("{0:?}")]`&ensp;⟶&ensp;`write!("{:?}", self.0)`
+//!
+//!   These shorthands can be used together with any additional format args,
+//!   which may be arbitrary expressions. For example:
+//!
+//!   ```rust
+//!   # use std::i32;
+//!   # use thiserror::Error;
+//!   #
+//!   #[derive(Error, Debug)]
+//!   pub enum Error {
+//!       #[error("invalid rdo_lookahead_frames {0} (expected < {})", i32::MAX)]
+//!       InvalidLookahead(u32),
+//!   }
+//!   ```
+//!
+//!   If one of the additional expression arguments needs to refer to a field of
+//!   the struct or enum, then refer to named fields as `.var` and tuple fields
+//!   as `.0`.
+//!
+//!   ```rust
+//!   # use thiserror::Error;
+//!   #
+//!   # fn first_char(s: &String) -> char {
+//!   #     s.chars().next().unwrap()
+//!   # }
+//!   #
+//!   # #[derive(Debug)]
+//!   # struct Limits {
+//!   #     lo: usize,
+//!   #     hi: usize,
+//!   # }
+//!   #
+//!   #[derive(Error, Debug)]
+//!   pub enum Error {
+//!       #[error("first letter must be lowercase but was {:?}", first_char(.0))]
+//!       WrongCase(String),
+//!       #[error("invalid index {idx}, expected at least {} and at most {}", .limits.lo, .limits.hi)]
+//!       OutOfBounds { idx: usize, limits: Limits },
+//!   }
+//!   ```
+//!
+//! - A `From` impl is generated for each variant containing a `#[from]`
+//!   attribute.
+//!
+//!   Note that the variant must not contain any other fields beyond the source
+//!   error and possibly a backtrace. A backtrace is captured from within the
+//!   `From` impl if there is a field for it.
+//!
+//!   ```rust
+//!   # const IGNORE: &str = stringify! {
+//!   #[derive(Error, Debug)]
+//!   pub enum MyError {
+//!       Io {
+//!           #[from]
+//!           source: io::Error,
+//!           backtrace: Backtrace,
+//!       },
+//!   }
+//!   # };
+//!   ```
+//!
+//! - The Error trait's `source()` method is implemented to return whichever
+//!   field has a `#[source]` attribute or is named `source`, if any. This is
+//!   for identifying the underlying lower level error that caused your error.
+//!
+//!   The `#[from]` attribute always implies that the same field is `#[source]`,
+//!   so you don't ever need to specify both attributes.
+//!
+//!   Any error type that implements `std::error::Error` or dereferences to `dyn
+//!   std::error::Error` will work as a source.
+//!
+//!   ```rust
+//!   # use std::fmt::{self, Display};
+//!   # use thiserror::Error;
+//!   #
+//!   #[derive(Error, Debug)]
+//!   pub struct MyError {
+//!       msg: String,
+//!       #[source]  // optional if field name is `source`
+//!       source: anyhow::Error,
+//!   }
+//!   #
+//!   # impl Display for MyError {
+//!   #     fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+//!   #         unimplemented!()
+//!   #     }
+//!   # }
+//!   ```
+//!
+//! - The Error trait's `provide()` method is implemented to provide whichever
+//!   field has a type named `Backtrace`, if any, as a
+//!   `std::backtrace::Backtrace`.
+//!
+//!   ```rust
+//!   # const IGNORE: &str = stringify! {
+//!   use std::backtrace::Backtrace;
+//!
+//!   #[derive(Error, Debug)]
+//!   pub struct MyError {
+//!       msg: String,
+//!       backtrace: Backtrace,  // automatically detected
+//!   }
+//!   # };
+//!   ```
+//!
+//! - If a field is both a source (named `source`, or has `#[source]` or
+//!   `#[from]` attribute) *and* is marked `#[backtrace]`, then the Error
+//!   trait's `provide()` method is forwarded to the source's `provide` so that
+//!   both layers of the error share the same backtrace.
+//!
+//!   ```rust
+//!   # const IGNORE: &str = stringify! {
+//!   #[derive(Error, Debug)]
+//!   pub enum MyError {
+//!       Io {
+//!           #[backtrace]
+//!           source: io::Error,
+//!       },
+//!   }
+//!   # };
+//!   ```
+//!
+//! - Errors may use `#[error(transparent)]` to forward the source and Display
+//!   methods straight through to an underlying error without adding an
+//!   additional message. This would be appropriate for enums that need an
+//!   "anything else" variant.
+//!
+//!   ```
+//!   # use thiserror::Error;
+//!   #
+//!   #[derive(Error, Debug)]
+//!   pub enum MyError {
+//!       # /*
+//!       ...
+//!       # */
+//!
+//!       #[error(transparent)]
+//!       Other(#[from] anyhow::Error),  // source and Display delegate to anyhow::Error
+//!   }
+//!   ```
+//!
+//!   Another use case is hiding implementation details of an error
+//!   representation behind an opaque error type, so that the representation is
+//!   able to evolve without breaking the crate's public API.
+//!
+//!   ```
+//!   # use thiserror::Error;
+//!   #
+//!   // PublicError is public, but opaque and easy to keep compatible.
+//!   #[derive(Error, Debug)]
+//!   #[error(transparent)]
+//!   pub struct PublicError(#[from] ErrorRepr);
+//!
+//!   impl PublicError {
+//!       // Accessors for anything we do want to expose publicly.
+//!   }
+//!
+//!   // Private and free to change across minor versions of the crate.
+//!   #[derive(Error, Debug)]
+//!   enum ErrorRepr {
+//!       # /*
+//!       ...
+//!       # */
+//!   }
+//!   ```
+//!
+//! - See also the [`anyhow`] library for a convenient single error type to use
+//!   in application code.
+//!
+//!   [`anyhow`]: https://github.com/dtolnay/anyhow
+
+#![doc(html_root_url = "https://docs.rs/thiserror/1.0.49")]
+#![allow(
+    clippy::module_name_repetitions,
+    clippy::needless_lifetimes,
+    clippy::return_self_not_must_use,
+    clippy::wildcard_imports
+)]
+#![cfg_attr(error_generic_member_access, feature(error_generic_member_access))]
+
+mod aserror;
+mod display;
+#[cfg(error_generic_member_access)]
+mod provide;
+
+pub use thiserror_impl::*;
+
+// Not public API.
+#[doc(hidden)]
+pub mod __private {
+    #[doc(hidden)]
+    pub use crate::aserror::AsDynError;
+    #[doc(hidden)]
+    pub use crate::display::AsDisplay;
+    #[cfg(error_generic_member_access)]
+    #[doc(hidden)]
+    pub use crate::provide::ThiserrorProvide;
+}
diff --git a/crates/thiserror/src/provide.rs b/crates/thiserror/src/provide.rs
new file mode 100644
index 0000000..7b4e922
--- /dev/null
+++ b/crates/thiserror/src/provide.rs
@@ -0,0 +1,20 @@
+use std::error::{Error, Request};
+
+#[doc(hidden)]
+pub trait ThiserrorProvide: Sealed {
+    fn thiserror_provide<'a>(&'a self, request: &mut Request<'a>);
+}
+
+impl<T> ThiserrorProvide for T
+where
+    T: Error + ?Sized,
+{
+    #[inline]
+    fn thiserror_provide<'a>(&'a self, request: &mut Request<'a>) {
+        self.provide(request);
+    }
+}
+
+#[doc(hidden)]
+pub trait Sealed {}
+impl<T: Error + ?Sized> Sealed for T {}
diff --git a/crates/thiserror/tests/compiletest.rs b/crates/thiserror/tests/compiletest.rs
new file mode 100644
index 0000000..7974a62
--- /dev/null
+++ b/crates/thiserror/tests/compiletest.rs
@@ -0,0 +1,7 @@
+#[rustversion::attr(not(nightly), ignore)]
+#[cfg_attr(miri, ignore)]
+#[test]
+fn ui() {
+    let t = trybuild::TestCases::new();
+    t.compile_fail("tests/ui/*.rs");
+}
diff --git a/crates/thiserror/tests/test_backtrace.rs b/crates/thiserror/tests/test_backtrace.rs
new file mode 100644
index 0000000..4710d45
--- /dev/null
+++ b/crates/thiserror/tests/test_backtrace.rs
@@ -0,0 +1,274 @@
+#![cfg_attr(thiserror_nightly_testing, feature(error_generic_member_access))]
+
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error("...")]
+pub struct Inner;
+
+#[cfg(thiserror_nightly_testing)]
+#[derive(Error, Debug)]
+#[error("...")]
+pub struct InnerBacktrace {
+    backtrace: std::backtrace::Backtrace,
+}
+
+#[cfg(thiserror_nightly_testing)]
+pub mod structs {
+    use super::{Inner, InnerBacktrace};
+    use std::backtrace::Backtrace;
+    use std::error::{self, Error};
+    use std::sync::Arc;
+    use thiserror::Error;
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct PlainBacktrace {
+        backtrace: Backtrace,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct ExplicitBacktrace {
+        #[backtrace]
+        backtrace: Backtrace,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct OptBacktrace {
+        #[backtrace]
+        backtrace: Option<Backtrace>,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct ArcBacktrace {
+        #[backtrace]
+        backtrace: Arc<Backtrace>,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct BacktraceFrom {
+        #[from]
+        source: Inner,
+        #[backtrace]
+        backtrace: Backtrace,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct CombinedBacktraceFrom {
+        #[from]
+        #[backtrace]
+        source: InnerBacktrace,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct OptBacktraceFrom {
+        #[from]
+        source: Inner,
+        #[backtrace]
+        backtrace: Option<Backtrace>,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct ArcBacktraceFrom {
+        #[from]
+        source: Inner,
+        #[backtrace]
+        backtrace: Arc<Backtrace>,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct AnyhowBacktrace {
+        #[backtrace]
+        source: anyhow::Error,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct BoxDynErrorBacktrace {
+        #[backtrace]
+        source: Box<dyn Error>,
+    }
+
+    #[test]
+    fn test_backtrace() {
+        let error = PlainBacktrace {
+            backtrace: Backtrace::capture(),
+        };
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = ExplicitBacktrace {
+            backtrace: Backtrace::capture(),
+        };
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = OptBacktrace {
+            backtrace: Some(Backtrace::capture()),
+        };
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = ArcBacktrace {
+            backtrace: Arc::new(Backtrace::capture()),
+        };
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = BacktraceFrom::from(Inner);
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = CombinedBacktraceFrom::from(InnerBacktrace {
+            backtrace: Backtrace::capture(),
+        });
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = OptBacktraceFrom::from(Inner);
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = ArcBacktraceFrom::from(Inner);
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = AnyhowBacktrace {
+            source: anyhow::Error::msg("..."),
+        };
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = BoxDynErrorBacktrace {
+            source: Box::new(PlainBacktrace {
+                backtrace: Backtrace::capture(),
+            }),
+        };
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+    }
+}
+
+#[cfg(thiserror_nightly_testing)]
+pub mod enums {
+    use super::{Inner, InnerBacktrace};
+    use std::backtrace::Backtrace;
+    use std::error;
+    use std::sync::Arc;
+    use thiserror::Error;
+
+    #[derive(Error, Debug)]
+    pub enum PlainBacktrace {
+        #[error("...")]
+        Test { backtrace: Backtrace },
+    }
+
+    #[derive(Error, Debug)]
+    pub enum ExplicitBacktrace {
+        #[error("...")]
+        Test {
+            #[backtrace]
+            backtrace: Backtrace,
+        },
+    }
+
+    #[derive(Error, Debug)]
+    pub enum OptBacktrace {
+        #[error("...")]
+        Test {
+            #[backtrace]
+            backtrace: Option<Backtrace>,
+        },
+    }
+
+    #[derive(Error, Debug)]
+    pub enum ArcBacktrace {
+        #[error("...")]
+        Test {
+            #[backtrace]
+            backtrace: Arc<Backtrace>,
+        },
+    }
+
+    #[derive(Error, Debug)]
+    pub enum BacktraceFrom {
+        #[error("...")]
+        Test {
+            #[from]
+            source: Inner,
+            #[backtrace]
+            backtrace: Backtrace,
+        },
+    }
+
+    #[derive(Error, Debug)]
+    pub enum CombinedBacktraceFrom {
+        #[error("...")]
+        Test {
+            #[from]
+            #[backtrace]
+            source: InnerBacktrace,
+        },
+    }
+
+    #[derive(Error, Debug)]
+    pub enum OptBacktraceFrom {
+        #[error("...")]
+        Test {
+            #[from]
+            source: Inner,
+            #[backtrace]
+            backtrace: Option<Backtrace>,
+        },
+    }
+
+    #[derive(Error, Debug)]
+    pub enum ArcBacktraceFrom {
+        #[error("...")]
+        Test {
+            #[from]
+            source: Inner,
+            #[backtrace]
+            backtrace: Arc<Backtrace>,
+        },
+    }
+
+    #[test]
+    fn test_backtrace() {
+        let error = PlainBacktrace::Test {
+            backtrace: Backtrace::capture(),
+        };
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = ExplicitBacktrace::Test {
+            backtrace: Backtrace::capture(),
+        };
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = OptBacktrace::Test {
+            backtrace: Some(Backtrace::capture()),
+        };
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = ArcBacktrace::Test {
+            backtrace: Arc::new(Backtrace::capture()),
+        };
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = BacktraceFrom::from(Inner);
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = CombinedBacktraceFrom::from(InnerBacktrace {
+            backtrace: Backtrace::capture(),
+        });
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = OptBacktraceFrom::from(Inner);
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+
+        let error = ArcBacktraceFrom::from(Inner);
+        assert!(error::request_ref::<Backtrace>(&error).is_some());
+    }
+}
+
+#[test]
+#[cfg_attr(not(thiserror_nightly_testing), ignore)]
+fn test_backtrace() {}
diff --git a/crates/thiserror/tests/test_deprecated.rs b/crates/thiserror/tests/test_deprecated.rs
new file mode 100644
index 0000000..5524666
--- /dev/null
+++ b/crates/thiserror/tests/test_deprecated.rs
@@ -0,0 +1,10 @@
+#![deny(deprecated, clippy::all, clippy::pedantic)]
+
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum Error {
+    #[deprecated]
+    #[error("...")]
+    Deprecated,
+}
diff --git a/crates/thiserror/tests/test_display.rs b/crates/thiserror/tests/test_display.rs
new file mode 100644
index 0000000..6f60388
--- /dev/null
+++ b/crates/thiserror/tests/test_display.rs
@@ -0,0 +1,303 @@
+#![allow(clippy::uninlined_format_args)]
+
+use std::fmt::{self, Display};
+use thiserror::Error;
+
+fn assert<T: Display>(expected: &str, value: T) {
+    assert_eq!(expected, value.to_string());
+}
+
+#[test]
+fn test_braced() {
+    #[derive(Error, Debug)]
+    #[error("braced error: {msg}")]
+    struct Error {
+        msg: String,
+    }
+
+    let msg = "T".to_owned();
+    assert("braced error: T", Error { msg });
+}
+
+#[test]
+fn test_braced_unused() {
+    #[derive(Error, Debug)]
+    #[error("braced error")]
+    struct Error {
+        extra: usize,
+    }
+
+    assert("braced error", Error { extra: 0 });
+}
+
+#[test]
+fn test_tuple() {
+    #[derive(Error, Debug)]
+    #[error("tuple error: {0}")]
+    struct Error(usize);
+
+    assert("tuple error: 0", Error(0));
+}
+
+#[test]
+fn test_unit() {
+    #[derive(Error, Debug)]
+    #[error("unit error")]
+    struct Error;
+
+    assert("unit error", Error);
+}
+
+#[test]
+fn test_enum() {
+    #[derive(Error, Debug)]
+    enum Error {
+        #[error("braced error: {id}")]
+        Braced { id: usize },
+        #[error("tuple error: {0}")]
+        Tuple(usize),
+        #[error("unit error")]
+        Unit,
+    }
+
+    assert("braced error: 0", Error::Braced { id: 0 });
+    assert("tuple error: 0", Error::Tuple(0));
+    assert("unit error", Error::Unit);
+}
+
+#[test]
+fn test_constants() {
+    #[derive(Error, Debug)]
+    #[error("{MSG}: {id:?} (code {CODE:?})")]
+    struct Error {
+        id: &'static str,
+    }
+
+    const MSG: &str = "failed to do";
+    const CODE: usize = 9;
+
+    assert("failed to do: \"\" (code 9)", Error { id: "" });
+}
+
+#[test]
+fn test_inherit() {
+    #[derive(Error, Debug)]
+    #[error("{0}")]
+    enum Error {
+        Some(&'static str),
+        #[error("other error")]
+        Other(&'static str),
+    }
+
+    assert("some error", Error::Some("some error"));
+    assert("other error", Error::Other("..."));
+}
+
+#[test]
+fn test_brace_escape() {
+    #[derive(Error, Debug)]
+    #[error("fn main() {{}}")]
+    struct Error;
+
+    assert("fn main() {}", Error);
+}
+
+#[test]
+fn test_expr() {
+    #[derive(Error, Debug)]
+    #[error("1 + 1 = {}", 1 + 1)]
+    struct Error;
+    assert("1 + 1 = 2", Error);
+}
+
+#[test]
+fn test_nested() {
+    #[derive(Error, Debug)]
+    #[error("!bool = {}", not(.0))]
+    struct Error(bool);
+
+    #[allow(clippy::trivially_copy_pass_by_ref)]
+    fn not(bool: &bool) -> bool {
+        !*bool
+    }
+
+    assert("!bool = false", Error(true));
+}
+
+#[test]
+fn test_match() {
+    #[derive(Error, Debug)]
+    #[error("{}: {0}", match .1 {
+        Some(n) => format!("error occurred with {}", n),
+        None => "there was an empty error".to_owned(),
+    })]
+    struct Error(String, Option<usize>);
+
+    assert(
+        "error occurred with 1: ...",
+        Error("...".to_owned(), Some(1)),
+    );
+    assert(
+        "there was an empty error: ...",
+        Error("...".to_owned(), None),
+    );
+}
+
+#[test]
+fn test_nested_display() {
+    // Same behavior as the one in `test_match`, but without String allocations.
+    #[derive(Error, Debug)]
+    #[error("{}", {
+        struct Msg<'a>(&'a String, &'a Option<usize>);
+        impl<'a> Display for Msg<'a> {
+            fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                match self.1 {
+                    Some(n) => write!(formatter, "error occurred with {}", n),
+                    None => write!(formatter, "there was an empty error"),
+                }?;
+                write!(formatter, ": {}", self.0)
+            }
+        }
+        Msg(.0, .1)
+    })]
+    struct Error(String, Option<usize>);
+
+    assert(
+        "error occurred with 1: ...",
+        Error("...".to_owned(), Some(1)),
+    );
+    assert(
+        "there was an empty error: ...",
+        Error("...".to_owned(), None),
+    );
+}
+
+#[test]
+fn test_void() {
+    #[allow(clippy::empty_enum)]
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub enum Error {}
+
+    let _: Error;
+}
+
+#[test]
+fn test_mixed() {
+    #[derive(Error, Debug)]
+    #[error("a={a} :: b={} :: c={c} :: d={d}", 1, c = 2, d = 3)]
+    struct Error {
+        a: usize,
+        d: usize,
+    }
+
+    assert("a=0 :: b=1 :: c=2 :: d=3", Error { a: 0, d: 0 });
+}
+
+#[test]
+fn test_ints() {
+    #[derive(Error, Debug)]
+    enum Error {
+        #[error("error {0}")]
+        Tuple(usize, usize),
+        #[error("error {0}", '?')]
+        Struct { v: usize },
+    }
+
+    assert("error 9", Error::Tuple(9, 0));
+    assert("error ?", Error::Struct { v: 0 });
+}
+
+#[test]
+fn test_trailing_comma() {
+    #[derive(Error, Debug)]
+    #[error(
+        "error {0}",
+    )]
+    #[rustfmt::skip]
+    struct Error(char);
+
+    assert("error ?", Error('?'));
+}
+
+#[test]
+fn test_field() {
+    #[derive(Debug)]
+    struct Inner {
+        data: usize,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("{}", .0.data)]
+    struct Error(Inner);
+
+    assert("0", Error(Inner { data: 0 }));
+}
+
+#[test]
+fn test_macro_rules() {
+    // Regression test for https://github.com/dtolnay/thiserror/issues/86
+
+    macro_rules! decl_error {
+        ($variant:ident($value:ident)) => {
+            #[derive(Debug, Error)]
+            pub enum Error0 {
+                #[error("{0:?}")]
+                $variant($value),
+            }
+
+            #[derive(Debug, Error)]
+            #[error("{0:?}")]
+            pub enum Error1 {
+                $variant($value),
+            }
+        };
+    }
+
+    decl_error!(Repro(u8));
+
+    assert("0", Error0::Repro(0));
+    assert("0", Error1::Repro(0));
+}
+
+#[test]
+fn test_raw() {
+    #[derive(Error, Debug)]
+    #[error("braced raw error: {r#fn}")]
+    struct Error {
+        r#fn: &'static str,
+    }
+
+    assert("braced raw error: T", Error { r#fn: "T" });
+}
+
+#[test]
+fn test_raw_enum() {
+    #[derive(Error, Debug)]
+    enum Error {
+        #[error("braced raw error: {r#fn}")]
+        Braced { r#fn: &'static str },
+    }
+
+    assert("braced raw error: T", Error::Braced { r#fn: "T" });
+}
+
+#[test]
+fn test_raw_conflict() {
+    #[derive(Error, Debug)]
+    enum Error {
+        #[error("braced raw error: {r#func}, {func}", func = "U")]
+        Braced { r#func: &'static str },
+    }
+
+    assert("braced raw error: T, U", Error::Braced { r#func: "T" });
+}
+
+#[test]
+fn test_keyword() {
+    #[derive(Error, Debug)]
+    #[error("error: {type}", type = 1)]
+    struct Error;
+
+    assert("error: 1", Error);
+}
diff --git a/crates/thiserror/tests/test_error.rs b/crates/thiserror/tests/test_error.rs
new file mode 100644
index 0000000..fab934d
--- /dev/null
+++ b/crates/thiserror/tests/test_error.rs
@@ -0,0 +1,56 @@
+#![allow(dead_code)]
+
+use std::fmt::{self, Display};
+use std::io;
+use thiserror::Error;
+
+macro_rules! unimplemented_display {
+    ($ty:ty) => {
+        impl Display for $ty {
+            fn fmt(&self, _formatter: &mut fmt::Formatter) -> fmt::Result {
+                unimplemented!()
+            }
+        }
+    };
+}
+
+#[derive(Error, Debug)]
+struct BracedError {
+    msg: String,
+    pos: usize,
+}
+
+#[derive(Error, Debug)]
+struct TupleError(String, usize);
+
+#[derive(Error, Debug)]
+struct UnitError;
+
+#[derive(Error, Debug)]
+struct WithSource {
+    #[source]
+    cause: io::Error,
+}
+
+#[derive(Error, Debug)]
+struct WithAnyhow {
+    #[source]
+    cause: anyhow::Error,
+}
+
+#[derive(Error, Debug)]
+enum EnumError {
+    Braced {
+        #[source]
+        cause: io::Error,
+    },
+    Tuple(#[source] io::Error),
+    Unit,
+}
+
+unimplemented_display!(BracedError);
+unimplemented_display!(TupleError);
+unimplemented_display!(UnitError);
+unimplemented_display!(WithSource);
+unimplemented_display!(WithAnyhow);
+unimplemented_display!(EnumError);
diff --git a/crates/thiserror/tests/test_expr.rs b/crates/thiserror/tests/test_expr.rs
new file mode 100644
index 0000000..8db097b
--- /dev/null
+++ b/crates/thiserror/tests/test_expr.rs
@@ -0,0 +1,92 @@
+#![allow(
+    clippy::iter_cloned_collect,
+    clippy::option_if_let_else,
+    clippy::uninlined_format_args
+)]
+
+use std::fmt::Display;
+use thiserror::Error;
+
+// Some of the elaborate cases from the rcc codebase, which is a C compiler in
+// Rust. https://github.com/jyn514/rcc/blob/0.8.0/src/data/error.rs
+#[derive(Error, Debug)]
+pub enum CompilerError {
+    #[error("cannot shift {} by {maximum} or more bits (got {current})", if *.is_left { "left" } else { "right" })]
+    TooManyShiftBits {
+        is_left: bool,
+        maximum: u64,
+        current: u64,
+    },
+
+    #[error("#error {}", (.0).iter().copied().collect::<Vec<_>>().join(" "))]
+    User(Vec<&'static str>),
+
+    #[error("overflow while parsing {}integer literal",
+        if let Some(signed) = .is_signed {
+            if *signed { "signed "} else { "unsigned "}
+        } else {
+            ""
+        }
+    )]
+    IntegerOverflow { is_signed: Option<bool> },
+
+    #[error("overflow while parsing {}integer literal", match .is_signed {
+        Some(true) => "signed ",
+        Some(false) => "unsigned ",
+        None => "",
+    })]
+    IntegerOverflow2 { is_signed: Option<bool> },
+}
+
+// Examples drawn from Rustup.
+#[derive(Error, Debug)]
+pub enum RustupError {
+    #[error(
+        "toolchain '{name}' does not contain component {component}{}",
+        .suggestion
+            .as_ref()
+            .map_or_else(String::new, |s| format!("; did you mean '{}'?", s)),
+    )]
+    UnknownComponent {
+        name: String,
+        component: String,
+        suggestion: Option<String>,
+    },
+}
+
+fn assert<T: Display>(expected: &str, value: T) {
+    assert_eq!(expected, value.to_string());
+}
+
+#[test]
+fn test_rcc() {
+    assert(
+        "cannot shift left by 32 or more bits (got 50)",
+        CompilerError::TooManyShiftBits {
+            is_left: true,
+            maximum: 32,
+            current: 50,
+        },
+    );
+
+    assert("#error A B C", CompilerError::User(vec!["A", "B", "C"]));
+
+    assert(
+        "overflow while parsing signed integer literal",
+        CompilerError::IntegerOverflow {
+            is_signed: Some(true),
+        },
+    );
+}
+
+#[test]
+fn test_rustup() {
+    assert(
+        "toolchain 'nightly' does not contain component clipy; did you mean 'clippy'?",
+        RustupError::UnknownComponent {
+            name: "nightly".to_owned(),
+            component: "clipy".to_owned(),
+            suggestion: Some("clippy".to_owned()),
+        },
+    );
+}
diff --git a/crates/thiserror/tests/test_from.rs b/crates/thiserror/tests/test_from.rs
new file mode 100644
index 0000000..51af40b
--- /dev/null
+++ b/crates/thiserror/tests/test_from.rs
@@ -0,0 +1,64 @@
+#![allow(clippy::extra_unused_type_parameters)]
+
+use std::io;
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error("...")]
+pub struct ErrorStruct {
+    #[from]
+    source: io::Error,
+}
+
+#[derive(Error, Debug)]
+#[error("...")]
+pub struct ErrorStructOptional {
+    #[from]
+    source: Option<io::Error>,
+}
+
+#[derive(Error, Debug)]
+#[error("...")]
+pub struct ErrorTuple(#[from] io::Error);
+
+#[derive(Error, Debug)]
+#[error("...")]
+pub struct ErrorTupleOptional(#[from] Option<io::Error>);
+
+#[derive(Error, Debug)]
+#[error("...")]
+pub enum ErrorEnum {
+    Test {
+        #[from]
+        source: io::Error,
+    },
+}
+
+#[derive(Error, Debug)]
+#[error("...")]
+pub enum ErrorEnumOptional {
+    Test {
+        #[from]
+        source: Option<io::Error>,
+    },
+}
+
+#[derive(Error, Debug)]
+#[error("...")]
+pub enum Many {
+    Any(#[from] anyhow::Error),
+    Io(#[from] io::Error),
+}
+
+fn assert_impl<T: From<io::Error>>() {}
+
+#[test]
+fn test_from() {
+    assert_impl::<ErrorStruct>();
+    assert_impl::<ErrorStructOptional>();
+    assert_impl::<ErrorTuple>();
+    assert_impl::<ErrorTupleOptional>();
+    assert_impl::<ErrorEnum>();
+    assert_impl::<ErrorEnumOptional>();
+    assert_impl::<Many>();
+}
diff --git a/crates/thiserror/tests/test_generics.rs b/crates/thiserror/tests/test_generics.rs
new file mode 100644
index 0000000..c94d95e
--- /dev/null
+++ b/crates/thiserror/tests/test_generics.rs
@@ -0,0 +1,161 @@
+#![allow(clippy::needless_late_init, clippy::uninlined_format_args)]
+
+use std::fmt::{self, Debug, Display};
+use thiserror::Error;
+
+pub struct NoFormat;
+
+#[derive(Debug)]
+pub struct DebugOnly;
+
+pub struct DisplayOnly;
+
+impl Display for DisplayOnly {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.write_str("display only")
+    }
+}
+
+#[derive(Debug)]
+pub struct DebugAndDisplay;
+
+impl Display for DebugAndDisplay {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.write_str("debug and display")
+    }
+}
+
+// Should expand to:
+//
+//     impl<E> Display for EnumDebugField<E>
+//     where
+//         E: Debug;
+//
+//     impl<E> Error for EnumDebugField<E>
+//     where
+//         Self: Debug + Display;
+//
+#[derive(Error, Debug)]
+pub enum EnumDebugGeneric<E> {
+    #[error("{0:?}")]
+    FatalError(E),
+}
+
+// Should expand to:
+//
+//     impl<E> Display for EnumFromGeneric<E>;
+//
+//     impl<E> Error for EnumFromGeneric<E>
+//     where
+//         EnumDebugGeneric<E>: Error + 'static,
+//         Self: Debug + Display;
+//
+#[derive(Error, Debug)]
+pub enum EnumFromGeneric<E> {
+    #[error("enum from generic")]
+    Source(#[from] EnumDebugGeneric<E>),
+}
+
+// Should expand to:
+//
+//     impl<HasDisplay, HasDebug, HasNeither> Display
+//         for EnumCompound<HasDisplay, HasDebug, HasNeither>
+//     where
+//         HasDisplay: Display,
+//         HasDebug: Debug;
+//
+//     impl<HasDisplay, HasDebug, HasNeither> Error
+//         for EnumCompound<HasDisplay, HasDebug, HasNeither>
+//     where
+//         Self: Debug + Display;
+//
+#[derive(Error)]
+pub enum EnumCompound<HasDisplay, HasDebug, HasNeither> {
+    #[error("{0} {1:?}")]
+    DisplayDebug(HasDisplay, HasDebug),
+    #[error("{0}")]
+    Display(HasDisplay, HasNeither),
+    #[error("{1:?}")]
+    Debug(HasNeither, HasDebug),
+}
+
+impl<HasDisplay, HasDebug, HasNeither> Debug for EnumCompound<HasDisplay, HasDebug, HasNeither> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.write_str("EnumCompound")
+    }
+}
+
+#[test]
+fn test_display_enum_compound() {
+    let mut instance: EnumCompound<DisplayOnly, DebugOnly, NoFormat>;
+
+    instance = EnumCompound::DisplayDebug(DisplayOnly, DebugOnly);
+    assert_eq!(format!("{}", instance), "display only DebugOnly");
+
+    instance = EnumCompound::Display(DisplayOnly, NoFormat);
+    assert_eq!(format!("{}", instance), "display only");
+
+    instance = EnumCompound::Debug(NoFormat, DebugOnly);
+    assert_eq!(format!("{}", instance), "DebugOnly");
+}
+
+// Should expand to:
+//
+//     impl<E> Display for EnumTransparentGeneric<E>
+//     where
+//         E: Display;
+//
+//     impl<E> Error for EnumTransparentGeneric<E>
+//     where
+//         E: Error,
+//         Self: Debug + Display;
+//
+#[derive(Error, Debug)]
+pub enum EnumTransparentGeneric<E> {
+    #[error(transparent)]
+    Other(E),
+}
+
+// Should expand to:
+//
+//     impl<E> Display for StructDebugGeneric<E>
+//     where
+//         E: Debug;
+//
+//     impl<E> Error for StructDebugGeneric<E>
+//     where
+//         Self: Debug + Display;
+//
+#[derive(Error, Debug)]
+#[error("{underlying:?}")]
+pub struct StructDebugGeneric<E> {
+    pub underlying: E,
+}
+
+// Should expand to:
+//
+//     impl<E> Error for StructFromGeneric<E>
+//     where
+//         StructDebugGeneric<E>: Error + 'static,
+//         Self: Debug + Display;
+//
+#[derive(Error, Debug)]
+pub struct StructFromGeneric<E> {
+    #[from]
+    pub source: StructDebugGeneric<E>,
+}
+
+// Should expand to:
+//
+//     impl<E> Display for StructTransparentGeneric<E>
+//     where
+//         E: Display;
+//
+//     impl<E> Error for StructTransparentGeneric<E>
+//     where
+//         E: Error,
+//         Self: Debug + Display;
+//
+#[derive(Error, Debug)]
+#[error(transparent)]
+pub struct StructTransparentGeneric<E>(E);
diff --git a/crates/thiserror/tests/test_lints.rs b/crates/thiserror/tests/test_lints.rs
new file mode 100644
index 0000000..59699a4
--- /dev/null
+++ b/crates/thiserror/tests/test_lints.rs
@@ -0,0 +1,18 @@
+use thiserror::Error;
+
+pub use std::error::Error;
+
+#[test]
+fn test_unused_qualifications() {
+    #![deny(unused_qualifications)]
+
+    // Expansion of derive(Error) macro can't know whether something like
+    // std::error::Error is already imported in the caller's scope so it must
+    // suppress unused_qualifications.
+
+    #[derive(Debug, Error)]
+    #[error("...")]
+    pub struct MyError;
+
+    let _: MyError;
+}
diff --git a/crates/thiserror/tests/test_option.rs b/crates/thiserror/tests/test_option.rs
new file mode 100644
index 0000000..232e5a3
--- /dev/null
+++ b/crates/thiserror/tests/test_option.rs
@@ -0,0 +1,105 @@
+#![cfg_attr(thiserror_nightly_testing, feature(error_generic_member_access))]
+
+#[cfg(thiserror_nightly_testing)]
+pub mod structs {
+    use std::backtrace::Backtrace;
+    use thiserror::Error;
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct OptSourceNoBacktrace {
+        #[source]
+        source: Option<anyhow::Error>,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct OptSourceAlwaysBacktrace {
+        #[source]
+        source: Option<anyhow::Error>,
+        backtrace: Backtrace,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct NoSourceOptBacktrace {
+        #[backtrace]
+        backtrace: Option<Backtrace>,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct AlwaysSourceOptBacktrace {
+        source: anyhow::Error,
+        #[backtrace]
+        backtrace: Option<Backtrace>,
+    }
+
+    #[derive(Error, Debug)]
+    #[error("...")]
+    pub struct OptSourceOptBacktrace {
+        #[source]
+        source: Option<anyhow::Error>,
+        #[backtrace]
+        backtrace: Option<Backtrace>,
+    }
+}
+
+#[cfg(thiserror_nightly_testing)]
+pub mod enums {
+    use std::backtrace::Backtrace;
+    use thiserror::Error;
+
+    #[derive(Error, Debug)]
+    pub enum OptSourceNoBacktrace {
+        #[error("...")]
+        Test {
+            #[source]
+            source: Option<anyhow::Error>,
+        },
+    }
+
+    #[derive(Error, Debug)]
+    pub enum OptSourceAlwaysBacktrace {
+        #[error("...")]
+        Test {
+            #[source]
+            source: Option<anyhow::Error>,
+            backtrace: Backtrace,
+        },
+    }
+
+    #[derive(Error, Debug)]
+    pub enum NoSourceOptBacktrace {
+        #[error("...")]
+        Test {
+            #[backtrace]
+            backtrace: Option<Backtrace>,
+        },
+    }
+
+    #[derive(Error, Debug)]
+    pub enum AlwaysSourceOptBacktrace {
+        #[error("...")]
+        Test {
+            source: anyhow::Error,
+            #[backtrace]
+            backtrace: Option<Backtrace>,
+        },
+    }
+
+    #[derive(Error, Debug)]
+    pub enum OptSourceOptBacktrace {
+        #[error("...")]
+        Test {
+            #[source]
+            source: Option<anyhow::Error>,
+            #[backtrace]
+            backtrace: Option<Backtrace>,
+        },
+    }
+}
+
+#[test]
+#[cfg_attr(not(thiserror_nightly_testing), ignore)]
+fn test_option() {}
diff --git a/crates/thiserror/tests/test_path.rs b/crates/thiserror/tests/test_path.rs
new file mode 100644
index 0000000..a34a3d7
--- /dev/null
+++ b/crates/thiserror/tests/test_path.rs
@@ -0,0 +1,37 @@
+use ref_cast::RefCast;
+use std::fmt::Display;
+use std::path::{Path, PathBuf};
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error("failed to read '{file}'")]
+struct StructPathBuf {
+    file: PathBuf,
+}
+
+#[derive(Error, Debug, RefCast)]
+#[repr(C)]
+#[error("failed to read '{file}'")]
+struct StructPath {
+    file: Path,
+}
+
+#[derive(Error, Debug)]
+enum EnumPathBuf {
+    #[error("failed to read '{0}'")]
+    Read(PathBuf),
+}
+
+fn assert<T: Display>(expected: &str, value: T) {
+    assert_eq!(expected, value.to_string());
+}
+
+#[test]
+fn test_display() {
+    let path = Path::new("/thiserror");
+    let file = path.to_owned();
+    assert("failed to read '/thiserror'", StructPathBuf { file });
+    let file = path.to_owned();
+    assert("failed to read '/thiserror'", EnumPathBuf::Read(file));
+    assert("failed to read '/thiserror'", StructPath::ref_cast(path));
+}
diff --git a/crates/thiserror/tests/test_source.rs b/crates/thiserror/tests/test_source.rs
new file mode 100644
index 0000000..637f4ac
--- /dev/null
+++ b/crates/thiserror/tests/test_source.rs
@@ -0,0 +1,65 @@
+use std::error::Error as StdError;
+use std::io;
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error("implicit source")]
+pub struct ImplicitSource {
+    source: io::Error,
+}
+
+#[derive(Error, Debug)]
+#[error("explicit source")]
+pub struct ExplicitSource {
+    source: String,
+    #[source]
+    io: io::Error,
+}
+
+#[derive(Error, Debug)]
+#[error("boxed source")]
+pub struct BoxedSource {
+    #[source]
+    source: Box<dyn StdError + Send + 'static>,
+}
+
+#[test]
+fn test_implicit_source() {
+    let io = io::Error::new(io::ErrorKind::Other, "oh no!");
+    let error = ImplicitSource { source: io };
+    error.source().unwrap().downcast_ref::<io::Error>().unwrap();
+}
+
+#[test]
+fn test_explicit_source() {
+    let io = io::Error::new(io::ErrorKind::Other, "oh no!");
+    let error = ExplicitSource {
+        source: String::new(),
+        io,
+    };
+    error.source().unwrap().downcast_ref::<io::Error>().unwrap();
+}
+
+#[test]
+fn test_boxed_source() {
+    let source = Box::new(io::Error::new(io::ErrorKind::Other, "oh no!"));
+    let error = BoxedSource { source };
+    error.source().unwrap().downcast_ref::<io::Error>().unwrap();
+}
+
+macro_rules! error_from_macro {
+    ($($variants:tt)*) => {
+        #[derive(Error)]
+        #[derive(Debug)]
+        pub enum MacroSource {
+            $($variants)*
+        }
+    }
+}
+
+// Test that we generate impls with the proper hygiene
+#[rustfmt::skip]
+error_from_macro! {
+    #[error("Something")]
+    Variant(#[from] io::Error)
+}
diff --git a/crates/thiserror/tests/test_transparent.rs b/crates/thiserror/tests/test_transparent.rs
new file mode 100644
index 0000000..6f3c03e
--- /dev/null
+++ b/crates/thiserror/tests/test_transparent.rs
@@ -0,0 +1,78 @@
+use anyhow::anyhow;
+use std::error::Error as _;
+use std::io;
+use thiserror::Error;
+
+#[test]
+fn test_transparent_struct() {
+    #[derive(Error, Debug)]
+    #[error(transparent)]
+    struct Error(ErrorKind);
+
+    #[derive(Error, Debug)]
+    enum ErrorKind {
+        #[error("E0")]
+        E0,
+        #[error("E1")]
+        E1(#[from] io::Error),
+    }
+
+    let error = Error(ErrorKind::E0);
+    assert_eq!("E0", error.to_string());
+    assert!(error.source().is_none());
+
+    let io = io::Error::new(io::ErrorKind::Other, "oh no!");
+    let error = Error(ErrorKind::from(io));
+    assert_eq!("E1", error.to_string());
+    error.source().unwrap().downcast_ref::<io::Error>().unwrap();
+}
+
+#[test]
+fn test_transparent_enum() {
+    #[derive(Error, Debug)]
+    enum Error {
+        #[error("this failed")]
+        This,
+        #[error(transparent)]
+        Other(anyhow::Error),
+    }
+
+    let error = Error::This;
+    assert_eq!("this failed", error.to_string());
+
+    let error = Error::Other(anyhow!("inner").context("outer"));
+    assert_eq!("outer", error.to_string());
+    assert_eq!("inner", error.source().unwrap().to_string());
+}
+
+#[test]
+fn test_anyhow() {
+    #[derive(Error, Debug)]
+    #[error(transparent)]
+    struct Any(#[from] anyhow::Error);
+
+    let error = Any::from(anyhow!("inner").context("outer"));
+    assert_eq!("outer", error.to_string());
+    assert_eq!("inner", error.source().unwrap().to_string());
+}
+
+#[test]
+fn test_non_static() {
+    #[derive(Error, Debug)]
+    #[error(transparent)]
+    struct Error<'a> {
+        inner: ErrorKind<'a>,
+    }
+
+    #[derive(Error, Debug)]
+    enum ErrorKind<'a> {
+        #[error("unexpected token: {:?}", token)]
+        Unexpected { token: &'a str },
+    }
+
+    let error = Error {
+        inner: ErrorKind::Unexpected { token: "error" },
+    };
+    assert_eq!("unexpected token: \"error\"", error.to_string());
+    assert!(error.source().is_none());
+}
diff --git a/crates/thiserror/tests/ui/bad-field-attr.rs b/crates/thiserror/tests/ui/bad-field-attr.rs
new file mode 100644
index 0000000..d5429b2
--- /dev/null
+++ b/crates/thiserror/tests/ui/bad-field-attr.rs
@@ -0,0 +1,7 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error(transparent)]
+pub struct Error(#[error(transparent)] std::io::Error);
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/bad-field-attr.stderr b/crates/thiserror/tests/ui/bad-field-attr.stderr
new file mode 100644
index 0000000..5fb5744
--- /dev/null
+++ b/crates/thiserror/tests/ui/bad-field-attr.stderr
@@ -0,0 +1,5 @@
+error: #[error(transparent)] needs to go outside the enum or struct, not on an individual field
+ --> tests/ui/bad-field-attr.rs:5:18
+  |
+5 | pub struct Error(#[error(transparent)] std::io::Error);
+  |                  ^^^^^^^^^^^^^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/concat-display.rs b/crates/thiserror/tests/ui/concat-display.rs
new file mode 100644
index 0000000..8b53cc0
--- /dev/null
+++ b/crates/thiserror/tests/ui/concat-display.rs
@@ -0,0 +1,15 @@
+use thiserror::Error;
+
+macro_rules! error_type {
+    ($name:ident, $what:expr) => {
+        // Use #[error("invalid {}", $what)] instead.
+
+        #[derive(Error, Debug)]
+        #[error(concat!("invalid ", $what))]
+        pub struct $name;
+    };
+}
+
+error_type!(Error, "foo");
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/concat-display.stderr b/crates/thiserror/tests/ui/concat-display.stderr
new file mode 100644
index 0000000..dbecd69
--- /dev/null
+++ b/crates/thiserror/tests/ui/concat-display.stderr
@@ -0,0 +1,10 @@
+error: expected string literal
+  --> tests/ui/concat-display.rs:8:17
+   |
+8  |         #[error(concat!("invalid ", $what))]
+   |                 ^^^^^^
+...
+13 | error_type!(Error, "foo");
+   | ------------------------- in this macro invocation
+   |
+   = note: this error originates in the macro `error_type` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/crates/thiserror/tests/ui/duplicate-enum-source.rs b/crates/thiserror/tests/ui/duplicate-enum-source.rs
new file mode 100644
index 0000000..15e579f
--- /dev/null
+++ b/crates/thiserror/tests/ui/duplicate-enum-source.rs
@@ -0,0 +1,13 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum ErrorEnum {
+    Confusing {
+        #[source]
+        a: std::io::Error,
+        #[source]
+        b: anyhow::Error,
+    },
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/duplicate-enum-source.stderr b/crates/thiserror/tests/ui/duplicate-enum-source.stderr
new file mode 100644
index 0000000..4a4b2d3
--- /dev/null
+++ b/crates/thiserror/tests/ui/duplicate-enum-source.stderr
@@ -0,0 +1,5 @@
+error: duplicate #[source] attribute
+ --> tests/ui/duplicate-enum-source.rs:8:9
+  |
+8 |         #[source]
+  |         ^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/duplicate-fmt.rs b/crates/thiserror/tests/ui/duplicate-fmt.rs
new file mode 100644
index 0000000..cb3d678
--- /dev/null
+++ b/crates/thiserror/tests/ui/duplicate-fmt.rs
@@ -0,0 +1,8 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error("...")]
+#[error("...")]
+pub struct Error;
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/duplicate-fmt.stderr b/crates/thiserror/tests/ui/duplicate-fmt.stderr
new file mode 100644
index 0000000..532b16b
--- /dev/null
+++ b/crates/thiserror/tests/ui/duplicate-fmt.stderr
@@ -0,0 +1,5 @@
+error: only one #[error(...)] attribute is allowed
+ --> tests/ui/duplicate-fmt.rs:5:1
+  |
+5 | #[error("...")]
+  | ^^^^^^^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/duplicate-struct-source.rs b/crates/thiserror/tests/ui/duplicate-struct-source.rs
new file mode 100644
index 0000000..569df8d
--- /dev/null
+++ b/crates/thiserror/tests/ui/duplicate-struct-source.rs
@@ -0,0 +1,11 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub struct ErrorStruct {
+    #[source]
+    a: std::io::Error,
+    #[source]
+    b: anyhow::Error,
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/duplicate-struct-source.stderr b/crates/thiserror/tests/ui/duplicate-struct-source.stderr
new file mode 100644
index 0000000..c8de574
--- /dev/null
+++ b/crates/thiserror/tests/ui/duplicate-struct-source.stderr
@@ -0,0 +1,5 @@
+error: duplicate #[source] attribute
+ --> tests/ui/duplicate-struct-source.rs:7:5
+  |
+7 |     #[source]
+  |     ^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/duplicate-transparent.rs b/crates/thiserror/tests/ui/duplicate-transparent.rs
new file mode 100644
index 0000000..49c0e46
--- /dev/null
+++ b/crates/thiserror/tests/ui/duplicate-transparent.rs
@@ -0,0 +1,8 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error(transparent)]
+#[error(transparent)]
+pub struct Error(anyhow::Error);
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/duplicate-transparent.stderr b/crates/thiserror/tests/ui/duplicate-transparent.stderr
new file mode 100644
index 0000000..a830879
--- /dev/null
+++ b/crates/thiserror/tests/ui/duplicate-transparent.stderr
@@ -0,0 +1,5 @@
+error: duplicate #[error(transparent)] attribute
+ --> tests/ui/duplicate-transparent.rs:5:1
+  |
+5 | #[error(transparent)]
+  | ^^^^^^^^^^^^^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/from-backtrace-backtrace.rs b/crates/thiserror/tests/ui/from-backtrace-backtrace.rs
new file mode 100644
index 0000000..8f411bf
--- /dev/null
+++ b/crates/thiserror/tests/ui/from-backtrace-backtrace.rs
@@ -0,0 +1,10 @@
+// https://github.com/dtolnay/thiserror/issues/163
+
+use std::backtrace::Backtrace;
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error("...")]
+pub struct Error(#[from] #[backtrace] std::io::Error, Backtrace);
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/from-backtrace-backtrace.stderr b/crates/thiserror/tests/ui/from-backtrace-backtrace.stderr
new file mode 100644
index 0000000..55d647b
--- /dev/null
+++ b/crates/thiserror/tests/ui/from-backtrace-backtrace.stderr
@@ -0,0 +1,5 @@
+error: deriving From requires no fields other than source and backtrace
+ --> tests/ui/from-backtrace-backtrace.rs:8:18
+  |
+8 | pub struct Error(#[from] #[backtrace] std::io::Error, Backtrace);
+  |                  ^^^^^^^
diff --git a/crates/thiserror/tests/ui/from-not-source.rs b/crates/thiserror/tests/ui/from-not-source.rs
new file mode 100644
index 0000000..d1855be
--- /dev/null
+++ b/crates/thiserror/tests/ui/from-not-source.rs
@@ -0,0 +1,11 @@
+use thiserror::Error;
+
+#[derive(Debug, Error)]
+pub struct Error {
+    #[source]
+    source: std::io::Error,
+    #[from]
+    other: anyhow::Error,
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/from-not-source.stderr b/crates/thiserror/tests/ui/from-not-source.stderr
new file mode 100644
index 0000000..9713601
--- /dev/null
+++ b/crates/thiserror/tests/ui/from-not-source.stderr
@@ -0,0 +1,5 @@
+error: #[from] is only supported on the source field, not any other field
+ --> tests/ui/from-not-source.rs:7:5
+  |
+7 |     #[from]
+  |     ^^^^^^^
diff --git a/crates/thiserror/tests/ui/lifetime.rs b/crates/thiserror/tests/ui/lifetime.rs
new file mode 100644
index 0000000..698f8c4
--- /dev/null
+++ b/crates/thiserror/tests/ui/lifetime.rs
@@ -0,0 +1,24 @@
+use std::fmt::Debug;
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error("error")]
+struct Error<'a>(#[from] Inner<'a>);
+
+#[derive(Error, Debug)]
+#[error("{0}")]
+struct Inner<'a>(&'a str);
+
+#[derive(Error, Debug)]
+enum Enum<'a> {
+    #[error("error")]
+    Foo(#[from] Generic<&'a str>),
+}
+
+#[derive(Error, Debug)]
+#[error("{0:?}")]
+struct Generic<T: Debug>(T);
+
+fn main() -> Result<(), Error<'static>> {
+    Err(Error(Inner("some text")))
+}
diff --git a/crates/thiserror/tests/ui/lifetime.stderr b/crates/thiserror/tests/ui/lifetime.stderr
new file mode 100644
index 0000000..8b58136
--- /dev/null
+++ b/crates/thiserror/tests/ui/lifetime.stderr
@@ -0,0 +1,11 @@
+error: non-static lifetimes are not allowed in the source of an error, because std::error::Error requires the source is dyn Error + 'static
+ --> tests/ui/lifetime.rs:6:26
+  |
+6 | struct Error<'a>(#[from] Inner<'a>);
+  |                          ^^^^^^^^^
+
+error: non-static lifetimes are not allowed in the source of an error, because std::error::Error requires the source is dyn Error + 'static
+  --> tests/ui/lifetime.rs:15:17
+   |
+15 |     Foo(#[from] Generic<&'a str>),
+   |                 ^^^^^^^^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/missing-fmt.rs b/crates/thiserror/tests/ui/missing-fmt.rs
new file mode 100644
index 0000000..d52fbdf
--- /dev/null
+++ b/crates/thiserror/tests/ui/missing-fmt.rs
@@ -0,0 +1,10 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum Error {
+    #[error("...")]
+    A(usize),
+    B(usize),
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/missing-fmt.stderr b/crates/thiserror/tests/ui/missing-fmt.stderr
new file mode 100644
index 0000000..c0be373
--- /dev/null
+++ b/crates/thiserror/tests/ui/missing-fmt.stderr
@@ -0,0 +1,5 @@
+error: missing #[error("...")] display attribute
+ --> tests/ui/missing-fmt.rs:7:5
+  |
+7 |     B(usize),
+  |     ^^^^^^^^
diff --git a/crates/thiserror/tests/ui/no-display.rs b/crates/thiserror/tests/ui/no-display.rs
new file mode 100644
index 0000000..181a66e
--- /dev/null
+++ b/crates/thiserror/tests/ui/no-display.rs
@@ -0,0 +1,12 @@
+use thiserror::Error;
+
+#[derive(Debug)]
+struct NoDisplay;
+
+#[derive(Error, Debug)]
+#[error("thread: {thread}")]
+pub struct Error {
+    thread: NoDisplay,
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/no-display.stderr b/crates/thiserror/tests/ui/no-display.stderr
new file mode 100644
index 0000000..0f47c24
--- /dev/null
+++ b/crates/thiserror/tests/ui/no-display.stderr
@@ -0,0 +1,17 @@
+error[E0599]: the method `as_display` exists for reference `&NoDisplay`, but its trait bounds were not satisfied
+ --> tests/ui/no-display.rs:7:9
+  |
+4 | struct NoDisplay;
+  | ---------------- doesn't satisfy `NoDisplay: std::fmt::Display`
+...
+7 | #[error("thread: {thread}")]
+  |         ^^^^^^^^^^^^^^^^^^ method cannot be called on `&NoDisplay` due to unsatisfied trait bounds
+  |
+  = note: the following trait bounds were not satisfied:
+          `NoDisplay: std::fmt::Display`
+          which is required by `&NoDisplay: AsDisplay<'_>`
+note: the trait `std::fmt::Display` must be implemented
+ --> $RUST/core/src/fmt/mod.rs
+  |
+  | pub trait Display {
+  | ^^^^^^^^^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/source-enum-not-error.rs b/crates/thiserror/tests/ui/source-enum-not-error.rs
new file mode 100644
index 0000000..3eb0d3e
--- /dev/null
+++ b/crates/thiserror/tests/ui/source-enum-not-error.rs
@@ -0,0 +1,14 @@
+use thiserror::Error;
+
+#[derive(Debug)]
+pub struct NotError;
+
+#[derive(Error, Debug)]
+#[error("...")]
+pub enum ErrorEnum {
+    Broken {
+        source: NotError,
+    },
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/source-enum-not-error.stderr b/crates/thiserror/tests/ui/source-enum-not-error.stderr
new file mode 100644
index 0000000..750c69e
--- /dev/null
+++ b/crates/thiserror/tests/ui/source-enum-not-error.stderr
@@ -0,0 +1,22 @@
+error[E0599]: the method `as_dyn_error` exists for reference `&NotError`, but its trait bounds were not satisfied
+  --> tests/ui/source-enum-not-error.rs:10:9
+   |
+4  | pub struct NotError;
+   | -------------------
+   | |
+   | doesn't satisfy `NotError: AsDynError<'_>`
+   | doesn't satisfy `NotError: std::error::Error`
+...
+10 |         source: NotError,
+   |         ^^^^^^ method cannot be called on `&NotError` due to unsatisfied trait bounds
+   |
+   = note: the following trait bounds were not satisfied:
+           `NotError: std::error::Error`
+           which is required by `NotError: AsDynError<'_>`
+           `&NotError: std::error::Error`
+           which is required by `&NotError: AsDynError<'_>`
+note: the trait `std::error::Error` must be implemented
+  --> $RUST/core/src/error.rs
+   |
+   | pub trait Error: Debug + Display {
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/source-struct-not-error.rs b/crates/thiserror/tests/ui/source-struct-not-error.rs
new file mode 100644
index 0000000..d59df1e
--- /dev/null
+++ b/crates/thiserror/tests/ui/source-struct-not-error.rs
@@ -0,0 +1,12 @@
+use thiserror::Error;
+
+#[derive(Debug)]
+struct NotError;
+
+#[derive(Error, Debug)]
+#[error("...")]
+pub struct ErrorStruct {
+    source: NotError,
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/source-struct-not-error.stderr b/crates/thiserror/tests/ui/source-struct-not-error.stderr
new file mode 100644
index 0000000..b98460f
--- /dev/null
+++ b/crates/thiserror/tests/ui/source-struct-not-error.stderr
@@ -0,0 +1,21 @@
+error[E0599]: the method `as_dyn_error` exists for struct `NotError`, but its trait bounds were not satisfied
+ --> tests/ui/source-struct-not-error.rs:9:5
+  |
+4 | struct NotError;
+  | ---------------
+  | |
+  | method `as_dyn_error` not found for this struct
+  | doesn't satisfy `NotError: AsDynError<'_>`
+  | doesn't satisfy `NotError: std::error::Error`
+...
+9 |     source: NotError,
+  |     ^^^^^^ method cannot be called on `NotError` due to unsatisfied trait bounds
+  |
+  = note: the following trait bounds were not satisfied:
+          `NotError: std::error::Error`
+          which is required by `NotError: AsDynError<'_>`
+note: the trait `std::error::Error` must be implemented
+ --> $RUST/core/src/error.rs
+  |
+  | pub trait Error: Debug + Display {
+  | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/transparent-display.rs b/crates/thiserror/tests/ui/transparent-display.rs
new file mode 100644
index 0000000..2a59f18
--- /dev/null
+++ b/crates/thiserror/tests/ui/transparent-display.rs
@@ -0,0 +1,8 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error(transparent)]
+#[error("...")]
+pub struct Error(anyhow::Error);
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/transparent-display.stderr b/crates/thiserror/tests/ui/transparent-display.stderr
new file mode 100644
index 0000000..54d958b
--- /dev/null
+++ b/crates/thiserror/tests/ui/transparent-display.stderr
@@ -0,0 +1,5 @@
+error: cannot have both #[error(transparent)] and a display attribute
+ --> tests/ui/transparent-display.rs:5:1
+  |
+5 | #[error("...")]
+  | ^^^^^^^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/transparent-enum-many.rs b/crates/thiserror/tests/ui/transparent-enum-many.rs
new file mode 100644
index 0000000..e2a73a4
--- /dev/null
+++ b/crates/thiserror/tests/ui/transparent-enum-many.rs
@@ -0,0 +1,9 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum Error {
+    #[error(transparent)]
+    Other(anyhow::Error, String),
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/transparent-enum-many.stderr b/crates/thiserror/tests/ui/transparent-enum-many.stderr
new file mode 100644
index 0000000..a9adfa5
--- /dev/null
+++ b/crates/thiserror/tests/ui/transparent-enum-many.stderr
@@ -0,0 +1,6 @@
+error: #[error(transparent)] requires exactly one field
+ --> tests/ui/transparent-enum-many.rs:5:5
+  |
+5 | /     #[error(transparent)]
+6 | |     Other(anyhow::Error, String),
+  | |________________________________^
diff --git a/crates/thiserror/tests/ui/transparent-enum-source.rs b/crates/thiserror/tests/ui/transparent-enum-source.rs
new file mode 100644
index 0000000..3849f66
--- /dev/null
+++ b/crates/thiserror/tests/ui/transparent-enum-source.rs
@@ -0,0 +1,9 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum Error {
+    #[error(transparent)]
+    Other(#[source] anyhow::Error),
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/transparent-enum-source.stderr b/crates/thiserror/tests/ui/transparent-enum-source.stderr
new file mode 100644
index 0000000..ccb9067
--- /dev/null
+++ b/crates/thiserror/tests/ui/transparent-enum-source.stderr
@@ -0,0 +1,5 @@
+error: transparent variant can't contain #[source]
+ --> tests/ui/transparent-enum-source.rs:6:11
+  |
+6 |     Other(#[source] anyhow::Error),
+  |           ^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/transparent-struct-many.rs b/crates/thiserror/tests/ui/transparent-struct-many.rs
new file mode 100644
index 0000000..18f2466
--- /dev/null
+++ b/crates/thiserror/tests/ui/transparent-struct-many.rs
@@ -0,0 +1,10 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error(transparent)]
+pub struct Error {
+    inner: anyhow::Error,
+    what: String,
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/transparent-struct-many.stderr b/crates/thiserror/tests/ui/transparent-struct-many.stderr
new file mode 100644
index 0000000..c0e3806
--- /dev/null
+++ b/crates/thiserror/tests/ui/transparent-struct-many.stderr
@@ -0,0 +1,5 @@
+error: #[error(transparent)] requires exactly one field
+ --> tests/ui/transparent-struct-many.rs:4:1
+  |
+4 | #[error(transparent)]
+  | ^^^^^^^^^^^^^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/transparent-struct-source.rs b/crates/thiserror/tests/ui/transparent-struct-source.rs
new file mode 100644
index 0000000..d4512c2
--- /dev/null
+++ b/crates/thiserror/tests/ui/transparent-struct-source.rs
@@ -0,0 +1,7 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[error(transparent)]
+pub struct Error(#[source] anyhow::Error);
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/transparent-struct-source.stderr b/crates/thiserror/tests/ui/transparent-struct-source.stderr
new file mode 100644
index 0000000..3012ca3
--- /dev/null
+++ b/crates/thiserror/tests/ui/transparent-struct-source.stderr
@@ -0,0 +1,5 @@
+error: transparent error struct can't contain #[source]
+ --> tests/ui/transparent-struct-source.rs:5:18
+  |
+5 | pub struct Error(#[source] anyhow::Error);
+  |                  ^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/unexpected-field-fmt.rs b/crates/thiserror/tests/ui/unexpected-field-fmt.rs
new file mode 100644
index 0000000..7c439d9
--- /dev/null
+++ b/crates/thiserror/tests/ui/unexpected-field-fmt.rs
@@ -0,0 +1,11 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+pub enum Error {
+    What {
+        #[error("...")]
+        io: std::io::Error,
+    },
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/unexpected-field-fmt.stderr b/crates/thiserror/tests/ui/unexpected-field-fmt.stderr
new file mode 100644
index 0000000..bf3c24d
--- /dev/null
+++ b/crates/thiserror/tests/ui/unexpected-field-fmt.stderr
@@ -0,0 +1,5 @@
+error: not expected here; the #[error(...)] attribute belongs on top of a struct or an enum variant
+ --> tests/ui/unexpected-field-fmt.rs:6:9
+  |
+6 |         #[error("...")]
+  |         ^^^^^^^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/unexpected-struct-source.rs b/crates/thiserror/tests/ui/unexpected-struct-source.rs
new file mode 100644
index 0000000..f396494
--- /dev/null
+++ b/crates/thiserror/tests/ui/unexpected-struct-source.rs
@@ -0,0 +1,7 @@
+use thiserror::Error;
+
+#[derive(Error, Debug)]
+#[source]
+pub struct Error;
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/unexpected-struct-source.stderr b/crates/thiserror/tests/ui/unexpected-struct-source.stderr
new file mode 100644
index 0000000..6f15841
--- /dev/null
+++ b/crates/thiserror/tests/ui/unexpected-struct-source.stderr
@@ -0,0 +1,5 @@
+error: not expected here; the #[source] attribute belongs on a specific field
+ --> tests/ui/unexpected-struct-source.rs:4:1
+  |
+4 | #[source]
+  | ^^^^^^^^^
diff --git a/crates/thiserror/tests/ui/union.rs b/crates/thiserror/tests/ui/union.rs
new file mode 100644
index 0000000..cd6a934
--- /dev/null
+++ b/crates/thiserror/tests/ui/union.rs
@@ -0,0 +1,9 @@
+use thiserror::Error;
+
+#[derive(Error)]
+pub union U {
+    msg: &'static str,
+    num: usize,
+}
+
+fn main() {}
diff --git a/crates/thiserror/tests/ui/union.stderr b/crates/thiserror/tests/ui/union.stderr
new file mode 100644
index 0000000..3ec4d71
--- /dev/null
+++ b/crates/thiserror/tests/ui/union.stderr
@@ -0,0 +1,8 @@
+error: union as errors are not supported
+ --> tests/ui/union.rs:4:1
+  |
+4 | / pub union U {
+5 | |     msg: &'static str,
+6 | |     num: usize,
+7 | | }
+  | |_^
diff --git a/crates/thread_local/.cargo-checksum.json b/crates/thread_local/.cargo-checksum.json
new file mode 100644
index 0000000..b02a949
--- /dev/null
+++ b/crates/thread_local/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"1c3ffde735d10adc9bc72b296db6fd448581eddedb494f64cd966ce3de8d4b13","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"d33aea1cf2bbc915e89347975fae37cc3d51df0227d56d070d8adc534200fca4","benches/thread_local.rs":"37dc7f2b6bc30ea093828ae0e5509dc5ffdf63fa8127804ff359e49e1175356e","src/cached.rs":"253cb48da470265f4c4702a4f9f6b8670860cb092d8304d807717d751d0b3350","src/lib.rs":"d65f31267980b0035aec8c7d503f0fef0d8615a0eb8311f2b3887e038d178153","src/thread_id.rs":"ab6dd1278798cb5d7c49a60a2e642fc0e0462e316938c8097757f5a5b9579b38","src/unreachable.rs":"f0c65f1b0516cc92fbd7df2c8c8edfdac5362377d27ae5b91a12204673e2fd73"},"package":"3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"}
\ No newline at end of file
diff --git a/crates/thread_local/Android.bp b/crates/thread_local/Android.bp
new file mode 100644
index 0000000..8e11e92
--- /dev/null
+++ b/crates/thread_local/Android.bp
@@ -0,0 +1,54 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_thread_local_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_thread_local_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libthread_local",
+    host_supported: true,
+    crate_name: "thread_local",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.1.7",
+    crate_root: "src/lib.rs",
+    edition: "2021",
+    rustlibs: [
+        "libcfg_if",
+        "libonce_cell",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
+
+rust_test {
+    name: "thread_local_test_src_lib",
+    host_supported: true,
+    crate_name: "thread_local",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.1.7",
+    crate_root: "src/lib.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2021",
+    rustlibs: [
+        "libcfg_if",
+        "libcriterion",
+        "libonce_cell",
+    ],
+}
diff --git a/crates/thread_local/Cargo.lock b/crates/thread_local/Cargo.lock
new file mode 100644
index 0000000..2e109bf
--- /dev/null
+++ b/crates/thread_local/Cargo.lock
@@ -0,0 +1,653 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "anes"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bumpalo"
+version = "3.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
+
+[[package]]
+name = "cast"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "ciborium"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
+dependencies = [
+ "ciborium-io",
+ "ciborium-ll",
+ "serde",
+]
+
+[[package]]
+name = "ciborium-io"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
+
+[[package]]
+name = "ciborium-ll"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
+dependencies = [
+ "ciborium-io",
+ "half",
+]
+
+[[package]]
+name = "clap"
+version = "3.2.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
+dependencies = [
+ "bitflags",
+ "clap_lex",
+ "indexmap",
+ "textwrap",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "criterion"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb"
+dependencies = [
+ "anes",
+ "atty",
+ "cast",
+ "ciborium",
+ "clap",
+ "criterion-plot",
+ "itertools",
+ "lazy_static",
+ "num-traits",
+ "oorandom",
+ "plotters",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "tinytemplate",
+ "walkdir",
+]
+
+[[package]]
+name = "criterion-plot"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
+dependencies = [
+ "cast",
+ "itertools",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
+dependencies = [
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
+
+[[package]]
+name = "crunchy"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
+
+[[package]]
+name = "either"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
+
+[[package]]
+name = "half"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888"
+dependencies = [
+ "cfg-if",
+ "crunchy",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+
+[[package]]
+name = "js-sys"
+version = "0.3.70"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+
+[[package]]
+name = "libc"
+version = "0.2.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+
+[[package]]
+name = "log"
+version = "0.4.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "num-traits"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+
+[[package]]
+name = "oorandom"
+version = "11.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
+
+[[package]]
+name = "os_str_bytes"
+version = "6.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1"
+
+[[package]]
+name = "plotters"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3"
+dependencies = [
+ "num-traits",
+ "plotters-backend",
+ "plotters-svg",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "plotters-backend"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7"
+
+[[package]]
+name = "plotters-svg"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705"
+dependencies = [
+ "plotters-backend",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rayon"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
+dependencies = [
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
+dependencies = [
+ "crossbeam-deque",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "regex"
+version = "1.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
+
+[[package]]
+name = "ryu"
+version = "1.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.127"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad"
+dependencies = [
+ "itoa",
+ "memchr",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.16.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9"
+
+[[package]]
+name = "thread_local"
+version = "1.1.7"
+dependencies = [
+ "cfg-if",
+ "criterion",
+ "once_cell",
+]
+
+[[package]]
+name = "tinytemplate"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "walkdir"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
+dependencies = [
+ "same-file",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484"
+
+[[package]]
+name = "web-sys"
+version = "0.3.70"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
+dependencies = [
+ "windows-sys",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
diff --git a/crates/thread_local/Cargo.toml b/crates/thread_local/Cargo.toml
new file mode 100644
index 0000000..d773a90
--- /dev/null
+++ b/crates/thread_local/Cargo.toml
@@ -0,0 +1,45 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+name = "thread_local"
+version = "1.1.7"
+authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
+description = "Per-object thread-local storage"
+documentation = "https://docs.rs/thread_local/"
+readme = "README.md"
+keywords = [
+    "thread_local",
+    "concurrent",
+    "thread",
+]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/Amanieu/thread_local-rs"
+
+[[bench]]
+name = "thread_local"
+harness = false
+
+[dependencies.cfg-if]
+version = "1.0.0"
+
+[dependencies.once_cell]
+version = "1.5.2"
+
+[dev-dependencies.criterion]
+version = "0.4.0"
+
+[features]
+nightly = []
+
+[badges.travis-ci]
+repository = "Amanieu/thread_local-rs"
diff --git a/crates/thread_local/LICENSE b/crates/thread_local/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/thread_local/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/thread_local/LICENSE-APACHE b/crates/thread_local/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/crates/thread_local/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/crates/thread_local/LICENSE-MIT b/crates/thread_local/LICENSE-MIT
new file mode 100644
index 0000000..40b8817
--- /dev/null
+++ b/crates/thread_local/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/thread_local/METADATA b/crates/thread_local/METADATA
new file mode 100644
index 0000000..0466287
--- /dev/null
+++ b/crates/thread_local/METADATA
@@ -0,0 +1,23 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/thread_local
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
+name: "thread_local"
+description: "Per-object thread-local storage"
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/thread_local"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/thread_local/thread_local-1.1.7.crate"
+  }
+  version: "1.1.7"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2023
+    month: 2
+    day: 17
+  }
+}
diff --git a/crates/thread_local/MODULE_LICENSE_APACHE2 b/crates/thread_local/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/thread_local/MODULE_LICENSE_APACHE2
diff --git a/crates/thread_local/README.md b/crates/thread_local/README.md
new file mode 100644
index 0000000..914451c
--- /dev/null
+++ b/crates/thread_local/README.md
@@ -0,0 +1,39 @@
+thread_local
+============
+
+[![Build Status](https://travis-ci.org/Amanieu/thread_local-rs.svg?branch=master)](https://travis-ci.org/Amanieu/thread_local-rs) [![Crates.io](https://img.shields.io/crates/v/thread_local.svg)](https://crates.io/crates/thread_local)
+
+This library provides the `ThreadLocal` type which allows a separate copy of an
+object to be used for each thread. This allows for per-object thread-local
+storage, unlike the standard library's `thread_local!` macro which only allows
+static thread-local storage.
+
+[Documentation](https://docs.rs/thread_local/)
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+thread_local = "1.1"
+```
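
For illustration, a minimal sketch of how `ThreadLocal` is typically used, assuming the `get_or` and `IntoIterator` APIs from the linked documentation (note that `std::thread::scope` in the sketch requires Rust 1.63 or later, newer than the crate's stated MSRV):

```rust
use std::cell::Cell;
use thread_local::ThreadLocal;

fn main() {
    // One ThreadLocal shared by reference across threads;
    // each thread lazily initializes and mutates its own Cell.
    let counter: ThreadLocal<Cell<u32>> = ThreadLocal::new();

    std::thread::scope(|s| {
        for _ in 0..4 {
            s.spawn(|| {
                let cell = counter.get_or(|| Cell::new(0));
                cell.set(cell.get() + 1);
            });
        }
    });

    // Once all threads are joined, collect every per-thread value.
    let total: u32 = counter.into_iter().map(|c| c.get()).sum();
    assert_eq!(total, 4);
}
```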
+
+## Minimum Rust version
+
+This crate's minimum supported Rust version (MSRV) is 1.59.0.
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
+additional terms or conditions.
diff --git a/crates/thread_local/TEST_MAPPING b/crates/thread_local/TEST_MAPPING
new file mode 100644
index 0000000..7974396
--- /dev/null
+++ b/crates/thread_local/TEST_MAPPING
@@ -0,0 +1,13 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "presubmit": [
+    {
+      "name": "thread_local_test_src_lib"
+    }
+  ],
+  "presubmit-rust": [
+    {
+      "name": "thread_local_test_src_lib"
+    }
+  ]
+}
diff --git a/crates/thread_local/benches/thread_local.rs b/crates/thread_local/benches/thread_local.rs
new file mode 100644
index 0000000..dd4716d
--- /dev/null
+++ b/crates/thread_local/benches/thread_local.rs
@@ -0,0 +1,25 @@
+use criterion::{black_box, BatchSize};
+
+use thread_local::ThreadLocal;
+
+fn main() {
+    let mut c = criterion::Criterion::default().configure_from_args();
+
+    c.bench_function("get", |b| {
+        let local = ThreadLocal::new();
+        local.get_or(|| Box::new(0));
+        b.iter(|| {
+            black_box(local.get());
+        });
+    });
+
+    c.bench_function("insert", |b| {
+        b.iter_batched_ref(
+            ThreadLocal::new,
+            |local| {
+                black_box(local.get_or(|| 0));
+            },
+            BatchSize::SmallInput,
+        )
+    });
+}
diff --git a/crates/thread_local/cargo_embargo.json b/crates/thread_local/cargo_embargo.json
new file mode 100644
index 0000000..c8842d1
--- /dev/null
+++ b/crates/thread_local/cargo_embargo.json
@@ -0,0 +1,4 @@
+{
+  "run_cargo": false,
+  "tests": true
+}
diff --git a/crates/thread_local/src/cached.rs b/crates/thread_local/src/cached.rs
new file mode 100644
index 0000000..16f6516
--- /dev/null
+++ b/crates/thread_local/src/cached.rs
@@ -0,0 +1,161 @@
+#![allow(deprecated)]
+
+use super::{IntoIter, IterMut, ThreadLocal};
+use std::fmt;
+use std::panic::UnwindSafe;
+use std::usize;
+
+/// Wrapper around [`ThreadLocal`].
+///
+/// This used to add a fast path for a single thread; however, that has been
+/// obsoleted by performance improvements to [`ThreadLocal`] itself.
+#[deprecated(since = "1.1.0", note = "Use `ThreadLocal` instead")]
+pub struct CachedThreadLocal<T: Send> {
+    inner: ThreadLocal<T>,
+}
+
+impl<T: Send> Default for CachedThreadLocal<T> {
+    fn default() -> CachedThreadLocal<T> {
+        CachedThreadLocal::new()
+    }
+}
+
+impl<T: Send> CachedThreadLocal<T> {
+    /// Creates a new empty `CachedThreadLocal`.
+    #[inline]
+    pub fn new() -> CachedThreadLocal<T> {
+        CachedThreadLocal {
+            inner: ThreadLocal::new(),
+        }
+    }
+
+    /// Returns the element for the current thread, if it exists.
+    #[inline]
+    pub fn get(&self) -> Option<&T> {
+        self.inner.get()
+    }
+
+    /// Returns the element for the current thread, or creates it if it doesn't
+    /// exist.
+    #[inline]
+    pub fn get_or<F>(&self, create: F) -> &T
+    where
+        F: FnOnce() -> T,
+    {
+        self.inner.get_or(create)
+    }
+
+    /// Returns the element for the current thread, or creates it if it doesn't
+    /// exist. If `create` fails, that error is returned and no element is
+    /// added.
+    #[inline]
+    pub fn get_or_try<F, E>(&self, create: F) -> Result<&T, E>
+    where
+        F: FnOnce() -> Result<T, E>,
+    {
+        self.inner.get_or_try(create)
+    }
+
+    /// Returns a mutable iterator over the local values of all threads.
+    ///
+    /// Since this call borrows the `ThreadLocal` mutably, this operation can
+    /// be done safely---the mutable borrow statically guarantees no other
+    /// threads are currently accessing their associated values.
+    #[inline]
+    pub fn iter_mut(&mut self) -> CachedIterMut<T> {
+        CachedIterMut {
+            inner: self.inner.iter_mut(),
+        }
+    }
+
+    /// Removes all thread-specific values from the `ThreadLocal`, effectively
+    /// resetting it to its original state.
+    ///
+    /// Since this call borrows the `ThreadLocal` mutably, this operation can
+    /// be done safely---the mutable borrow statically guarantees no other
+    /// threads are currently accessing their associated values.
+    #[inline]
+    pub fn clear(&mut self) {
+        self.inner.clear();
+    }
+}
+
+impl<T: Send> IntoIterator for CachedThreadLocal<T> {
+    type Item = T;
+    type IntoIter = CachedIntoIter<T>;
+
+    fn into_iter(self) -> CachedIntoIter<T> {
+        CachedIntoIter {
+            inner: self.inner.into_iter(),
+        }
+    }
+}
+
+impl<'a, T: Send + 'a> IntoIterator for &'a mut CachedThreadLocal<T> {
+    type Item = &'a mut T;
+    type IntoIter = CachedIterMut<'a, T>;
+
+    fn into_iter(self) -> CachedIterMut<'a, T> {
+        self.iter_mut()
+    }
+}
+
+impl<T: Send + Default> CachedThreadLocal<T> {
+    /// Returns the element for the current thread, or creates a default one if
+    /// it doesn't exist.
+    pub fn get_or_default(&self) -> &T {
+        self.get_or(T::default)
+    }
+}
+
+impl<T: Send + fmt::Debug> fmt::Debug for CachedThreadLocal<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "ThreadLocal {{ local_data: {:?} }}", self.get())
+    }
+}
+
+impl<T: Send + UnwindSafe> UnwindSafe for CachedThreadLocal<T> {}
+
+/// Mutable iterator over the contents of a `CachedThreadLocal`.
+#[deprecated(since = "1.1.0", note = "Use `IterMut` instead")]
+pub struct CachedIterMut<'a, T: Send + 'a> {
+    inner: IterMut<'a, T>,
+}
+
+impl<'a, T: Send + 'a> Iterator for CachedIterMut<'a, T> {
+    type Item = &'a mut T;
+
+    #[inline]
+    fn next(&mut self) -> Option<&'a mut T> {
+        self.inner.next()
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+impl<'a, T: Send + 'a> ExactSizeIterator for CachedIterMut<'a, T> {}
+
+/// An iterator that moves out of a `CachedThreadLocal`.
+#[deprecated(since = "1.1.0", note = "Use `IntoIter` instead")]
+pub struct CachedIntoIter<T: Send> {
+    inner: IntoIter<T>,
+}
+
+impl<T: Send> Iterator for CachedIntoIter<T> {
+    type Item = T;
+
+    #[inline]
+    fn next(&mut self) -> Option<T> {
+        self.inner.next()
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+impl<T: Send> ExactSizeIterator for CachedIntoIter<T> {}
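
Because `CachedThreadLocal` is now a plain pass-through, migrating off the deprecated type is mechanical; a hedged before/after sketch:

```rust
#![allow(deprecated)]

use thread_local::{CachedThreadLocal, ThreadLocal};

fn main() {
    // Before: the deprecated wrapper (identical behaviour, extra indirection).
    let old: CachedThreadLocal<u32> = CachedThreadLocal::new();
    assert_eq!(*old.get_or(|| 7), 7);

    // After: a drop-in replacement; get, get_or, get_or_try, iter_mut and
    // clear all exist on ThreadLocal with the same signatures.
    let new: ThreadLocal<u32> = ThreadLocal::new();
    assert_eq!(*new.get_or(|| 7), 7);
}
```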
diff --git a/crates/thread_local/src/lib.rs b/crates/thread_local/src/lib.rs
new file mode 100644
index 0000000..12d25f6
--- /dev/null
+++ b/crates/thread_local/src/lib.rs
@@ -0,0 +1,646 @@
+// Copyright 2017 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! Per-object thread-local storage
+//!
+//! This library provides the `ThreadLocal` type which allows a separate copy of
+//! an object to be used for each thread. This allows for per-object
+//! thread-local storage, unlike the standard library's `thread_local!` macro
+//! which only allows static thread-local storage.
+//!
+//! Per-thread objects are not destroyed when a thread exits. Instead, objects
+//! are only destroyed when the `ThreadLocal` containing them is destroyed.
+//!
+//! You can also iterate over the thread-local values of all threads in a
+//! `ThreadLocal` object using the `iter_mut` and `into_iter` methods. This can
+//! only be done if you have mutable access to the `ThreadLocal` object, which
+//! guarantees that you are the only thread currently accessing it.
+//!
+//! Note that since thread IDs are recycled when a thread exits, it is possible
+//! for one thread to retrieve the object of another thread. Since this can only
+//! occur after a thread has exited this does not lead to any race conditions.
+//!
+//! # Examples
+//!
+//! Basic usage of `ThreadLocal`:
+//!
+//! ```rust
+//! use thread_local::ThreadLocal;
+//! let tls: ThreadLocal<u32> = ThreadLocal::new();
+//! assert_eq!(tls.get(), None);
+//! assert_eq!(tls.get_or(|| 5), &5);
+//! assert_eq!(tls.get(), Some(&5));
+//! ```
+//!
+//! Combining thread-local values into a single result:
+//!
+//! ```rust
+//! use thread_local::ThreadLocal;
+//! use std::sync::Arc;
+//! use std::cell::Cell;
+//! use std::thread;
+//!
+//! let tls = Arc::new(ThreadLocal::new());
+//!
+//! // Create a bunch of threads to do stuff
+//! for _ in 0..5 {
+//!     let tls2 = tls.clone();
+//!     thread::spawn(move || {
+//!         // Increment a counter to count some event...
+//!         let cell = tls2.get_or(|| Cell::new(0));
+//!         cell.set(cell.get() + 1);
+//!     }).join().unwrap();
+//! }
+//!
+//! // Once all threads are done, collect the counter values and return the
+//! // sum of all thread-local counter values.
+//! let tls = Arc::try_unwrap(tls).unwrap();
+//! let total = tls.into_iter().fold(0, |x, y| x + y.get());
+//! assert_eq!(total, 5);
+//! ```
+
+#![warn(missing_docs)]
+#![allow(clippy::mutex_atomic)]
+#![cfg_attr(feature = "nightly", feature(thread_local))]
+
+mod cached;
+mod thread_id;
+mod unreachable;
+
+#[allow(deprecated)]
+pub use cached::{CachedIntoIter, CachedIterMut, CachedThreadLocal};
+
+use std::cell::UnsafeCell;
+use std::fmt;
+use std::iter::FusedIterator;
+use std::mem;
+use std::mem::MaybeUninit;
+use std::panic::UnwindSafe;
+use std::ptr;
+use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
+use thread_id::Thread;
+use unreachable::UncheckedResultExt;
+
+// Use usize::BITS once it has stabilized and the MSRV has been bumped.
+#[cfg(target_pointer_width = "16")]
+const POINTER_WIDTH: u8 = 16;
+#[cfg(target_pointer_width = "32")]
+const POINTER_WIDTH: u8 = 32;
+#[cfg(target_pointer_width = "64")]
+const POINTER_WIDTH: u8 = 64;
+
+/// The total number of buckets stored in each thread local.
+const BUCKETS: usize = (POINTER_WIDTH + 1) as usize;
+
+/// Thread-local variable wrapper
+///
+/// See the [module-level documentation](index.html) for more.
+pub struct ThreadLocal<T: Send> {
+    /// The buckets in the thread local. The nth bucket contains `2^(n-1)`
+    /// elements. Each bucket is lazily allocated.
+    buckets: [AtomicPtr<Entry<T>>; BUCKETS],
+
+    /// The number of values in the thread local. This can be less than the real number of values,
+    /// but is never more.
+    values: AtomicUsize,
+}
+
+struct Entry<T> {
+    present: AtomicBool,
+    value: UnsafeCell<MaybeUninit<T>>,
+}
+
+impl<T> Drop for Entry<T> {
+    fn drop(&mut self) {
+        unsafe {
+            if *self.present.get_mut() {
+                ptr::drop_in_place((*self.value.get()).as_mut_ptr());
+            }
+        }
+    }
+}
+
+// ThreadLocal is always Sync, even if T isn't
+unsafe impl<T: Send> Sync for ThreadLocal<T> {}
+
+impl<T: Send> Default for ThreadLocal<T> {
+    fn default() -> ThreadLocal<T> {
+        ThreadLocal::new()
+    }
+}
+
+impl<T: Send> Drop for ThreadLocal<T> {
+    fn drop(&mut self) {
+        let mut bucket_size = 1;
+
+        // Free each non-null bucket
+        for (i, bucket) in self.buckets.iter_mut().enumerate() {
+            let bucket_ptr = *bucket.get_mut();
+
+            let this_bucket_size = bucket_size;
+            if i != 0 {
+                bucket_size <<= 1;
+            }
+
+            if bucket_ptr.is_null() {
+                continue;
+            }
+
+            unsafe { deallocate_bucket(bucket_ptr, this_bucket_size) };
+        }
+    }
+}
+
+impl<T: Send> ThreadLocal<T> {
+    /// Creates a new empty `ThreadLocal`.
+    pub fn new() -> ThreadLocal<T> {
+        Self::with_capacity(2)
+    }
+
+    /// Creates a new `ThreadLocal` with an initial capacity. If fewer threads than the capacity
+    /// access the thread local, it will never reallocate. The capacity may be rounded up to the
+    /// nearest power of two.
+    pub fn with_capacity(capacity: usize) -> ThreadLocal<T> {
+        let allocated_buckets = capacity
+            .checked_sub(1)
+            .map(|c| usize::from(POINTER_WIDTH) - (c.leading_zeros() as usize) + 1)
+            .unwrap_or(0);
+
+        let mut buckets = [ptr::null_mut(); BUCKETS];
+        let mut bucket_size = 1;
+        for (i, bucket) in buckets[..allocated_buckets].iter_mut().enumerate() {
+            *bucket = allocate_bucket::<T>(bucket_size);
+
+            if i != 0 {
+                bucket_size <<= 1;
+            }
+        }
+
+        ThreadLocal {
+            // Safety: AtomicPtr has the same representation as a pointer and arrays have the same
+            // representation as a sequence of their inner type.
+            buckets: unsafe { mem::transmute(buckets) },
+            values: AtomicUsize::new(0),
+        }
+    }
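
To make the sizing arithmetic concrete, here is the same formula evaluated standalone for a few capacities. This is a hedged sketch: the constant 64 assumes a 64-bit target (`POINTER_WIDTH == 64`), and the expected values follow from bucket 0 holding one entry and bucket n holding 2^(n-1) entries.

```rust
/// Same formula as `ThreadLocal::with_capacity`, specialized to a
/// 64-bit target, shown standalone for clarity.
fn allocated_buckets(capacity: usize) -> usize {
    capacity
        .checked_sub(1)
        .map(|c| 64 - (c.leading_zeros() as usize) + 1)
        .unwrap_or(0)
}

fn main() {
    // The first k buckets together hold 2^(k-1) entries.
    assert_eq!(allocated_buckets(0), 0); // nothing pre-allocated
    assert_eq!(allocated_buckets(1), 1); // 1 entry   -> bucket 0 only
    assert_eq!(allocated_buckets(2), 2); // 2 entries -> buckets 0..=1
    assert_eq!(allocated_buckets(4), 3); // 4 entries -> buckets 0..=2
    assert_eq!(allocated_buckets(5), 4); // rounds up to 8 entries
}
```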
+
+    /// Returns the element for the current thread, if it exists.
+    pub fn get(&self) -> Option<&T> {
+        self.get_inner(thread_id::get())
+    }
+
+    /// Returns the element for the current thread, or creates it if it doesn't
+    /// exist.
+    pub fn get_or<F>(&self, create: F) -> &T
+    where
+        F: FnOnce() -> T,
+    {
+        unsafe {
+            self.get_or_try(|| Ok::<T, ()>(create()))
+                .unchecked_unwrap_ok()
+        }
+    }
+
+    /// Returns the element for the current thread, or creates it if it doesn't
+    /// exist. If `create` fails, that error is returned and no element is
+    /// added.
+    pub fn get_or_try<F, E>(&self, create: F) -> Result<&T, E>
+    where
+        F: FnOnce() -> Result<T, E>,
+    {
+        let thread = thread_id::get();
+        if let Some(val) = self.get_inner(thread) {
+            return Ok(val);
+        }
+
+        Ok(self.insert(create()?))
+    }
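
A hedged usage sketch for the fallible path: when `create` fails the error propagates and nothing is stored, so a later call can retry.

```rust
use thread_local::ThreadLocal;

fn main() {
    let tls: ThreadLocal<u32> = ThreadLocal::new();

    // Failed initialization leaves the slot empty...
    let err: Result<&u32, &str> = tls.get_or_try(|| Err("not ready"));
    assert!(err.is_err());
    assert_eq!(tls.get(), None);

    // ...so a subsequent successful call stores the value as usual.
    let ok: Result<&u32, &str> = tls.get_or_try(|| Ok(42));
    assert_eq!(ok, Ok(&42));
    assert_eq!(tls.get(), Some(&42));
}
```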
+
+    fn get_inner(&self, thread: Thread) -> Option<&T> {
+        let bucket_ptr =
+            unsafe { self.buckets.get_unchecked(thread.bucket) }.load(Ordering::Acquire);
+        if bucket_ptr.is_null() {
+            return None;
+        }
+        unsafe {
+            let entry = &*bucket_ptr.add(thread.index);
+            // Read without atomic operations as only this thread can set the value.
+            if (&entry.present as *const _ as *const bool).read() {
+                Some(&*(&*entry.value.get()).as_ptr())
+            } else {
+                None
+            }
+        }
+    }
+
+    #[cold]
+    fn insert(&self, data: T) -> &T {
+        let thread = thread_id::get();
+        let bucket_atomic_ptr = unsafe { self.buckets.get_unchecked(thread.bucket) };
+        let bucket_ptr: *const _ = bucket_atomic_ptr.load(Ordering::Acquire);
+
+        // If the bucket doesn't already exist, we need to allocate it
+        let bucket_ptr = if bucket_ptr.is_null() {
+            let new_bucket = allocate_bucket(thread.bucket_size);
+
+            match bucket_atomic_ptr.compare_exchange(
+                ptr::null_mut(),
+                new_bucket,
+                Ordering::AcqRel,
+                Ordering::Acquire,
+            ) {
+                Ok(_) => new_bucket,
+                // If the bucket value changed (from null), that means
+                // another thread stored a new bucket before we could,
+                // and we can free our bucket and use that one instead
+                Err(bucket_ptr) => {
+                    unsafe { deallocate_bucket(new_bucket, thread.bucket_size) }
+                    bucket_ptr
+                }
+            }
+        } else {
+            bucket_ptr
+        };
+
+        // Insert the new element into the bucket
+        let entry = unsafe { &*bucket_ptr.add(thread.index) };
+        let value_ptr = entry.value.get();
+        unsafe { value_ptr.write(MaybeUninit::new(data)) };
+        entry.present.store(true, Ordering::Release);
+
+        self.values.fetch_add(1, Ordering::Release);
+
+        unsafe { &*(&*value_ptr).as_ptr() }
+    }
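
The bucket publication above follows a common lock-free idiom: allocate optimistically, publish with a compare-exchange, and free the local allocation if another thread won the race. A hedged, generic sketch of that idiom (not the crate's code; the function name is illustrative):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

// Publish a heap allocation into a shared slot exactly once.
fn publish_once(slot: &AtomicPtr<u64>, value: u64) -> *mut u64 {
    let new = Box::into_raw(Box::new(value));
    match slot.compare_exchange(ptr::null_mut(), new, Ordering::AcqRel, Ordering::Acquire) {
        Ok(_) => new,
        Err(existing) => {
            // Lost the race: reclaim our allocation and use the winner's.
            unsafe { drop(Box::from_raw(new)) };
            existing
        }
    }
}

fn main() {
    let slot = AtomicPtr::new(ptr::null_mut());
    let a = publish_once(&slot, 7);
    let b = publish_once(&slot, 9); // loses the "race"; returns the same pointer
    assert_eq!(a, b);
    assert_eq!(unsafe { *a }, 7);
    // Clean up the one allocation that was actually published.
    unsafe { drop(Box::from_raw(slot.swap(ptr::null_mut(), Ordering::AcqRel))) };
}
```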
+
+    /// Returns an iterator over the local values of all threads in unspecified
+    /// order.
+    ///
+    /// This call can be done safely, as `T` is required to implement [`Sync`].
+    pub fn iter(&self) -> Iter<'_, T>
+    where
+        T: Sync,
+    {
+        Iter {
+            thread_local: self,
+            raw: RawIter::new(),
+        }
+    }
+
+    /// Returns a mutable iterator over the local values of all threads in
+    /// unspecified order.
+    ///
+    /// Since this call borrows the `ThreadLocal` mutably, this operation can
+    /// be done safely---the mutable borrow statically guarantees no other
+    /// threads are currently accessing their associated values.
+    pub fn iter_mut(&mut self) -> IterMut<T> {
+        IterMut {
+            thread_local: self,
+            raw: RawIter::new(),
+        }
+    }
+
+    /// Removes all thread-specific values from the `ThreadLocal`, effectively
+    /// resetting it to its original state.
+    ///
+    /// Since this call borrows the `ThreadLocal` mutably, this operation can
+    /// be done safely---the mutable borrow statically guarantees no other
+    /// threads are currently accessing their associated values.
+    pub fn clear(&mut self) {
+        *self = ThreadLocal::new();
+    }
+}
+
+impl<T: Send> IntoIterator for ThreadLocal<T> {
+    type Item = T;
+    type IntoIter = IntoIter<T>;
+
+    fn into_iter(self) -> IntoIter<T> {
+        IntoIter {
+            thread_local: self,
+            raw: RawIter::new(),
+        }
+    }
+}
+
+impl<'a, T: Send + Sync> IntoIterator for &'a ThreadLocal<T> {
+    type Item = &'a T;
+    type IntoIter = Iter<'a, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<'a, T: Send> IntoIterator for &'a mut ThreadLocal<T> {
+    type Item = &'a mut T;
+    type IntoIter = IterMut<'a, T>;
+
+    fn into_iter(self) -> IterMut<'a, T> {
+        self.iter_mut()
+    }
+}
+
+impl<T: Send + Default> ThreadLocal<T> {
+    /// Returns the element for the current thread, or creates a default one if
+    /// it doesn't exist.
+    pub fn get_or_default(&self) -> &T {
+        self.get_or(Default::default)
+    }
+}
+
+impl<T: Send + fmt::Debug> fmt::Debug for ThreadLocal<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "ThreadLocal {{ local_data: {:?} }}", self.get())
+    }
+}
+
+impl<T: Send + UnwindSafe> UnwindSafe for ThreadLocal<T> {}
+
+#[derive(Debug)]
+struct RawIter {
+    yielded: usize,
+    bucket: usize,
+    bucket_size: usize,
+    index: usize,
+}
+impl RawIter {
+    #[inline]
+    fn new() -> Self {
+        Self {
+            yielded: 0,
+            bucket: 0,
+            bucket_size: 1,
+            index: 0,
+        }
+    }
+
+    fn next<'a, T: Send + Sync>(&mut self, thread_local: &'a ThreadLocal<T>) -> Option<&'a T> {
+        while self.bucket < BUCKETS {
+            let bucket = unsafe { thread_local.buckets.get_unchecked(self.bucket) };
+            let bucket = bucket.load(Ordering::Acquire);
+
+            if !bucket.is_null() {
+                while self.index < self.bucket_size {
+                    let entry = unsafe { &*bucket.add(self.index) };
+                    self.index += 1;
+                    if entry.present.load(Ordering::Acquire) {
+                        self.yielded += 1;
+                        return Some(unsafe { &*(&*entry.value.get()).as_ptr() });
+                    }
+                }
+            }
+
+            self.next_bucket();
+        }
+        None
+    }
+    fn next_mut<'a, T: Send>(
+        &mut self,
+        thread_local: &'a mut ThreadLocal<T>,
+    ) -> Option<&'a mut Entry<T>> {
+        if *thread_local.values.get_mut() == self.yielded {
+            return None;
+        }
+
+        loop {
+            let bucket = unsafe { thread_local.buckets.get_unchecked_mut(self.bucket) };
+            let bucket = *bucket.get_mut();
+
+            if !bucket.is_null() {
+                while self.index < self.bucket_size {
+                    let entry = unsafe { &mut *bucket.add(self.index) };
+                    self.index += 1;
+                    if *entry.present.get_mut() {
+                        self.yielded += 1;
+                        return Some(entry);
+                    }
+                }
+            }
+
+            self.next_bucket();
+        }
+    }
+
+    #[inline]
+    fn next_bucket(&mut self) {
+        if self.bucket != 0 {
+            self.bucket_size <<= 1;
+        }
+        self.bucket += 1;
+        self.index = 0;
+    }
+
+    fn size_hint<T: Send>(&self, thread_local: &ThreadLocal<T>) -> (usize, Option<usize>) {
+        let total = thread_local.values.load(Ordering::Acquire);
+        (total - self.yielded, None)
+    }
+    fn size_hint_frozen<T: Send>(&self, thread_local: &ThreadLocal<T>) -> (usize, Option<usize>) {
+        let total = unsafe { *(&thread_local.values as *const AtomicUsize as *const usize) };
+        let remaining = total - self.yielded;
+        (remaining, Some(remaining))
+    }
+}
+
+/// Iterator over the contents of a `ThreadLocal`.
+#[derive(Debug)]
+pub struct Iter<'a, T: Send + Sync> {
+    thread_local: &'a ThreadLocal<T>,
+    raw: RawIter,
+}
+
+impl<'a, T: Send + Sync> Iterator for Iter<'a, T> {
+    type Item = &'a T;
+    fn next(&mut self) -> Option<Self::Item> {
+        self.raw.next(self.thread_local)
+    }
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.raw.size_hint(self.thread_local)
+    }
+}
+impl<T: Send + Sync> FusedIterator for Iter<'_, T> {}
+
+/// Mutable iterator over the contents of a `ThreadLocal`.
+pub struct IterMut<'a, T: Send> {
+    thread_local: &'a mut ThreadLocal<T>,
+    raw: RawIter,
+}
+
+impl<'a, T: Send> Iterator for IterMut<'a, T> {
+    type Item = &'a mut T;
+    fn next(&mut self) -> Option<&'a mut T> {
+        self.raw
+            .next_mut(self.thread_local)
+            .map(|entry| unsafe { &mut *(&mut *entry.value.get()).as_mut_ptr() })
+    }
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.raw.size_hint_frozen(self.thread_local)
+    }
+}
+
+impl<T: Send> ExactSizeIterator for IterMut<'_, T> {}
+impl<T: Send> FusedIterator for IterMut<'_, T> {}
+
+// Manual impl so we don't call Debug on the ThreadLocal, as doing so would create a reference to
+// this thread's value that potentially aliases with a mutable reference we have given out.
+impl<'a, T: Send + fmt::Debug> fmt::Debug for IterMut<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("IterMut").field("raw", &self.raw).finish()
+    }
+}
+
+/// An iterator that moves out of a `ThreadLocal`.
+#[derive(Debug)]
+pub struct IntoIter<T: Send> {
+    thread_local: ThreadLocal<T>,
+    raw: RawIter,
+}
+
+impl<T: Send> Iterator for IntoIter<T> {
+    type Item = T;
+    fn next(&mut self) -> Option<T> {
+        self.raw.next_mut(&mut self.thread_local).map(|entry| {
+            *entry.present.get_mut() = false;
+            unsafe {
+                std::mem::replace(&mut *entry.value.get(), MaybeUninit::uninit()).assume_init()
+            }
+        })
+    }
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.raw.size_hint_frozen(&self.thread_local)
+    }
+}
+
+impl<T: Send> ExactSizeIterator for IntoIter<T> {}
+impl<T: Send> FusedIterator for IntoIter<T> {}
+
+fn allocate_bucket<T>(size: usize) -> *mut Entry<T> {
+    Box::into_raw(
+        (0..size)
+            .map(|_| Entry::<T> {
+                present: AtomicBool::new(false),
+                value: UnsafeCell::new(MaybeUninit::uninit()),
+            })
+            .collect(),
+    ) as *mut _
+}
+
+unsafe fn deallocate_bucket<T>(bucket: *mut Entry<T>, size: usize) {
+    let _ = Box::from_raw(std::slice::from_raw_parts_mut(bucket, size));
+}
+
+#[cfg(test)]
+mod tests {
+    use super::ThreadLocal;
+    use std::cell::RefCell;
+    use std::sync::atomic::AtomicUsize;
+    use std::sync::atomic::Ordering::Relaxed;
+    use std::sync::Arc;
+    use std::thread;
+
+    fn make_create() -> Arc<dyn Fn() -> usize + Send + Sync> {
+        let count = AtomicUsize::new(0);
+        Arc::new(move || count.fetch_add(1, Relaxed))
+    }
+
+    #[test]
+    fn same_thread() {
+        let create = make_create();
+        let mut tls = ThreadLocal::new();
+        assert_eq!(None, tls.get());
+        assert_eq!("ThreadLocal { local_data: None }", format!("{:?}", &tls));
+        assert_eq!(0, *tls.get_or(|| create()));
+        assert_eq!(Some(&0), tls.get());
+        assert_eq!(0, *tls.get_or(|| create()));
+        assert_eq!(Some(&0), tls.get());
+        assert_eq!(0, *tls.get_or(|| create()));
+        assert_eq!(Some(&0), tls.get());
+        assert_eq!("ThreadLocal { local_data: Some(0) }", format!("{:?}", &tls));
+        tls.clear();
+        assert_eq!(None, tls.get());
+    }
+
+    #[test]
+    fn different_thread() {
+        let create = make_create();
+        let tls = Arc::new(ThreadLocal::new());
+        assert_eq!(None, tls.get());
+        assert_eq!(0, *tls.get_or(|| create()));
+        assert_eq!(Some(&0), tls.get());
+
+        let tls2 = tls.clone();
+        let create2 = create.clone();
+        thread::spawn(move || {
+            assert_eq!(None, tls2.get());
+            assert_eq!(1, *tls2.get_or(|| create2()));
+            assert_eq!(Some(&1), tls2.get());
+        })
+        .join()
+        .unwrap();
+
+        assert_eq!(Some(&0), tls.get());
+        assert_eq!(0, *tls.get_or(|| create()));
+    }
+
+    #[test]
+    fn iter() {
+        let tls = Arc::new(ThreadLocal::new());
+        tls.get_or(|| Box::new(1));
+
+        let tls2 = tls.clone();
+        thread::spawn(move || {
+            tls2.get_or(|| Box::new(2));
+            let tls3 = tls2.clone();
+            thread::spawn(move || {
+                tls3.get_or(|| Box::new(3));
+            })
+            .join()
+            .unwrap();
+            drop(tls2);
+        })
+        .join()
+        .unwrap();
+
+        let mut tls = Arc::try_unwrap(tls).unwrap();
+
+        let mut v = tls.iter().map(|x| **x).collect::<Vec<i32>>();
+        v.sort_unstable();
+        assert_eq!(vec![1, 2, 3], v);
+
+        let mut v = tls.iter_mut().map(|x| **x).collect::<Vec<i32>>();
+        v.sort_unstable();
+        assert_eq!(vec![1, 2, 3], v);
+
+        let mut v = tls.into_iter().map(|x| *x).collect::<Vec<i32>>();
+        v.sort_unstable();
+        assert_eq!(vec![1, 2, 3], v);
+    }
+
+    #[test]
+    fn test_drop() {
+        let local = ThreadLocal::new();
+        struct Dropped(Arc<AtomicUsize>);
+        impl Drop for Dropped {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Relaxed);
+            }
+        }
+
+        let dropped = Arc::new(AtomicUsize::new(0));
+        local.get_or(|| Dropped(dropped.clone()));
+        assert_eq!(dropped.load(Relaxed), 0);
+        drop(local);
+        assert_eq!(dropped.load(Relaxed), 1);
+    }
+
+    #[test]
+    fn is_sync() {
+        fn foo<T: Sync>() {}
+        foo::<ThreadLocal<String>>();
+        foo::<ThreadLocal<RefCell<String>>>();
+    }
+}
diff --git a/crates/thread_local/src/thread_id.rs b/crates/thread_local/src/thread_id.rs
new file mode 100644
index 0000000..aa4f2d6
--- /dev/null
+++ b/crates/thread_local/src/thread_id.rs
@@ -0,0 +1,207 @@
+// Copyright 2017 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use crate::POINTER_WIDTH;
+use once_cell::sync::Lazy;
+use std::cell::Cell;
+use std::cmp::Reverse;
+use std::collections::BinaryHeap;
+use std::sync::Mutex;
+use std::usize;
+
+/// Thread ID manager which allocates thread IDs. It attempts to aggressively
+/// reuse thread IDs where possible to avoid cases where a ThreadLocal grows
+/// indefinitely when it is used by many short-lived threads.
+struct ThreadIdManager {
+    free_from: usize,
+    free_list: BinaryHeap<Reverse<usize>>,
+}
+impl ThreadIdManager {
+    fn new() -> ThreadIdManager {
+        ThreadIdManager {
+            free_from: 0,
+            free_list: BinaryHeap::new(),
+        }
+    }
+    fn alloc(&mut self) -> usize {
+        if let Some(id) = self.free_list.pop() {
+            id.0
+        } else {
+            let id = self.free_from;
+            self.free_from = self
+                .free_from
+                .checked_add(1)
+                .expect("Ran out of thread IDs");
+            id
+        }
+    }
+    fn free(&mut self, id: usize) {
+        self.free_list.push(Reverse(id));
+    }
+}
+static THREAD_ID_MANAGER: Lazy<Mutex<ThreadIdManager>> =
+    Lazy::new(|| Mutex::new(ThreadIdManager::new()));
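
The free list is a min-heap because IDs are wrapped in `Reverse`, so `alloc` hands back the smallest freed ID first, keeping IDs dense and buckets small. A small hedged sketch of that ordering:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn main() {
    // BinaryHeap is a max-heap; wrapping IDs in Reverse flips the order
    // so pop() yields the smallest freed ID first, as alloc() relies on.
    let mut free_list = BinaryHeap::new();
    for id in [5usize, 1, 3] {
        free_list.push(Reverse(id));
    }
    assert_eq!(free_list.pop(), Some(Reverse(1)));
    assert_eq!(free_list.pop(), Some(Reverse(3)));
    assert_eq!(free_list.pop(), Some(Reverse(5)));
}
```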
+
+/// Data which is unique to the current thread while it is running.
+/// A thread ID may be reused after a thread exits.
+#[derive(Clone, Copy)]
+pub(crate) struct Thread {
+    /// The thread ID obtained from the thread ID manager.
+    pub(crate) id: usize,
+    /// The bucket this thread's local storage will be in.
+    pub(crate) bucket: usize,
+    /// The size of the bucket this thread's local storage will be in.
+    pub(crate) bucket_size: usize,
+    /// The index into the bucket this thread's local storage is in.
+    pub(crate) index: usize,
+}
+impl Thread {
+    fn new(id: usize) -> Thread {
+        let bucket = usize::from(POINTER_WIDTH) - id.leading_zeros() as usize;
+        let bucket_size = 1 << bucket.saturating_sub(1);
+        let index = if id != 0 { id ^ bucket_size } else { 0 };
+
+        Thread {
+            id,
+            bucket,
+            bucket_size,
+            index,
+        }
+    }
+}
+
+cfg_if::cfg_if! {
+    if #[cfg(feature = "nightly")] {
+        // This is split into 2 thread-local variables so that we can check whether the
+        // thread is initialized without having to register a thread-local destructor.
+        //
+        // This makes the fast path smaller.
+        #[thread_local]
+        static mut THREAD: Option<Thread> = None;
+        thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; }
+
+        // Guard to ensure the thread ID is released on thread exit.
+        struct ThreadGuard {
+            // We keep a copy of the thread ID in the ThreadGuard: we can't
+            // reliably access THREAD in our Drop impl due to the unpredictable
+            // order of TLS destructors.
+            id: Cell<usize>,
+        }
+
+        impl Drop for ThreadGuard {
+            fn drop(&mut self) {
+                // Release the thread ID. Any further accesses to the thread ID
+                // will go through get_slow which will either panic or
+                // initialize a new ThreadGuard.
+                unsafe {
+                    THREAD = None;
+                }
+                THREAD_ID_MANAGER.lock().unwrap().free(self.id.get());
+            }
+        }
+
+        /// Returns a thread ID for the current thread, allocating one if needed.
+        #[inline]
+        pub(crate) fn get() -> Thread {
+            if let Some(thread) = unsafe { THREAD } {
+                thread
+            } else {
+                get_slow()
+            }
+        }
+
+        /// Out-of-line slow path for allocating a thread ID.
+        #[cold]
+         fn get_slow() -> Thread {
+            let new = Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc());
+            unsafe {
+                THREAD = Some(new);
+            }
+            THREAD_GUARD.with(|guard| guard.id.set(new.id));
+            new
+        }
+    } else {
+        // This is split into 2 thread-local variables so that we can check whether the
+        // thread is initialized without having to register a thread-local destructor.
+        //
+        // This makes the fast path smaller.
+        thread_local! { static THREAD: Cell<Option<Thread>> = const { Cell::new(None) }; }
+        thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; }
+
+        // Guard to ensure the thread ID is released on thread exit.
+        struct ThreadGuard {
+            // We keep a copy of the thread ID in the ThreadGuard: we can't
+            // reliably access THREAD in our Drop impl due to the unpredictable
+            // order of TLS destructors.
+            id: Cell<usize>,
+        }
+
+        impl Drop for ThreadGuard {
+            fn drop(&mut self) {
+                // Release the thread ID. Any further accesses to the thread ID
+                // will go through get_slow which will either panic or
+                // initialize a new ThreadGuard.
+                let _ = THREAD.try_with(|thread| thread.set(None));
+                THREAD_ID_MANAGER.lock().unwrap().free(self.id.get());
+            }
+        }
+
+        /// Returns a thread ID for the current thread, allocating one if needed.
+        #[inline]
+        pub(crate) fn get() -> Thread {
+            THREAD.with(|thread| {
+                if let Some(thread) = thread.get() {
+                    thread
+                } else {
+                    get_slow(thread)
+                }
+            })
+        }
+
+        /// Out-of-line slow path for allocating a thread ID.
+        #[cold]
+        fn get_slow(thread: &Cell<Option<Thread>>) -> Thread {
+            let new = Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc());
+            thread.set(Some(new));
+            THREAD_GUARD.with(|guard| guard.id.set(new.id));
+            new
+        }
+    }
+}
+
+#[test]
+fn test_thread() {
+    let thread = Thread::new(0);
+    assert_eq!(thread.id, 0);
+    assert_eq!(thread.bucket, 0);
+    assert_eq!(thread.bucket_size, 1);
+    assert_eq!(thread.index, 0);
+
+    let thread = Thread::new(1);
+    assert_eq!(thread.id, 1);
+    assert_eq!(thread.bucket, 1);
+    assert_eq!(thread.bucket_size, 1);
+    assert_eq!(thread.index, 0);
+
+    let thread = Thread::new(2);
+    assert_eq!(thread.id, 2);
+    assert_eq!(thread.bucket, 2);
+    assert_eq!(thread.bucket_size, 2);
+    assert_eq!(thread.index, 0);
+
+    let thread = Thread::new(3);
+    assert_eq!(thread.id, 3);
+    assert_eq!(thread.bucket, 2);
+    assert_eq!(thread.bucket_size, 2);
+    assert_eq!(thread.index, 1);
+
+    let thread = Thread::new(19);
+    assert_eq!(thread.id, 19);
+    assert_eq!(thread.bucket, 5);
+    assert_eq!(thread.bucket_size, 16);
+    assert_eq!(thread.index, 3);
+}
diff --git a/crates/thread_local/src/unreachable.rs b/crates/thread_local/src/unreachable.rs
new file mode 100644
index 0000000..db4d831
--- /dev/null
+++ b/crates/thread_local/src/unreachable.rs
@@ -0,0 +1,57 @@
+// Copyright 2017 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use std::hint::unreachable_unchecked;
+
+/// An extension trait for `Option<T>` providing unchecked unwrapping methods.
+pub trait UncheckedOptionExt<T> {
+    /// Get the value out of this Option without checking for None.
+    unsafe fn unchecked_unwrap(self) -> T;
+
+    /// Assert that this Option is a None to the optimizer.
+    unsafe fn unchecked_unwrap_none(self);
+}
+
+/// An extension trait for `Result<T, E>` providing unchecked unwrapping methods.
+pub trait UncheckedResultExt<T, E> {
+    /// Get the value out of this Result without checking for Err.
+    unsafe fn unchecked_unwrap_ok(self) -> T;
+
+    /// Get the error out of this Result without checking for Ok.
+    unsafe fn unchecked_unwrap_err(self) -> E;
+}
+
+impl<T> UncheckedOptionExt<T> for Option<T> {
+    unsafe fn unchecked_unwrap(self) -> T {
+        match self {
+            Some(x) => x,
+            None => unreachable_unchecked(),
+        }
+    }
+
+    unsafe fn unchecked_unwrap_none(self) {
+        if self.is_some() {
+            unreachable_unchecked()
+        }
+    }
+}
+
+impl<T, E> UncheckedResultExt<T, E> for Result<T, E> {
+    unsafe fn unchecked_unwrap_ok(self) -> T {
+        match self {
+            Ok(x) => x,
+            Err(_) => unreachable_unchecked(),
+        }
+    }
+
+    unsafe fn unchecked_unwrap_err(self) -> E {
+        match self {
+            Ok(_) => unreachable_unchecked(),
+            Err(e) => e,
+        }
+    }
+}
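
These helpers are internal; for reference, a hedged sketch of the pattern `get_or` builds on them: wrap an infallible closure in `Ok`, then strip the impossible `Err` arm without a branch (the helper name below is illustrative, not the crate's API):

```rust
use std::hint::unreachable_unchecked;

// Reuse a fallible code path with an error that is never constructed,
// then tell the optimizer the Err arm is unreachable rather than
// paying for a panic branch.
fn get_or_infallible<T>(create: impl FnOnce() -> T) -> T {
    let result: Result<T, ()> = Ok(create());
    match result {
        Ok(value) => value,
        // SAFETY: only Ok is ever constructed above, so this arm is unreachable.
        Err(()) => unsafe { unreachable_unchecked() },
    }
}

fn main() {
    assert_eq!(get_or_infallible(|| 41 + 1), 42);
}
```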
diff --git a/crates/tinytemplate/.cargo-checksum.json b/crates/tinytemplate/.cargo-checksum.json
new file mode 100644
index 0000000..4ef4c90
--- /dev/null
+++ b/crates/tinytemplate/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"59b98fa1a2681e85842dea1e7f6424e57322da775d90a49c4de18169b2551caf","CONTRIBUTING.md":"d9b4e8d9a4449b075d618f3f0c1454558f3140fb51a8013610dd9a9e882ecef1","Cargo.toml":"f5e4758f59eb8cebd7cfb8553d2158223f24fc4716327e2a1de3a8726b8bc441","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"b6d10b8851a36aa6ec9612a186e56c2fcea0ad7db7b90c3dda1d16ae27c0077e","README.md":"46a42e03b08a4d9910650bdeb4dd708f80c97000066fab4b3eac0ee14f5c7f9b","benches/benchmarks.rs":"43e0f13f15cb13edb0dfc8c155e86eec966b13ed36f524c2988afd04938f3f92","src/compiler.rs":"d0da5a0f0048c1ef64d89f51c8a7c4500a431353897007e55d158398bcd591b9","src/error.rs":"7aacecb663cf36dea3b685354b735790527ac7dac64a5dc0668281b46ea751e9","src/instruction.rs":"f8bf799553a5a60266d33fbde10468e7b27571f0af96703914bf5c39294444ea","src/lib.rs":"57fca7200d4316f73e75ca3610b558b8c4e6d5e3f5d6857310af122a43d7105b","src/syntax.rs":"f2077403cc8dd3ec5f53513d59dbbca3fbae0e14bcab6690f27ad45f39a484fe","src/template.rs":"6096b2cfe303365d09cf3773b9427dc5d2c86c7fd92e326f8089e9a9123b3557"},"package":"be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"}
\ No newline at end of file
diff --git a/crates/tinytemplate/Android.bp b/crates/tinytemplate/Android.bp
new file mode 100644
index 0000000..856fa44
--- /dev/null
+++ b/crates/tinytemplate/Android.bp
@@ -0,0 +1,55 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_tinytemplate_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_tinytemplate_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+    license_text: ["LICENSE"],
+}
+
+rust_library {
+    name: "libtinytemplate",
+    host_supported: true,
+    crate_name: "tinytemplate",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.2.1",
+    crate_root: "src/lib.rs",
+    edition: "2015",
+    rustlibs: [
+        "libserde",
+        "libserde_json",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
+    ],
+    product_available: true,
+    vendor_available: true,
+}
+
+rust_test {
+    name: "tinytemplate_test_src_lib",
+    host_supported: true,
+    crate_name: "tinytemplate",
+    cargo_env_compat: true,
+    cargo_pkg_version: "1.2.1",
+    crate_root: "src/lib.rs",
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    test_options: {
+        unit_test: true,
+    },
+    edition: "2015",
+    rustlibs: [
+        "libcriterion",
+        "libserde",
+        "libserde_json",
+    ],
+    proc_macros: ["libserde_derive"],
+}
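
The build rules above only wire the crate into Soong. For orientation, a minimal render example in the style of the crate's README (a hedged sketch; the template, struct, and names are illustrative, and serde's `derive` feature is assumed):

```rust
use std::error::Error;

use serde::Serialize;
use tinytemplate::TinyTemplate;

#[derive(Serialize)]
struct Context {
    name: String,
}

static TEMPLATE: &str = "Hello {name}!";

fn main() -> Result<(), Box<dyn Error>> {
    let mut tt = TinyTemplate::new();
    // Templates are compiled once when added, then rendered against any
    // serde-serializable context.
    tt.add_template("hello", TEMPLATE)?;

    let context = Context { name: "World".to_string() };
    let rendered = tt.render("hello", &context)?;
    assert_eq!(rendered, "Hello World!");
    Ok(())
}
```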
diff --git a/crates/tinytemplate/CHANGELOG.md b/crates/tinytemplate/CHANGELOG.md
new file mode 100755
index 0000000..2c757ac
--- /dev/null
+++ b/crates/tinytemplate/CHANGELOG.md
@@ -0,0 +1,51 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [1.2.1] - 2021-03-03
+### Fixed
+- Fixed a compile error on some nightly compiler versions.
+
+## [1.2.0] - 2021-01-03
+### Fixed
+ - Fixed numeric values being truthy when zero, rather than when non-zero. (For real this time)
+### Added
+ - Allow numeric indexes to be used in paths, to index into JSON arrays.
+
+## [1.1.0] - 2020-05-31
+  - Added `TinyTemplate::set_default_formatter` which, for example, allows disabling HTML-escaping
+
+## [1.0.4] - 2020-04-25
+### Added
+- Added `@root` keyword which allows printing, branching on or iterating over the root context
+  object. This saves having to wrap simple context values in a struct.
+
+## [1.0.3] - 2019-12-26
+### Fixed
+- Fixed the @last keyword never evaluating to true
+- Fixed numeric values being truthy when zero, rather than when non-zero.
+
+## [1.0.2] - 2019-05-16
+### Fixed
+- Fixed possible panic when compiling templates with escaped curly braces.
+
+## [1.0.1] - 2019-01-19
+### Added
+- Added support for older versions of Rust (back to 1.26).
+
+## 1.0.0 - 2019-01-19
+### Added
+- Initial release on Crates.io.
+
+[Unreleased]: https://github.com/bheisler/TinyTemplate/compare/1.2.1...HEAD
+[1.0.1]: https://github.com/bheisler/TinyTemplate/compare/1.0.0...1.0.1
+[1.0.2]: https://github.com/bheisler/TinyTemplate/compare/1.0.1...1.0.2
+[1.0.3]: https://github.com/bheisler/TinyTemplate/compare/1.0.2...1.0.3
+[1.0.4]: https://github.com/bheisler/TinyTemplate/compare/1.0.3...1.0.4
+[1.1.0]: https://github.com/bheisler/TinyTemplate/compare/1.0.4...1.1.0
+[1.2.0]: https://github.com/bheisler/TinyTemplate/compare/1.1.0...1.2.0
+[1.2.1]: https://github.com/bheisler/TinyTemplate/compare/1.2.0...1.2.1
diff --git a/crates/tinytemplate/CONTRIBUTING.md b/crates/tinytemplate/CONTRIBUTING.md
new file mode 100755
index 0000000..e6af4b7
--- /dev/null
+++ b/crates/tinytemplate/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contributing to TinyTemplate
+
+## Ideas, Experiences and Questions
+
+The easiest way to contribute to TinyTemplate is to use it and report your experiences, ask questions and contribute ideas. We'd love to hear your thoughts on how to make TinyTemplate better, or your comments on why you are or are not currently using it.
+
+Issues, ideas, requests and questions should be posted on the issue tracker at:
+
+https://github.com/bheisler/TinyTemplate/issues
+
+## Code
+
+Pull requests are welcome, though please raise an issue or post a comment for discussion first. We're happy to assist new contributors.
+
+If you're not sure what to work on, try checking the [Beginner label](https://github.com/bheisler/TinyTemplate/labels/Beginner)
+
+To make changes to the code, fork the repo and clone it:
+
+`git clone git@github.com:your-username/TinyTemplate.git`
+
+Then make your changes to the code. When you're done, run the tests:
+
+```
+cargo test
+```
+
+It's a good idea to run clippy and fix any warnings as well:
+
+```
+rustup component add clippy-preview
+cargo clippy
+```
+
+Finally, run Rustfmt to maintain a common code style:
+
+```
+rustup component add rustfmt-preview
+cargo fmt
+```
+
+Don't forget to update the CHANGELOG.md file and any appropriate documentation. Once you're finished, push to your fork and submit a pull request. We try to respond to new issues and pull requests quickly, so if there hasn't been any response for more than a few days feel free to ping @bheisler.
+
+Some things that will increase the chance that your pull request is accepted:
+
+* Write tests
+* Clearly document public methods, with examples if possible
+* Write a good commit message
+
+Good documentation is one of the core goals of the TinyTemplate project, so new code in pull requests should have clear and complete documentation.
+
+## Github Labels
+
+TinyTemplate uses a simple set of labels to track issues. Most important are the difficulty labels:
+
+- Beginner - Suitable for people new to TinyTemplate
+- Intermediate - More challenging, likely involves some non-obvious design decisions or knowledge of CUDA
+- Bigger Project - Large and/or complex project such as designing a safe, Rusty wrapper around a complex part of the CUDA API
+
+Additionally, there are a few other noteworthy labels:
+
+- Breaking Change - Fixing this will have to wait until the next breaking-change release
+- Enhancement - Enhancements to existing functionality or documentation
+- Help Wanted - Input and ideas requested
+
+## Code of Conduct
+
+We follow the [Rust Code of Conduct](http://www.rust-lang.org/conduct.html).
diff --git a/crates/tinytemplate/Cargo.lock b/crates/tinytemplate/Cargo.lock
new file mode 100644
index 0000000..2791890
--- /dev/null
+++ b/crates/tinytemplate/Cargo.lock
@@ -0,0 +1,619 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bumpalo"
+version = "3.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
+
+[[package]]
+name = "cast"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "clap"
+version = "2.34.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
+dependencies = [
+ "bitflags",
+ "textwrap",
+ "unicode-width",
+]
+
+[[package]]
+name = "criterion"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f"
+dependencies = [
+ "atty",
+ "cast",
+ "clap",
+ "criterion-plot",
+ "csv",
+ "itertools",
+ "lazy_static",
+ "num-traits",
+ "oorandom",
+ "plotters",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_cbor",
+ "serde_derive",
+ "serde_json",
+ "tinytemplate 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "walkdir",
+]
+
+[[package]]
+name = "criterion-plot"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876"
+dependencies = [
+ "cast",
+ "itertools",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
+dependencies = [
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
+
+[[package]]
+name = "csv"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe"
+dependencies = [
+ "csv-core",
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "csv-core"
+version = "0.1.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "either"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
+
+[[package]]
+name = "half"
+version = "1.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+
+[[package]]
+name = "js-sys"
+version = "0.3.70"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+
+[[package]]
+name = "libc"
+version = "0.2.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+
+[[package]]
+name = "log"
+version = "0.4.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "num-traits"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+
+[[package]]
+name = "oorandom"
+version = "11.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
+
+[[package]]
+name = "plotters"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3"
+dependencies = [
+ "num-traits",
+ "plotters-backend",
+ "plotters-svg",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "plotters-backend"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7"
+
+[[package]]
+name = "plotters-svg"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705"
+dependencies = [
+ "plotters-backend",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rayon"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
+dependencies = [
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
+dependencies = [
+ "crossbeam-deque",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "regex"
+version = "1.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
+
+[[package]]
+name = "ryu"
+version = "1.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_cbor"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
+dependencies = [
+ "half",
+ "serde",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.209"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.127"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad"
+dependencies = [
+ "itoa",
+ "memchr",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
+name = "tinytemplate"
+version = "1.2.1"
+dependencies = [
+ "criterion",
+ "serde",
+ "serde_derive",
+ "serde_json",
+]
+
+[[package]]
+name = "tinytemplate"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "unicode-width"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d"
+
+[[package]]
+name = "walkdir"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
+dependencies = [
+ "same-file",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484"
+
+[[package]]
+name = "web-sys"
+version = "0.3.70"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
+dependencies = [
+ "windows-sys",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
diff --git a/crates/tinytemplate/Cargo.toml b/crates/tinytemplate/Cargo.toml
new file mode 100644
index 0000000..7995565
--- /dev/null
+++ b/crates/tinytemplate/Cargo.toml
@@ -0,0 +1,41 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "tinytemplate"
+version = "1.2.1"
+authors = ["Brook Heisler <brookheisler@gmail.com>"]
+description = "Simple, lightweight template engine"
+readme = "README.md"
+keywords = ["template", "html"]
+categories = ["template-engine"]
+license = "Apache-2.0 OR MIT"
+repository = "https://github.com/bheisler/TinyTemplate"
+
+[[bench]]
+name = "benchmarks"
+harness = false
+[dependencies.serde]
+version = "1.0"
+
+[dependencies.serde_json]
+version = "1.0"
+[dev-dependencies.criterion]
+version = "0.3"
+
+[dev-dependencies.serde_derive]
+version = "1.0"
+[badges.maintenance]
+status = "passively-maintained"
+
+[badges.travis-ci]
+repository = "bheisler/TinyTemplate"
diff --git a/crates/tinytemplate/LICENSE b/crates/tinytemplate/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/crates/tinytemplate/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE
\ No newline at end of file
diff --git a/crates/tinytemplate/LICENSE-APACHE b/crates/tinytemplate/LICENSE-APACHE
new file mode 100755
index 0000000..16fe87b
--- /dev/null
+++ b/crates/tinytemplate/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/crates/tinytemplate/LICENSE-MIT b/crates/tinytemplate/LICENSE-MIT
new file mode 100755
index 0000000..74edb9f
--- /dev/null
+++ b/crates/tinytemplate/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2019 Brook Heisler
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/crates/tinytemplate/METADATA b/crates/tinytemplate/METADATA
new file mode 100644
index 0000000..c4c386e
--- /dev/null
+++ b/crates/tinytemplate/METADATA
@@ -0,0 +1,19 @@
+name: "tinytemplate"
+description: "Simple, lightweight template engine"
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/tinytemplate"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/tinytemplate/tinytemplate-1.2.1.crate"
+  }
+  version: "1.2.1"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2021
+    month: 4
+    day: 2
+  }
+}
diff --git a/crates/tinytemplate/MODULE_LICENSE_APACHE2 b/crates/tinytemplate/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/tinytemplate/MODULE_LICENSE_APACHE2
diff --git a/crates/tinytemplate/README.md b/crates/tinytemplate/README.md
new file mode 100755
index 0000000..c5c793f
--- /dev/null
+++ b/crates/tinytemplate/README.md
@@ -0,0 +1,130 @@
+<h1 align="center">TinyTemplate</h1>

+

+<div align="center">Minimal Lightweight Text Templating</div>

+

+<div align="center">

+    <a href="https://docs.rs/tinytemplate/">API Documentation</a>

+    |

+    <a href="https://github.com/bheisler/TinyTemplate/blob/master/CHANGELOG.md">Changelog</a>

+</div>

+

+<div align="center">

+    <a href="https://github.com/bheisler/TinyTemplate/actions">

+        <img src="https://github.com/bheisler/TinyTemplate/workflows/Continuous%20integration/badge.svg" alt="Continuous integration">

+    </a>

+    <a href="https://crates.io/crates/tinytemplate">

+        <img src="https://img.shields.io/crates/v/tinytemplate.svg" alt="Crates.io">

+    </a>

+</div>

+

+TinyTemplate is a small, minimalistic text templating system with limited dependencies.

+

+## Table of Contents

+- [Table of Contents](#table-of-contents)

+  - [Goals](#goals)

+  - [Why TinyTemplate?](#why-tinytemplate)

+  - [Quickstart](#quickstart)

+  - [Compatibility Policy](#compatibility-policy)

+  - [Contributing](#contributing)

+  - [Maintenance](#maintenance)

+  - [License](#license)

+

+### Goals

+

+ The primary design goals are:

+

+ - __Small__: TinyTemplate deliberately does not support many features of more powerful template engines.

+ - __Simple__: TinyTemplate presents a minimal but well-documented user-facing API.

+ - __Lightweight__: TinyTemplate has minimal required dependencies.

+

+Non-goals include:

+

+- __Extensibility__: TinyTemplate supports custom value formatters, but that is all.

+- __Performance__: TinyTemplate provides decent performance, but other template engines are faster.

+

+### Why TinyTemplate?

+

+I created TinyTemplate after noticing that none of the existing template libraries really suited my

+needs for Criterion.rs. Some had large dependency trees to support features that I didn't use. Some

+required adding a build script to convert templates into code at runtime, in search of extreme

+performance that I didn't need. Some had elaborate macro-based DSL's to generate HTML, where I just

+wanted plain text with some markup. Some expect the templates to be provided in a directory of text

+files, but I wanted the template to be included in the binary. I just wanted something small and 

+minimal with good documentation but there was nothing like that out there so I wrote my own.

+

+TinyTemplate is well-suited to generating HTML reports and similar text files. It could be used for

+generating HTML or other text in a web-server, but for more-complex use cases another template

+engine may be a better fit.

+

+### Quickstart

+

+First, add TinyTemplate and serde-derive to your `Cargo.toml` file:

+

+```toml

+[dependencies]

+tinytemplate = "1.1"

+serde = { version = "1.0", features = ["derive"] }

+```

+

+Then add this code to "src.rs":

+

+```rust

+use serde::Serialize;

+

+use tinytemplate::TinyTemplate;

+use std::error::Error;

+

+#[derive(Serialize)]

+struct Context {

+    name: String,

+}

+

+static TEMPLATE : &'static str = "Hello {name}!";

+

+pub fn main() -> Result<(), Box<dyn Error>> {

+    let mut tt = TinyTemplate::new();

+    tt.add_template("hello", TEMPLATE)?;

+

+    let context = Context {

+        name: "World".to_string(),

+    };

+

+    let rendered = tt.render("hello", &context)?;

+    println!("{}", rendered);

+

+    Ok(())

+}

+```

+

+This should print "Hello World!" to stdout.

+

+### Compatibility Policy

+

+TinyTemplate supports the last three stable minor releases of Rust. At time of writing, this means

+Rust 1.38 or later. Older versions may work, but are not tested or guaranteed.

+

+Currently, the oldest version of Rust believed to work is 1.36. Future versions of TinyTemplate may

+break support for such old versions, and this will not be considered a breaking change. If you

+require TinyTemplate to work on old versions of Rust, you will need to stick to a

+specific patch version of TinyTemplate.

+

+### Contributing

+

+Thanks for your interest! Contributions are welcome.

+

+Issues, feature requests, questions and bug reports should be reported via the issue tracker above.

+In particular, becuase TinyTemplate aims to be well-documented, please report anything you find

+confusing or incorrect in the documentation.

+

+Code or documentation improvements in the form of pull requests are also welcome. Please file or

+comment on an issue to allow for discussion before doing a lot of work, though.

+

+For more details, see the [CONTRIBUTING.md file](https://github.com/bheisler/TinyTemplate/blob/master/CONTRIBUTING.md).

+

+### Maintenance

+

+TinyTemplate was created and is currently maintained by Brook Heisler (@bheisler).

+

+### License

+

+TinyTemplate is dual-licensed under the Apache 2.0 license and the MIT license.

diff --git a/crates/tinytemplate/TEST_MAPPING b/crates/tinytemplate/TEST_MAPPING
new file mode 100644
index 0000000..9a60ded
--- /dev/null
+++ b/crates/tinytemplate/TEST_MAPPING
@@ -0,0 +1,24 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/base64"
+    },
+    {
+      "path": "external/rust/crates/tinyvec"
+    },
+    {
+      "path": "external/rust/crates/unicode-xid"
+    }
+  ],
+  "presubmit": [
+    {
+      "name": "tinytemplate_test_src_lib"
+    }
+  ],
+  "presubmit-rust": [
+    {
+      "name": "tinytemplate_test_src_lib"
+    }
+  ]
+}
diff --git a/crates/tinytemplate/benches/benchmarks.rs b/crates/tinytemplate/benches/benchmarks.rs
new file mode 100755
index 0000000..5170f98
--- /dev/null
+++ b/crates/tinytemplate/benches/benchmarks.rs
@@ -0,0 +1,58 @@
+#[macro_use]

+extern crate criterion;

+extern crate tinytemplate;

+#[macro_use]

+extern crate serde_derive;

+

+use criterion::Criterion;

+use tinytemplate::TinyTemplate;

+

+static TABLE_SOURCE: &'static str = "<html>

+    {{ for row in table }}

+        <tr>{{ for value in row }}<td>{value}</td>{{ endfor }}</tr>

+    {{ endfor }}

+</html>";

+

+#[derive(Serialize)]

+struct TableContext {

+    table: Vec<Vec<usize>>,

+}

+

+fn make_table_context(size: usize) -> TableContext {

+    let mut table = Vec::with_capacity(size);

+    for _ in 0..size {

+        let mut inner = Vec::with_capacity(size);

+        for i in 0..size {

+            inner.push(i);

+        }

+        table.push(inner);

+    }

+    TableContext { table }

+}

+

+fn parse(criterion: &mut Criterion) {

+    criterion.bench_function("parse-table", |b| {

+        b.iter(|| {

+            let mut tt = TinyTemplate::new();

+            tt.add_template("table", TABLE_SOURCE).unwrap()

+        });

+    });

+}

+

+fn render(criterion: &mut Criterion) {

+    let mut tt = TinyTemplate::new();

+    tt.add_template("table", TABLE_SOURCE).unwrap();

+

+    criterion.bench_function_over_inputs(

+        "render-table",

+        move |b, size| {

+            let data = make_table_context(*size);

+

+            b.iter(|| tt.render("table", &data).unwrap());

+        },

+        vec![1usize, 5, 10, 50, 100, 200],

+    );

+}

+

+criterion_group!(benchmarks, parse, render);

+criterion_main!(benchmarks);

diff --git a/crates/tinytemplate/cargo_embargo.json b/crates/tinytemplate/cargo_embargo.json
new file mode 100644
index 0000000..c8842d1
--- /dev/null
+++ b/crates/tinytemplate/cargo_embargo.json
@@ -0,0 +1,4 @@
+{
+  "run_cargo": false,
+  "tests": true
+}
diff --git a/crates/tinytemplate/src/compiler.rs b/crates/tinytemplate/src/compiler.rs
new file mode 100755
index 0000000..df37947
--- /dev/null
+++ b/crates/tinytemplate/src/compiler.rs
@@ -0,0 +1,698 @@
+#![allow(deprecated)]

+

+/// The compiler module houses the code which parses and compiles templates. TinyTemplate implements

+/// a simple bytecode interpreter (see the [instruction] module for more details) to render templates.

+/// The [`TemplateCompiler`](struct.TemplateCompiler.html) struct is responsible for parsing the

+/// template strings and generating the appropriate bytecode instructions.

+use error::Error::*;

+use error::{get_offset, Error, Result};

+use instruction::{Instruction, Path, PathStep};

+

+/// The end point of a branch or goto instruction is not known.

+const UNKNOWN: usize = ::std::usize::MAX;

+

+/// The compiler keeps a stack of the open blocks so that it can ensure that blocks are closed in

+/// the right order. The Block type is a simple enumeration of the kinds of blocks that could be

+/// open. It may contain the instruction index corresponding to the start of the block.

+enum Block {

+    Branch(usize),

+    For(usize),

+    With,

+}

+

+/// List of the known @-keywords so that we can error if the user spells them wrong.

+static KNOWN_KEYWORDS: [&str; 4] = ["@index", "@first", "@last", "@root"];

+

+/// The TemplateCompiler struct is responsible for parsing a template string and generating bytecode

+/// instructions based on it. The parser is a simple hand-written pattern-matching parser with no

+/// recursion, which makes it relatively easy to read.

+pub(crate) struct TemplateCompiler<'template> {

+    original_text: &'template str,

+    remaining_text: &'template str,

+    instructions: Vec<Instruction<'template>>,

+    block_stack: Vec<(&'template str, Block)>,

+

+    /// When we see a `{foo -}` or similar, we need to remember to left-trim the next text block we

+    /// encounter.

+    trim_next: bool,

+}

+impl<'template> TemplateCompiler<'template> {

+    /// Create a new template compiler to parse and compile the given template.

+    pub fn new(text: &'template str) -> TemplateCompiler<'template> {

+        TemplateCompiler {

+            original_text: text,

+            remaining_text: text,

+            instructions: vec![],

+            block_stack: vec![],

+            trim_next: false,

+        }

+    }

+

+    /// Consume the template compiler to parse the template and return the generated bytecode.

+    pub fn compile(mut self) -> Result<Vec<Instruction<'template>>> {

+        while !self.remaining_text.is_empty() {

+            // Comment, denoted by {# comment text #}

+            if self.remaining_text.starts_with("{#") {

+                self.trim_next = false;

+

+                let tag = self.consume_tag("#}")?;

+                let comment = tag[2..(tag.len() - 2)].trim();

+                if comment.starts_with('-') {

+                    self.trim_last_whitespace();

+                }

+                if comment.ends_with('-') {

+                    self.trim_next_whitespace();

+                }

+            // Block tag. Block tags are wrapped in {{ }} and always have one word at the start

+            // to identify which kind of tag it is. Depending on the tag type there may be more.

+            } else if self.remaining_text.starts_with("{{") {

+                self.trim_next = false;

+

+                let (discriminant, rest) = self.consume_block()?;

+                match discriminant {

+                    "if" => {

+                        let (path, negated) = if rest.starts_with("not") {

+                            (self.parse_path(&rest[4..])?, true)

+                        } else {

+                            (self.parse_path(rest)?, false)

+                        };

+                        self.block_stack

+                            .push((discriminant, Block::Branch(self.instructions.len())));

+                        self.instructions

+                            .push(Instruction::Branch(path, !negated, UNKNOWN));

+                    }

+                    "else" => {

+                        self.expect_empty(rest)?;

+                        let num_instructions = self.instructions.len() + 1;

+                        self.close_branch(num_instructions, discriminant)?;

+                        self.block_stack

+                            .push((discriminant, Block::Branch(self.instructions.len())));

+                        self.instructions.push(Instruction::Goto(UNKNOWN))

+                    }

+                    "endif" => {

+                        self.expect_empty(rest)?;

+                        let num_instructions = self.instructions.len();

+                        self.close_branch(num_instructions, discriminant)?;

+                    }

+                    "with" => {

+                        let (path, name) = self.parse_with(rest)?;

+                        let instruction = Instruction::PushNamedContext(path, name);

+                        self.instructions.push(instruction);

+                        self.block_stack.push((discriminant, Block::With));

+                    }

+                    "endwith" => {

+                        self.expect_empty(rest)?;

+                        if let Some((_, Block::With)) = self.block_stack.pop() {

+                            self.instructions.push(Instruction::PopContext)

+                        } else {

+                            return Err(self.parse_error(

+                                discriminant,

+                                "Found a closing endwith that doesn't match with a preceeding with.".to_string()

+                            ));

+                        }

+                    }

+                    "for" => {

+                        let (path, name) = self.parse_for(rest)?;

+                        self.instructions

+                            .push(Instruction::PushIterationContext(path, name));

+                        self.block_stack

+                            .push((discriminant, Block::For(self.instructions.len())));

+                        self.instructions.push(Instruction::Iterate(UNKNOWN));

+                    }

+                    "endfor" => {

+                        self.expect_empty(rest)?;

+                        let num_instructions = self.instructions.len() + 1;

+                        let goto_target = self.close_for(num_instructions, discriminant)?;

+                        self.instructions.push(Instruction::Goto(goto_target));

+                        self.instructions.push(Instruction::PopContext);

+                    }

+                    "call" => {

+                        let (name, path) = self.parse_call(rest)?;

+                        self.instructions.push(Instruction::Call(name, path));

+                    }

+                    _ => {

+                        return Err(self.parse_error(

+                            discriminant,

+                            format!("Unknown block type '{}'", discriminant),

+                        ));

+                    }

+                }

+            // Values, of the form { dotted.path.to.value.in.context }

+            // Note that it is not (currently) possible to escape curly braces in the templates to

+            // prevent them from being interpreted as values.

+            } else if self.remaining_text.starts_with('{') {

+                self.trim_next = false;

+

+                let (path, name) = self.consume_value()?;

+                let instruction = match name {

+                    Some(name) => Instruction::FormattedValue(path, name),

+                    None => Instruction::Value(path),

+                };

+                self.instructions.push(instruction);

+            // All other text - just consume characters until we see a {

+            } else {

+                let mut escaped = false;

+                loop {

+                    let mut text = self.consume_text(escaped);

+                    if self.trim_next {

+                        text = text.trim_left();

+                        self.trim_next = false;

+                    }

+                    escaped = text.ends_with('\\');

+                    if escaped {

+                        text = &text[0..(text.len() - 1)];

+                    }

+                    self.instructions.push(Instruction::Literal(text));

+

+                    if !escaped {

+                        break;

+                    }

+                }

+            }

+        }

+

+        if let Some((text, _)) = self.block_stack.pop() {

+            return Err(self.parse_error(

+                text,

+                "Expected block-closing tag, but reached the end of input.".to_string(),

+            ));

+        }

+

+        Ok(self.instructions)

+    }

+

+    /// Splits a string into a list of named segments which can later be used to look up values in the

+    /// context.

+    fn parse_path(&self, text: &'template str) -> Result<Path<'template>> {

+        if !text.starts_with('@') {

+            Ok(text

+                .split('.')

+                .map(|s| match s.parse::<usize>() {

+                    Ok(n) => PathStep::Index(s, n),

+                    Err(_) => PathStep::Name(s),

+                })

+                .collect::<Vec<_>>())

+        } else if KNOWN_KEYWORDS.iter().any(|k| *k == text) {

+            Ok(vec![PathStep::Name(text)])

+        } else {

+            Err(self.parse_error(text, format!("Invalid keyword name '{}'", text)))

+        }

+    }

+

+    /// Finds the line number and column where an error occurred. Location is the substring of

+    /// self.original_text where the error was found, and msg is the error message.

+    fn parse_error(&self, location: &str, msg: String) -> Error {

+        let (line, column) = get_offset(self.original_text, location);

+        ParseError { msg, line, column }

+    }

+

+    /// Tags which should have no text after the discriminant use this to raise an error if

+    /// text is found.

+    fn expect_empty(&self, text: &str) -> Result<()> {

+        if text.is_empty() {

+            Ok(())

+        } else {

+            Err(self.parse_error(text, format!("Unexpected text '{}'", text)))

+        }

+    }

+

+    /// Close the branch that is on top of the block stack by setting its target instruction

+    /// and popping it from the stack. Returns an error if the top of the block stack is not a

+    /// branch.

+    fn close_branch(&mut self, new_target: usize, discriminant: &str) -> Result<()> {

+        let branch_block = self.block_stack.pop();

+        if let Some((_, Block::Branch(index))) = branch_block {

+            match &mut self.instructions[index] {

+                Instruction::Branch(_, _, target) => {

+                    *target = new_target;

+                    Ok(())

+                }

+                Instruction::Goto(target) => {

+                    *target = new_target;

+                    Ok(())

+                }

+                _ => panic!(),

+            }

+        } else {

+            Err(self.parse_error(

+                discriminant,

+                "Found a closing endif or else which doesn't match with a preceding if."

+                    .to_string(),

+            ))

+        }

+    }

+

+    /// Close the for loop that is on top of the block stack by setting its target instruction and

+    /// popping it from the stack. Returns an error if the top of the stack is not a for loop.

+    /// Returns the index of the loop's Iterate instruction for further processing.

+    fn close_for(&mut self, new_target: usize, discriminant: &str) -> Result<usize> {

+        let branch_block = self.block_stack.pop();

+        if let Some((_, Block::For(index))) = branch_block {

+            match &mut self.instructions[index] {

+                Instruction::Iterate(target) => {

+                    *target = new_target;

+                    Ok(index)

+                }

+                _ => panic!(),

+            }

+        } else {

+            Err(self.parse_error(

+                discriminant,

+                "Found a closing endfor which doesn't match with a preceding for.".to_string(),

+            ))

+        }

+    }

+

+    /// Advance the cursor to the next { and return the consumed text. If `escaped` is true, skips

+    /// a { at the start of the text.

+    fn consume_text(&mut self, escaped: bool) -> &'template str {

+        let search_substr = if escaped {

+            &self.remaining_text[1..]

+        } else {

+            self.remaining_text

+        };

+

+        let mut position = search_substr

+            .find('{')

+            .unwrap_or_else(|| search_substr.len());

+        if escaped {

+            position += 1;

+        }

+

+        let (text, remaining) = self.remaining_text.split_at(position);

+        self.remaining_text = remaining;

+        text

+    }

+

+    /// Advance the cursor to the end of the value tag and return the value's path and optional

+    /// formatter name.

+    fn consume_value(&mut self) -> Result<(Path<'template>, Option<&'template str>)> {

+        let tag = self.consume_tag("}")?;

+        let mut tag = tag[1..(tag.len() - 1)].trim();

+        if tag.starts_with('-') {

+            tag = tag[1..].trim();

+            self.trim_last_whitespace();

+        }

+        if tag.ends_with('-') {

+            tag = tag[0..tag.len() - 1].trim();

+            self.trim_next_whitespace();

+        }

+

+        if let Some(index) = tag.find('|') {

+            let (path_str, name_str) = tag.split_at(index);

+            let name = name_str[1..].trim();

+            let path = self.parse_path(path_str.trim())?;

+            Ok((path, Some(name)))

+        } else {

+            Ok((self.parse_path(tag)?, None))

+        }

+    }

+

+    /// Right-trim whitespace from the last text block we parsed.

+    fn trim_last_whitespace(&mut self) {

+        if let Some(Instruction::Literal(text)) = self.instructions.last_mut() {

+            *text = text.trim_right();

+        }

+    }

+

+    /// Make a note to left-trim whitespace from the next text block we parse.

+    fn trim_next_whitespace(&mut self) {

+        self.trim_next = true;

+    }

+

+    /// Advance the cursor to the end of the current block tag and return the discriminant substring

+    /// and the rest of the text in the tag. Also handles trimming whitespace where needed.

+    fn consume_block(&mut self) -> Result<(&'template str, &'template str)> {

+        let tag = self.consume_tag("}}")?;

+        let mut block = tag[2..(tag.len() - 2)].trim();

+        if block.starts_with('-') {

+            block = block[1..].trim();

+            self.trim_last_whitespace();

+        }

+        if block.ends_with('-') {

+            block = block[0..block.len() - 1].trim();

+            self.trim_next_whitespace();

+        }

+        let discriminant = block.split_whitespace().next().unwrap_or(block);

+        let rest = block[discriminant.len()..].trim();

+        Ok((discriminant, rest))

+    }

+

+    /// Advance the cursor to after the given expected_close string and return the text in between

+    /// (including the expected_close characters), or return an error message if we reach the end

+    /// of a line of text without finding it.

+    fn consume_tag(&mut self, expected_close: &str) -> Result<&'template str> {

+        if let Some(line) = self.remaining_text.lines().next() {

+            if let Some(pos) = line.find(expected_close) {

+                let (tag, remaining) = self.remaining_text.split_at(pos + expected_close.len());

+                self.remaining_text = remaining;

+                Ok(tag)

+            } else {

+                Err(self.parse_error(

+                    line,

+                    format!(

+                        "Expected a closing '{}' but found end-of-line instead.",

+                        expected_close

+                    ),

+                ))

+            }

+        } else {

+            Err(self.parse_error(

+                self.remaining_text,

+                format!(

+                    "Expected a closing '{}' but found end-of-text instead.",

+                    expected_close

+                ),

+            ))

+        }

+    }

+

+    /// Parse a with tag to separate the value path from the (optional) name.

+    fn parse_with(&self, with_text: &'template str) -> Result<(Path<'template>, &'template str)> {

+        if let Some(index) = with_text.find(" as ") {

+            let (path_str, name_str) = with_text.split_at(index);

+            let path = self.parse_path(path_str.trim())?;

+            let name = name_str[" as ".len()..].trim();

+            Ok((path, name))

+        } else {

+            Err(self.parse_error(

+                with_text,

+                format!(

+                    "Expected 'as <path>' in with block, but found \"{}\" instead",

+                    with_text

+                ),

+            ))

+        }

+    }

+

+    /// Parse a for tag to separate the value path from the name.

+    fn parse_for(&self, for_text: &'template str) -> Result<(Path<'template>, &'template str)> {

+        if let Some(index) = for_text.find(" in ") {

+            let (name_str, path_str) = for_text.split_at(index);

+            let name = name_str.trim();

+            let path = self.parse_path(path_str[" in ".len()..].trim())?;

+            Ok((path, name))

+        } else {

+            Err(self.parse_error(

+                for_text,

+                format!("Unable to parse for block text '{}'", for_text),

+            ))

+        }

+    }

+

+    /// Parse a call tag to separate the template name and context value.

+    fn parse_call(&self, call_text: &'template str) -> Result<(&'template str, Path<'template>)> {

+        if let Some(index) = call_text.find(" with ") {

+            let (name_str, path_str) = call_text.split_at(index);

+            let name = name_str.trim();

+            let path = self.parse_path(path_str[" with ".len()..].trim())?;

+            Ok((name, path))

+        } else {

+            Err(self.parse_error(

+                call_text,

+                format!("Unable to parse call block text '{}'", call_text),

+            ))

+        }

+    }

+}

+

+#[cfg(test)]

+mod test {

+    use super::*;

+    use instruction::Instruction::*;

+

+    fn compile(text: &'static str) -> Result<Vec<Instruction<'static>>> {

+        TemplateCompiler::new(text).compile()

+    }

+

+    #[test]

+    fn test_compile_literal() {

+        let text = "Test String";

+        let instructions = compile(text).unwrap();

+        assert_eq!(1, instructions.len());

+        assert_eq!(&Literal(text), &instructions[0]);

+    }

+

+    #[test]

+    fn test_compile_value() {

+        let text = "{ foobar }";

+        let instructions = compile(text).unwrap();

+        assert_eq!(1, instructions.len());

+        assert_eq!(&Value(vec![PathStep::Name("foobar")]), &instructions[0]);

+    }

+

+    #[test]

+    fn test_compile_value_with_formatter() {

+        let text = "{ foobar | my_formatter }";

+        let instructions = compile(text).unwrap();

+        assert_eq!(1, instructions.len());

+        assert_eq!(

+            &FormattedValue(vec![PathStep::Name("foobar")], "my_formatter"),

+            &instructions[0]

+        );

+    }

+

+    #[test]

+    fn test_dotted_path() {

+        let text = "{ foo.bar }";

+        let instructions = compile(text).unwrap();

+        assert_eq!(1, instructions.len());

+        assert_eq!(

+            &Value(vec![PathStep::Name("foo"), PathStep::Name("bar")]),

+            &instructions[0]

+        );

+    }

+

+    #[test]

+    fn test_indexed_path() {

+        let text = "{ foo.0.bar }";

+        let instructions = compile(text).unwrap();

+        assert_eq!(1, instructions.len());

+        assert_eq!(

+            &Value(vec![

+                PathStep::Name("foo"),

+                PathStep::Index("0", 0),

+                PathStep::Name("bar")

+            ]),

+            &instructions[0]

+        );

+    }

+

+    #[test]

+    fn test_mixture() {

+        let text = "Hello { name }, how are you?";

+        let instructions = compile(text).unwrap();

+        assert_eq!(3, instructions.len());

+        assert_eq!(&Literal("Hello "), &instructions[0]);

+        assert_eq!(&Value(vec![PathStep::Name("name")]), &instructions[1]);

+        assert_eq!(&Literal(", how are you?"), &instructions[2]);

+    }

+

+    #[test]

+    fn test_if_endif() {

+        let text = "{{ if foo }}Hello!{{ endif }}";

+        let instructions = compile(text).unwrap();

+        assert_eq!(2, instructions.len());

+        assert_eq!(

+            &Branch(vec![PathStep::Name("foo")], true, 2),

+            &instructions[0]

+        );

+        assert_eq!(&Literal("Hello!"), &instructions[1]);

+    }

+

+    #[test]

+    fn test_if_not_endif() {

+        let text = "{{ if not foo }}Hello!{{ endif }}";

+        let instructions = compile(text).unwrap();

+        assert_eq!(2, instructions.len());

+        assert_eq!(

+            &Branch(vec![PathStep::Name("foo")], false, 2),

+            &instructions[0]

+        );

+        assert_eq!(&Literal("Hello!"), &instructions[1]);

+    }

+

+    #[test]

+    fn test_if_else_endif() {

+        let text = "{{ if foo }}Hello!{{ else }}Goodbye!{{ endif }}";

+        let instructions = compile(text).unwrap();

+        assert_eq!(4, instructions.len());

+        assert_eq!(

+            &Branch(vec![PathStep::Name("foo")], true, 3),

+            &instructions[0]

+        );

+        assert_eq!(&Literal("Hello!"), &instructions[1]);

+        assert_eq!(&Goto(4), &instructions[2]);

+        assert_eq!(&Literal("Goodbye!"), &instructions[3]);

+    }

+

+    #[test]

+    fn test_with() {

+        let text = "{{ with foo as bar }}Hello!{{ endwith }}";

+        let instructions = compile(text).unwrap();

+        assert_eq!(3, instructions.len());

+        assert_eq!(

+            &PushNamedContext(vec![PathStep::Name("foo")], "bar"),

+            &instructions[0]

+        );

+        assert_eq!(&Literal("Hello!"), &instructions[1]);

+        assert_eq!(&PopContext, &instructions[2]);

+    }

+

+    #[test]

+    fn test_foreach() {

+        let text = "{{ for foo in bar.baz }}{ foo }{{ endfor }}";

+        let instructions = compile(text).unwrap();

+        assert_eq!(5, instructions.len());

+        assert_eq!(

+            &PushIterationContext(vec![PathStep::Name("bar"), PathStep::Name("baz")], "foo"),

+            &instructions[0]

+        );

+        assert_eq!(&Iterate(4), &instructions[1]);

+        assert_eq!(&Value(vec![PathStep::Name("foo")]), &instructions[2]);

+        assert_eq!(&Goto(1), &instructions[3]);

+        assert_eq!(&PopContext, &instructions[4]);

+    }

+

+    #[test]

+    fn test_strip_whitespace_value() {

+        let text = "Hello,     {- name -}   , how are you?";

+        let instructions = compile(text).unwrap();

+        assert_eq!(3, instructions.len());

+        assert_eq!(&Literal("Hello,"), &instructions[0]);

+        assert_eq!(&Value(vec![PathStep::Name("name")]), &instructions[1]);

+        assert_eq!(&Literal(", how are you?"), &instructions[2]);

+    }

+

+    #[test]

+    fn test_strip_whitespace_block() {

+        let text = "Hello,     {{- if name -}}    {name}    {{- endif -}}   , how are you?";

+        let instructions = compile(text).unwrap();

+        assert_eq!(6, instructions.len());

+        assert_eq!(&Literal("Hello,"), &instructions[0]);

+        assert_eq!(

+            &Branch(vec![PathStep::Name("name")], true, 5),

+            &instructions[1]

+        );

+        assert_eq!(&Literal(""), &instructions[2]);

+        assert_eq!(&Value(vec![PathStep::Name("name")]), &instructions[3]);

+        assert_eq!(&Literal(""), &instructions[4]);

+        assert_eq!(&Literal(", how are you?"), &instructions[5]);

+    }

+

+    #[test]

+    fn test_comment() {

+        let text = "Hello, {# foo bar baz #} there!";

+        let instructions = compile(text).unwrap();

+        assert_eq!(2, instructions.len());

+        assert_eq!(&Literal("Hello, "), &instructions[0]);

+        assert_eq!(&Literal(" there!"), &instructions[1]);

+    }

+

+    #[test]

+    fn test_strip_whitespace_comment() {

+        let text = "Hello, \t\n    {#- foo bar baz -#} \t  there!";

+        let instructions = compile(text).unwrap();

+        assert_eq!(2, instructions.len());

+        assert_eq!(&Literal("Hello,"), &instructions[0]);

+        assert_eq!(&Literal("there!"), &instructions[1]);

+    }

+

+    #[test]

+    fn test_strip_whitespace_followed_by_another_tag() {

+        let text = "{value -}{value} Hello";

+        let instructions = compile(text).unwrap();

+        assert_eq!(3, instructions.len());

+        assert_eq!(&Value(vec![PathStep::Name("value")]), &instructions[0]);

+        assert_eq!(&Value(vec![PathStep::Name("value")]), &instructions[1]);

+        assert_eq!(&Literal(" Hello"), &instructions[2]);

+    }

+

+    #[test]

+    fn test_call() {

+        let text = "{{ call my_macro with foo.bar }}";

+        let instructions = compile(text).unwrap();

+        assert_eq!(1, instructions.len());

+        assert_eq!(

+            &Call(

+                "my_macro",

+                vec![PathStep::Name("foo"), PathStep::Name("bar")]

+            ),

+            &instructions[0]

+        );

+    }

+

+    #[test]

+    fn test_curly_brace_escaping() {

+        let text = "body \\{ \nfont-size: {fontsize} \n}";

+        let instructions = compile(text).unwrap();

+        assert_eq!(4, instructions.len());

+        assert_eq!(&Literal("body "), &instructions[0]);

+        assert_eq!(&Literal("{ \nfont-size: "), &instructions[1]);

+        assert_eq!(&Value(vec![PathStep::Name("fontsize")]), &instructions[2]);

+        assert_eq!(&Literal(" \n}"), &instructions[3]);

+    }

+

+    #[test]

+    fn test_unclosed_tags() {

+        let tags = vec![

+            "{",

+            "{ foo.bar",

+            "{ foo.bar\n }",

+            "{{",

+            "{{ if foo.bar",

+            "{{ if foo.bar \n}}",

+            "{#",

+            "{# if foo.bar",

+            "{# if foo.bar \n#}",

+        ];

+        for tag in tags {

+            compile(tag).unwrap_err();

+        }

+    }

+

+    #[test]

+    fn test_mismatched_blocks() {

+        let text = "{{ if foo }}{{ with bar }}{{ endif }} {{ endwith }}";

+        compile(text).unwrap_err();

+    }

+

+    #[test]

+    fn test_disallows_invalid_keywords() {

+        let text = "{ @foo }";

+        compile(text).unwrap_err();

+    }

+

+    #[test]

+    fn test_diallows_unknown_block_type() {

+        let text = "{{ foobar }}";

+        compile(text).unwrap_err();

+    }

+

+    #[test]

+    fn test_parse_error_line_column_num() {

+        let text = "\n\n\n{{ foobar }}";

+        let err = compile(text).unwrap_err();

+        if let ParseError { line, column, .. } = err {

+            assert_eq!(4, line);

+            assert_eq!(3, column);

+        } else {

+            panic!("Should have returned a parse error");

+        }

+    }

+

+    #[test]

+    fn test_parse_error_on_unclosed_if() {

+        let text = "{{ if foo }}";

+        compile(text).unwrap_err();

+    }

+

+    #[test]

+    fn test_parse_escaped_open_curly_brace() {

+        let text: &str = r"hello \{world}";

+        let instructions = compile(text).unwrap();

+        assert_eq!(2, instructions.len());

+        assert_eq!(&Literal("hello "), &instructions[0]);

+        assert_eq!(&Literal("{world}"), &instructions[1]);

+    }

+}

diff --git a/crates/tinytemplate/src/error.rs b/crates/tinytemplate/src/error.rs
new file mode 100755
index 0000000..730c648
--- /dev/null
+++ b/crates/tinytemplate/src/error.rs
@@ -0,0 +1,246 @@
+//! Module containing the error type returned by TinyTemplate if an error occurs.

+

+use instruction::{path_to_str, PathSlice};

+use serde_json::Error as SerdeJsonError;

+use serde_json::Value;

+use std::error::Error as StdError;

+use std::fmt;
+
+/// Enum representing the potential errors that TinyTemplate can encounter.
+#[derive(Debug)]
+pub enum Error {
+    ParseError {
+        msg: String,
+        line: usize,
+        column: usize,
+    },
+    RenderError {
+        msg: String,
+        line: usize,
+        column: usize,
+    },
+    SerdeError {
+        err: SerdeJsonError,
+    },
+    GenericError {
+        msg: String,
+    },
+    StdFormatError {
+        err: fmt::Error,
+    },
+    CalledTemplateError {
+        name: String,
+        err: Box<Error>,
+        line: usize,
+        column: usize,
+    },
+    CalledFormatterError {
+        name: String,
+        err: Box<Error>,
+        line: usize,
+        column: usize,
+    },
+
+    #[doc(hidden)]
+    __NonExhaustive,
+}
+impl From<SerdeJsonError> for Error {
+    fn from(err: SerdeJsonError) -> Error {
+        Error::SerdeError { err }
+    }
+}
+impl From<fmt::Error> for Error {
+    fn from(err: fmt::Error) -> Error {
+        Error::StdFormatError { err }
+    }
+}
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            Error::ParseError { msg, line, column } => write!(
+                f,
+                "Failed to parse the template (line {}, column {}). Reason: {}",
+                line, column, msg
+            ),
+            Error::RenderError { msg, line, column } => {
+                write!(
+                    f,
+                    "Encountered rendering error on line {}, column {}. Reason: {}",
+                    line, column, msg
+                )
+            }
+            Error::SerdeError { err } => {
+                write!(f, "Unexpected serde error while converting the context to a serde_json::Value. Error: {}", err)
+            }
+            Error::GenericError { msg } => {
+                write!(f, "{}", msg)
+            }
+            Error::StdFormatError { err } => {
+                write!(f, "Unexpected formatting error: {}", err)
+            }
+            Error::CalledTemplateError {
+                name,
+                err,
+                line,
+                column,
+            } => {
+                write!(
+                    f,
+                    "Call to sub-template \"{}\" on line {}, column {} failed. Reason: {}",
+                    name, line, column, err
+                )
+            }
+            Error::CalledFormatterError {
+                name,
+                err,
+                line,
+                column,
+            } => {
+                write!(
+                    f,
+                    "Call to value formatter \"{}\" on line {}, column {} failed. Reason: {}",
+                    name, line, column, err
+                )
+            }
+            Error::__NonExhaustive => unreachable!(),
+        }
+    }
+}
+impl StdError for Error {
+    fn description(&self) -> &str {
+        match self {
+            Error::ParseError { .. } => "ParseError",
+            Error::RenderError { .. } => "RenderError",
+            Error::SerdeError { .. } => "SerdeError",
+            Error::GenericError { msg } => &msg,
+            Error::StdFormatError { .. } => "StdFormatError",
+            Error::CalledTemplateError { .. } => "CalledTemplateError",
+            Error::CalledFormatterError { .. } => "CalledFormatterError",
+            Error::__NonExhaustive => unreachable!(),
+        }
+    }
+}
+
+pub type Result<T> = ::std::result::Result<T, Error>;
+
+pub(crate) fn lookup_error(source: &str, step: &str, path: PathSlice, current: &Value) -> Error {
+    let avail_str = if let Value::Object(object_map) = current {
+        let mut avail_str = " Available values at this level are ".to_string();
+        for (i, key) in object_map.keys().enumerate() {
+            if i > 0 {
+                avail_str.push_str(", ");
+            }
+            avail_str.push('\'');
+            avail_str.push_str(key);
+            avail_str.push('\'');
+        }
+        avail_str
+    } else {
+        "".to_string()
+    };
+
+    let (line, column) = get_offset(source, step);
+
+    Error::RenderError {
+        msg: format!(
+            "Failed to find value '{}' from path '{}'.{}",
+            step,
+            path_to_str(path),
+            avail_str
+        ),
+        line,
+        column,
+    }
+}
+
+pub(crate) fn truthiness_error(source: &str, path: PathSlice) -> Error {
+    let (line, column) = get_offset(source, path.last().unwrap());
+    Error::RenderError {
+        msg: format!(
+            "Path '{}' produced a value which could not be checked for truthiness.",
+            path_to_str(path)
+        ),
+        line,
+        column,
+    }
+}
+
+pub(crate) fn unprintable_error() -> Error {
+    Error::GenericError {
+        msg: "Expected a printable value but found array or object.".to_string(),
+    }
+}
+
+pub(crate) fn not_iterable_error(source: &str, path: PathSlice) -> Error {
+    let (line, column) = get_offset(source, path.last().unwrap());
+    Error::RenderError {
+        msg: format!(
+            "Expected an array for path '{}' but found a non-iterable value.",
+            path_to_str(path)
+        ),
+        line,
+        column,
+    }
+}
+
+pub(crate) fn unknown_template(source: &str, name: &str) -> Error {
+    let (line, column) = get_offset(source, name);
+    Error::RenderError {
+        msg: format!("Tried to call an unknown template '{}'", name),
+        line,
+        column,
+    }
+}
+
+pub(crate) fn unknown_formatter(source: &str, name: &str) -> Error {
+    let (line, column) = get_offset(source, name);
+    Error::RenderError {
+        msg: format!("Tried to call an unknown formatter '{}'", name),
+        line,
+        column,
+    }
+}
+
+pub(crate) fn called_template_error(source: &str, template_name: &str, err: Error) -> Error {
+    let (line, column) = get_offset(source, template_name);
+    Error::CalledTemplateError {
+        name: template_name.to_string(),
+        err: Box::new(err),
+        line,
+        column,
+    }
+}
+
+pub(crate) fn called_formatter_error(source: &str, formatter_name: &str, err: Error) -> Error {
+    let (line, column) = get_offset(source, formatter_name);
+    Error::CalledFormatterError {
+        name: formatter_name.to_string(),
+        err: Box::new(err),
+        line,
+        column,
+    }
+}
+
+/// Find the line number and column of the target string within the source string. Will panic if
+/// target is not a substring of source.
+pub(crate) fn get_offset(source: &str, target: &str) -> (usize, usize) {
+    let offset = target.as_ptr() as isize - source.as_ptr() as isize;
+    let to_scan = &source[0..(offset as usize)];
+
+    let mut line = 1;
+    let mut column = 0;
+
+    for byte in to_scan.bytes() {
+        match byte as char {
+            '\n' => {
+                line += 1;
+                column = 0;
+            }
+            _ => {
+                column += 1;
+            }
+        }
+    }
+
+    (line, column)
+}
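For reference, a minimal sketch (not part of this change) of how the Error variants above reach callers of the public API. The template name "demo" and the deliberately missing field are invented for illustration, and the context is built with serde_json's json! macro, which the crate already depends on.

    use tinytemplate::{error::Error, TinyTemplate};

    fn main() {
        let mut tt = TinyTemplate::new();
        // "missing" is not present in the context, so rendering produces a RenderError
        // whose line and column come from get_offset() above.
        tt.add_template("demo", "Hello { missing }!").unwrap();
        match tt.render("demo", &serde_json::json!({ "name": "World" })) {
            Ok(rendered) => println!("{}", rendered),
            Err(Error::RenderError { msg, line, column }) => {
                eprintln!("render failed at {}:{}: {}", line, column, msg)
            }
            // The enum carries a hidden __NonExhaustive variant, so a catch-all arm is required.
            Err(other) => eprintln!("{}", other),
        }
    }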
diff --git a/crates/tinytemplate/src/instruction.rs b/crates/tinytemplate/src/instruction.rs
new file mode 100755
index 0000000..0e19814
--- /dev/null
+++ b/crates/tinytemplate/src/instruction.rs
@@ -0,0 +1,85 @@
+use std::ops::Deref;
+
+/// TinyTemplate implements a simple bytecode interpreter for its template engine. Instructions
+/// for this interpreter are represented by the Instruction enum and typically contain various
+/// parameters such as the path to context values or name strings.
+///
+/// In TinyTemplate, the template string itself is assumed to be statically available (or at least
+/// longer-lived than the TinyTemplate instance) so paths and instructions simply borrow string
+/// slices from the template text. These string slices can then be appended directly to the output
+/// string.
+
+/// Enum for a step in a path which optionally contains a parsed index.
+#[derive(Eq, PartialEq, Debug, Clone)]
+pub(crate) enum PathStep<'template> {
+    Name(&'template str),
+    Index(&'template str, usize),
+}
+impl<'template> Deref for PathStep<'template> {
+    type Target = str;
+
+    fn deref(&self) -> &Self::Target {
+        match self {
+            PathStep::Name(s) => s,
+            PathStep::Index(s, _) => s,
+        }
+    }
+}
+
+/// Sequence of named steps used for looking up values in the context
+pub(crate) type Path<'template> = Vec<PathStep<'template>>;
+
+/// Path, but as a slice.
+pub(crate) type PathSlice<'a, 'template> = &'a [PathStep<'template>];
+
+/// Enum representing the bytecode instructions.
+#[derive(Eq, PartialEq, Debug, Clone)]
+pub(crate) enum Instruction<'template> {
+    /// Emit a literal string into the output buffer
+    Literal(&'template str),
+
+    /// Look up the value for the given path and render it into the output buffer using the default
+    /// formatter
+    Value(Path<'template>),
+
+    /// Look up the value for the given path and pass it to the formatter with the given name
+    FormattedValue(Path<'template>, &'template str),
+
+    /// Look up the value at the given path and jump to the given instruction index if that value
+    /// is truthy (if the boolean is true) or falsy (if the boolean is false)
+    Branch(Path<'template>, bool, usize),
+
+    /// Push a named context on the stack, shadowing only that name.
+    PushNamedContext(Path<'template>, &'template str),
+
+    /// Push an iteration context on the stack, shadowing the given name with the current value from
+    /// the vec pointed to by the path. The current value will be updated by the Iterate instruction.
+    /// This is always generated before an Iterate instruction which actually starts the iterator.
+    PushIterationContext(Path<'template>, &'template str),
+
+    /// Pop a context off the stack
+    PopContext,
+
+    /// Advance the topmost iterator on the context stack by one and update that context. If the
+    /// iterator is empty, jump to the given instruction.
+    Iterate(usize),
+
+    /// Unconditionally jump to the given instruction. Used to skip else blocks and repeat loops.
+    Goto(usize),
+
+    /// Look up the named template and render it into the output buffer with the value pointed to
+    /// by the path as its context.
+    Call(&'template str, Path<'template>),
+}
+
+/// Convert a path back into a dotted string.
+pub(crate) fn path_to_str(path: PathSlice) -> String {
+    let mut path_str = "".to_string();
+    for (i, step) in path.iter().enumerate() {
+        if i > 0 {
+            path_str.push('.');
+        }
+        path_str.push_str(step);
+    }
+    path_str
+}
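To make the Path representation concrete, here is a hypothetical in-crate unit test (PathStep, Path and path_to_str are all pub(crate), so this only compiles inside tinytemplate itself) showing the dotted path "friend.name" round-tripping through path_to_str:

    #[test]
    fn path_round_trip() {
        // Equivalent of the dotted path "friend.name" in a template tag.
        let path: Path<'static> = vec![PathStep::Name("friend"), PathStep::Name("name")];
        // Deref lets each step be used directly as a &str.
        assert_eq!(&*path[0], "friend");
        // path_to_str joins the steps back into the dotted form used in error messages.
        assert_eq!(path_to_str(&path), "friend.name");
    }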
diff --git a/crates/tinytemplate/src/lib.rs b/crates/tinytemplate/src/lib.rs
new file mode 100755
index 0000000..396be21
--- /dev/null
+++ b/crates/tinytemplate/src/lib.rs
@@ -0,0 +1,260 @@
+//! ## TinyTemplate
+//!
+//! TinyTemplate is a minimal templating library originally designed for use in [Criterion.rs].
+//! It deliberately does not provide all of the features of a full-power template engine, but in
+//! return it provides a simple API, clear templating syntax, decent performance and very few
+//! dependencies.
+//!
+//! ## Features
+//!
+//! The most important features are as follows (see the [syntax](syntax/index.html) module for full
+//! details on the template syntax):
+//!
+//! * Rendering values - `{ myvalue }`
+//! * Conditionals - `{{ if foo }}Foo is true{{ else }}Foo is false{{ endif }}`
+//! * Loops - `{{ for value in row }}{value}{{ endfor }}`
+//! * Customizable value formatters `{ value | my_formatter }`
+//! * Macros `{{ call my_template with foo }}`
+//!
+//! ## Restrictions
+//!
+//! TinyTemplate was designed with the assumption that the templates are available as static strings,
+//! either using string literals or the `include_str!` macro. Thus, it borrows `&str` slices from the
+//! template text itself and uses them during the rendering process. Although it is possible to use
+//! TinyTemplate with template strings loaded at runtime, this is not recommended.
+//!
+//! Additionally, TinyTemplate can only render templates into Strings. If you need to render a
+//! template directly to a socket or file, TinyTemplate may not be right for you.
+//!
+//! ## Example
+//!
+//! ```
+//! #[macro_use]
+//! extern crate serde_derive;
+//! extern crate tinytemplate;
+//!
+//! use tinytemplate::TinyTemplate;
+//! use std::error::Error;
+//!
+//! #[derive(Serialize)]
+//! struct Context {
+//!     name: String,
+//! }
+//!
+//! static TEMPLATE : &'static str = "Hello {name}!";
+//!
+//! pub fn main() -> Result<(), Box<Error>> {
+//!     let mut tt = TinyTemplate::new();
+//!     tt.add_template("hello", TEMPLATE)?;
+//!
+//!     let context = Context {
+//!         name: "World".to_string(),
+//!     };
+//!
+//!     let rendered = tt.render("hello", &context)?;
+//! #   assert_eq!("Hello World!", &rendered);
+//!     println!("{}", rendered);
+//!
+//!     Ok(())
+//! }
+//! ```
+//!
+//! [Criterion.rs]: https://github.com/bheisler/criterion.rs
+//!
+
+extern crate serde;
+extern crate serde_json;
+
+#[cfg(test)]
+#[cfg_attr(test, macro_use)]
+extern crate serde_derive;
+
+mod compiler;
+pub mod error;
+mod instruction;
+pub mod syntax;
+mod template;
+
+use error::*;
+use serde::Serialize;
+use serde_json::Value;
+use std::collections::HashMap;
+use std::fmt::Write;
+use template::Template;
+
+/// Type alias for closures which can be used as value formatters.
+pub type ValueFormatter = dyn Fn(&Value, &mut String) -> Result<()>;
+
+/// Appends `value` to `output`, performing HTML-escaping in the process.
+pub fn escape(value: &str, output: &mut String) {
+    // Algorithm taken from the rustdoc source code.
+    let value_str = value;
+    let mut last_emitted = 0;
+    for (i, ch) in value.bytes().enumerate() {
+        match ch as char {
+            '<' | '>' | '&' | '\'' | '"' => {
+                output.push_str(&value_str[last_emitted..i]);
+                let s = match ch as char {
+                    '>' => "&gt;",
+                    '<' => "&lt;",
+                    '&' => "&amp;",
+                    '\'' => "&#39;",
+                    '"' => "&quot;",
+                    _ => unreachable!(),
+                };
+                output.push_str(s);
+                last_emitted = i + 1;
+            }
+            _ => {}
+        }
+    }
+
+    if last_emitted < value_str.len() {
+        output.push_str(&value_str[last_emitted..]);
+    }
+}
+
+/// The format function is used as the default value formatter for all values unless the user
+/// specifies another. It is provided publicly so that it can be called as part of custom formatters.
+/// Values are formatted as follows:
+///
+/// * `Value::Null` => the empty string
+/// * `Value::Bool` => true|false
+/// * `Value::Number` => the number, as formatted by `serde_json`.
+/// * `Value::String` => the string, HTML-escaped
+///
+/// Arrays and objects are not formatted, and attempting to do so will result in a rendering error.
+pub fn format(value: &Value, output: &mut String) -> Result<()> {
+    match value {
+        Value::Null => Ok(()),
+        Value::Bool(b) => {
+            write!(output, "{}", b)?;
+            Ok(())
+        }
+        Value::Number(n) => {
+            write!(output, "{}", n)?;
+            Ok(())
+        }
+        Value::String(s) => {
+            escape(s, output);
+            Ok(())
+        }
+        _ => Err(unprintable_error()),
+    }
+}
+
+/// Identical to [`format`](fn.format.html) except that this does not perform HTML escaping.
+pub fn format_unescaped(value: &Value, output: &mut String) -> Result<()> {
+    match value {
+        Value::Null => Ok(()),
+        Value::Bool(b) => {
+            write!(output, "{}", b)?;
+            Ok(())
+        }
+        Value::Number(n) => {
+            write!(output, "{}", n)?;
+            Ok(())
+        }
+        Value::String(s) => {
+            output.push_str(s);
+            Ok(())
+        }
+        _ => Err(unprintable_error()),
+    }
+}
+
+/// The TinyTemplate struct is the entry point for the TinyTemplate library. It contains the
+/// template and formatter registries and provides functions to render templates as well as to
+/// register templates and formatters.
+pub struct TinyTemplate<'template> {
+    templates: HashMap<&'template str, Template<'template>>,
+    formatters: HashMap<&'template str, Box<ValueFormatter>>,
+    default_formatter: &'template ValueFormatter,
+}
+impl<'template> TinyTemplate<'template> {
+    /// Create a new TinyTemplate registry. The returned registry contains no templates, and has
+    /// [`format_unescaped`](fn.format_unescaped.html) registered as a formatter named "unescaped".
+    pub fn new() -> TinyTemplate<'template> {
+        let mut tt = TinyTemplate {
+            templates: HashMap::default(),
+            formatters: HashMap::default(),
+            default_formatter: &format,
+        };
+        tt.add_formatter("unescaped", format_unescaped);
+        tt
+    }
+
+    /// Parse and compile the given template, then register it under the given name.
+    pub fn add_template(&mut self, name: &'template str, text: &'template str) -> Result<()> {
+        let template = Template::compile(text)?;
+        self.templates.insert(name, template);
+        Ok(())
+    }
+
+    /// Changes the default formatter from [`format`](fn.format.html) to `formatter`. Useful in combination with [`format_unescaped`](fn.format_unescaped.html) to deactivate HTML-escaping.
+    pub fn set_default_formatter<F>(&mut self, formatter: &'template F)
+    where
+        F: 'static + Fn(&Value, &mut String) -> Result<()>,
+    {
+        self.default_formatter = formatter;
+    }
+
+    /// Register the given formatter function under the given name.
+    pub fn add_formatter<F>(&mut self, name: &'template str, formatter: F)
+    where
+        F: 'static + Fn(&Value, &mut String) -> Result<()>,
+    {
+        self.formatters.insert(name, Box::new(formatter));
+    }
+
+    /// Render the template with the given name using the given context object. The context
+    /// object must implement `serde::Serialize` as it will be converted to `serde_json::Value`.
+    pub fn render<C>(&self, template: &str, context: &C) -> Result<String>
+    where
+        C: Serialize,
+    {
+        let value = serde_json::to_value(context)?;
+        match self.templates.get(template) {
+            Some(tmpl) => tmpl.render(
+                &value,
+                &self.templates,
+                &self.formatters,
+                self.default_formatter,
+            ),
+            None => Err(Error::GenericError {
+                msg: format!("Unknown template '{}'", template),
+            }),
+        }
+    }
+}
+impl<'template> Default for TinyTemplate<'template> {
+    fn default() -> TinyTemplate<'template> {
+        TinyTemplate::new()
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[derive(Serialize)]
+    struct Context {
+        name: String,
+    }
+
+    static TEMPLATE: &'static str = "Hello {name}!";
+
+    #[test]
+    pub fn test_set_default_formatter() {
+        let mut tt = TinyTemplate::new();
+        tt.add_template("hello", TEMPLATE).unwrap();
+        tt.set_default_formatter(&format_unescaped);
+
+        let context = Context {
+            name: "<World>".to_string(),
+        };
+
+        let rendered = tt.render("hello", &context).unwrap();
+        assert_eq!(rendered, "Hello <World>!")
+    }
+}
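As a usage sketch that is not part of this change, the following registers a custom value formatter next to the default HTML-escaping one; the formatter name "percent_formatter" and the "progress" template are invented for illustration:

    use tinytemplate::TinyTemplate;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let mut tt = TinyTemplate::new();
        tt.add_template("progress", "Give it {percentage | percent_formatter}!")?;
        // Any 'static Fn(&Value, &mut String) -> tinytemplate::error::Result<()> can be registered.
        tt.add_formatter(
            "percent_formatter",
            |value: &serde_json::Value, output: &mut String| match value.as_f64() {
                Some(n) => {
                    output.push_str(&format!("{:.0}%", n * 100.0));
                    Ok(())
                }
                // Fall back to the default (escaping) formatter for non-numeric values.
                None => tinytemplate::format(value, output),
            },
        );
        let rendered = tt.render("progress", &serde_json::json!({ "percentage": 1.1 }))?;
        assert_eq!(rendered, "Give it 110%!");
        Ok(())
    }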
diff --git a/crates/tinytemplate/src/syntax.rs b/crates/tinytemplate/src/syntax.rs
new file mode 100755
index 0000000..2e8eedd
--- /dev/null
+++ b/crates/tinytemplate/src/syntax.rs
@@ -0,0 +1,184 @@
+//! Documentation of TinyTemplate's template syntax.
+//!
+//! ### Context Types
+//!
+//! TinyTemplate uses `serde_json`'s Value structure to represent the context. Therefore, any
+//! `Serializable` structure can be used as a context. All values in such structures are mapped to
+//! their JSON representations - booleans, numbers, strings, arrays, objects and nulls.
+//!
+//! ### Values
+//!
+//! Template values are marked with `{...}`. For example, this will look up the "name" field in
+//! the context structure and insert it into the rendered string:
+//!
+//! ```text
+//! Hello, {name}, how are you?
+//! ```
+//!
+//! Optionally, a value formatter may be provided. One formatter, "unescaped", is provided by
+//! default. Any other formatters must be registered with the
+//! [`TinyTemplate.add_formatter`](../struct.TinyTemplate.html#method.add_formatter)
+//! function prior to rendering or an error will be generated. This will call the formatter function
+//! registered as "percent_formatter" with the value of the "percentage" field:
+//!
+//! ```text
+//! Give it {percentage | percent_formatter}!
+//! ```
+//!
+//! The value may be a dotted path through a hierarchy of context objects. This will look up the
+//! "friend" field in the context structure, then substitute the "name" field from the "friend"
+//! object.
+//!
+//! ```text
+//! And hello to {friend.name} as well!
+//! ```
+//!
+//! Additionally, you may use the `@root` keyword to refer to the root object of your context.
+//! Since TinyTemplate can't normally print complex context objects, this is only useful if the
+//! context is a simple object like an integer or string.
+//!
+//! ### Conditionals
+//!
+//! TinyTemplate blocks are marked with `{{...}}` - double-braces where values are single-braces.
+//!
+//! Conditionals are denoted by "{{ if path }}...{{ else }}...{{ endif }}". The Else block is
+//! optional. Else-if is not currently supported. If "path" evaluates to a truthy expression
+//! (true if boolean, non-zero if numeric, non-empty for strings and arrays, and non-null for
+//! objects) then the section of the template between "if" and "else" is evaluated, otherwise the
+//! section between "else" and "endif" (if present) is evaluated.
+//!
+//! ```text
+//! {{ if user.is_birthday }}
+//! Happy Birthday!
+//! {{ else }}
+//! Have a nice day!
+//! {{ endif }}
+//! ```
+//!
+//! The condition can be negated by using "{{ if not path }}":
+//!
+//! ```text
+//! {{ if not user.is_birthday }}
+//! Have a nice day!
+//! {{ else }}
+//! Happy Birthday!
+//! {{ endif }}
+//! ```
+//!
+//! If desired, the `@root` keyword can be used to branch on the root context object.
+//!
+//! ### Loops
+//!
+//! TinyTemplate supports iterating over the values of arrays. Only arrays are supported. Loops
+//! are denoted by "{{ for value_name in value.path }}...{{ endfor }}". The section of the template between
+//! the two tags will be executed once for each value in the array denoted by "value.path".
+//!
+//! ```text
+//! Hello to {{ for name in guests }}
+//! {name}
+//! {{ endfor }}
+//! ```
+//!
+//! If the iteration value chosen in the "for" tag is the same as that of a regular context value,
+//! the name in the tag will shadow the context value for the scope of the loop. For nested loops,
+//! inner loops will shadow the values of outer loops.
+//!
+//! ```text
+//! {{ for person in guests }}
+//! Hello to {person}{{ for person in person.friends }} and your friend {person}{{ endfor }}
+//! {{ endfor }}
+//! ```
+//!
+//! There are three special values which are available within a loop:
+//!
+//! * `@index` - zero-based index of the current value within the array.
+//! * `@first` - true if this is the first iteration of the loop, otherwise false.
+//! * `@last` - true if this is the last iteration of the loop, otherwise false.
+//!
+//! ```text
+//! Hello to {{ for name in guests -}}
+//! { @index }. {name},
+//! {{- endfor }}
+//! ```
+//!
+//!
+//! In case of nested loops, these values refer to the innermost loop which contains them.
+//!
+//! If the root context object is an array, the `@root` keyword can be used to iterate over the
+//! root object.
+//!
+//! ### With Blocks
+//!
+//! Templates can use with blocks to partially shadow the outer context, the same way that
+//! for-loops do. These are formed like so:
+//!
+//! "{{ with path.to.value as name }}...{{ endwith }}"
+//!
+//! For example:
+//!
+//! ```text
+//! {{ with person.spouse as s }}
+//! Hello { s.name }!
+//! {{ endwith }}
+//! ```
+//!
+//! This looks up "person.spouse" and adds that to the context as "s" within the block. Only the
+//! name "s" is shadowed within the with block and otherwise the outer context is still accessible.
+//!
+//! ### Trimming Whitespace
+//!
+//! If a block tag, comment or value tag includes a "-" character at the start, the trailing
+//! whitespace of the previous text section will be skipped in the output. Likewise, if the tag
+//! ends with a "-", the leading whitespace of the following text will be skipped.
+//!
+//! ```text
+//! Hello { friend.name -}
+//! , how are you?
+//!
+//! {{- if status.good }} I am fine.               {{- endif }}
+//! ```
+//!
+//! This will print "Hello friend, how are you? I am fine." without the newlines or extra spaces.
+//!
+//! ### Calling other Templates
+//!
+//! Templates may call other templates by name. The other template must have been registered using
+//! the [`TinyTemplate.add_template`](../struct.TinyTemplate.html#method.add_template) function
+//! before rendering or an error will be generated. This is done with the "call" tag:
+//!
+//! "{{ call template_name with path.to.context }}"
+//!
+//! The call tag has no closing tag. This will look up the "path.to.context" path in the current
+//! context, then render the "template_name" template using the value at that path as the context
+//! for the other template. The string produced by the called template is then inserted into the
+//! output from the calling template. This can be used for a limited form of template code reuse.
+//!
+//! ### Comments
+//!
+//! Comments in the templates are denoted by "{# comment text #}". Comments will be skipped when
+//! rendering the template, though whitespace adjacent to comments will not be stripped unless the
+//! "-" is added. For example:
+//!
+//! ```text
+//! Hello
+//!
+//! {#- This is a comment #} world!
+//! ```
+//!
+//! This will print "Hello world!".
+//!
+//! ### Escaping Curly Braces
+//!
+//! If your template contains opening curly-braces (`{`), they must be escaped using a leading `\`
+//! character. For example:
+//!
+//! ```text
+//! h2 \{
+//!     font-size: {fontsize};
+//! }
+//! ```
+//!
+//! If using a string literal in rust source code, the `\` itself must be escaped, producing `\\{`.
+//!
+
+// There's nothing here, this module is solely for documentation.
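A short end-to-end sketch (not part of this change) that runs the constructs documented above, a conditional, a loop and the @index keyword, through the public API; the template and context are invented for illustration:

    use tinytemplate::TinyTemplate;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let mut tt = TinyTemplate::new();
        tt.add_template(
            "greeting",
            "{{ if is_birthday }}Happy Birthday! {{ else }}Hello! {{ endif }}\
             Guests: {{ for name in guests }}{ @index }:{name} {{ endfor }}",
        )?;
        let context = serde_json::json!({ "is_birthday": false, "guests": ["Ana", "Bo"] });
        // Prints something like "Hello! Guests: 0:Ana 1:Bo " (exact whitespace depends on
        // trim markers, none of which are used here).
        println!("{}", tt.render("greeting", &context)?);
        Ok(())
    }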
diff --git a/crates/tinytemplate/src/template.rs b/crates/tinytemplate/src/template.rs
new file mode 100755
index 0000000..6f0162d
--- /dev/null
+++ b/crates/tinytemplate/src/template.rs
@@ -0,0 +1,944 @@
+//! This module implements the bytecode interpreter that actually renders the templates.
+
+use compiler::TemplateCompiler;
+use error::Error::*;
+use error::*;
+use instruction::{Instruction, PathSlice, PathStep};
+use serde_json::Value;
+use std::collections::HashMap;
+use std::fmt::Write;
+use std::slice;
+use ValueFormatter;
+
+/// Enum defining the different kinds of records on the context stack.
+enum ContextElement<'render, 'template> {
+    /// Object contexts shadow everything below them on the stack, because every name is looked up
+    /// in this object.
+    Object(&'render Value),
+    /// Named contexts shadow only one name. Any path that starts with that name is looked up in
+    /// this object, and all others are passed on down the stack.
+    Named(&'template str, &'render Value),
+    /// Iteration contexts shadow one name with the current value of the iteration. They also
+    /// store the iteration state. The two usizes are the index of the current value and the length
+    /// of the array that we're iterating over.
+    Iteration(
+        &'template str,
+        &'render Value,
+        usize,
+        usize,
+        slice::Iter<'render, Value>,
+    ),
+}
+
+/// Helper struct which mostly exists so that I have somewhere to put functions that access the
+/// rendering context stack.
+struct RenderContext<'render, 'template> {
+    original_text: &'template str,
+    context_stack: Vec<ContextElement<'render, 'template>>,
+}
+impl<'render, 'template> RenderContext<'render, 'template> {
+    /// Look up the given path in the context stack and return the value (if found) or an error (if
+    /// not)
+    fn lookup(&self, path: PathSlice) -> Result<&'render Value> {
+        for stack_layer in self.context_stack.iter().rev() {
+            match stack_layer {
+                ContextElement::Object(obj) => return self.lookup_in(path, obj),
+                ContextElement::Named(name, obj) => {
+                    if *name == &*path[0] {
+                        return self.lookup_in(&path[1..], obj);
+                    }
+                }
+                ContextElement::Iteration(name, obj, _, _, _) => {
+                    if *name == &*path[0] {
+                        return self.lookup_in(&path[1..], obj);
+                    }
+                }
+            }
+        }
+        panic!("Attempted to do a lookup with an empty context stack. That shouldn't be possible.")
+    }
+
+    /// Look up a path within a given value object and return the resulting value (if found) or
+    /// an error (if not)
+    fn lookup_in(&self, path: PathSlice, object: &'render Value) -> Result<&'render Value> {
+        let mut current = object;
+        for step in path.iter() {
+            if let PathStep::Index(_, n) = step {
+                if let Some(next) = current.get(n) {
+                    current = next;
+                    continue;
+                }
+            }
+
+            let step: &str = &*step;
+
+            match current.get(step) {
+                Some(next) => current = next,
+                None => return Err(lookup_error(self.original_text, step, path, current)),
+            }
+        }
+        Ok(current)
+    }
+
+    /// Look up the index and length values for the top iteration context on the stack.
+    fn lookup_index(&self) -> Result<(usize, usize)> {
+        for stack_layer in self.context_stack.iter().rev() {
+            match stack_layer {
+                ContextElement::Iteration(_, _, index, length, _) => return Ok((*index, *length)),
+                _ => continue,
+            }
+        }
+        Err(GenericError {
+            msg: "Used @index outside of a foreach block.".to_string(),
+        })
+    }
+
+    /// Look up the root context object
+    fn lookup_root(&self) -> Result<&'render Value> {
+        match self.context_stack.get(0) {
+            Some(ContextElement::Object(obj)) => Ok(obj),
+            Some(_) => {
+                panic!("Expected Object value at root of context stack, but was something else.")
+            }
+            None => panic!(
+                "Attempted to do a lookup with an empty context stack. That shouldn't be possible."
+            ),
+        }
+    }
+}
+
+/// Structure representing a parsed template. It holds the bytecode program for rendering the
+/// template as well as the length of the original template string, which is used as a guess to
+/// pre-size the output string buffer.
+pub(crate) struct Template<'template> {
+    original_text: &'template str,
+    instructions: Vec<Instruction<'template>>,
+    template_len: usize,
+}
+impl<'template> Template<'template> {
+    /// Create a Template from the given template string.
+    pub fn compile(text: &'template str) -> Result<Template> {
+        Ok(Template {
+            original_text: text,
+            template_len: text.len(),
+            instructions: TemplateCompiler::new(text).compile()?,
+        })
+    }
+
+    /// Render this template into a string and return it (or any error if one is encountered).
+    pub fn render(
+        &self,
+        context: &Value,
+        template_registry: &HashMap<&str, Template>,
+        formatter_registry: &HashMap<&str, Box<ValueFormatter>>,
+        default_formatter: &ValueFormatter,
+    ) -> Result<String> {
+        // The length of the original template seems like a reasonable guess at the length of the
+        // output.
+        let mut output = String::with_capacity(self.template_len);
+        self.render_into(
+            context,
+            template_registry,
+            formatter_registry,
+            default_formatter,
+            &mut output,
+        )?;
+        Ok(output)
+    }
+
+    /// Render this template into a given string. Used for calling other templates.
+    pub fn render_into(
+        &self,
+        context: &Value,
+        template_registry: &HashMap<&str, Template>,
+        formatter_registry: &HashMap<&str, Box<ValueFormatter>>,
+        default_formatter: &ValueFormatter,
+        output: &mut String,
+    ) -> Result<()> {
+        let mut program_counter = 0;
+        let mut render_context = RenderContext {
+            original_text: self.original_text,
+            context_stack: vec![ContextElement::Object(context)],
+        };
+
+        while program_counter < self.instructions.len() {
+            match &self.instructions[program_counter] {
+                Instruction::Literal(text) => {
+                    output.push_str(text);
+                    program_counter += 1;
+                }
+                Instruction::Value(path) => {
+                    let first = path.first().unwrap();
+                    if first.starts_with('@') {
+                        // Currently we just hard-code the special @-keywords and have special
+                        // lookup functions to use them because there are lifetime complexities with
+                        // looking up values that don't live for as long as the given context object.
+                        let first: &str = &*first;
+                        match first {
+                            "@index" => {
+                                write!(output, "{}", render_context.lookup_index()?.0).unwrap()
+                            }
+                            "@first" => {
+                                write!(output, "{}", render_context.lookup_index()?.0 == 0).unwrap()
+                            }
+                            "@last" => {
+                                let (index, length) = render_context.lookup_index()?;
+                                write!(output, "{}", index == length - 1).unwrap()
+                            }
+                            "@root" => {
+                                let value_to_render = render_context.lookup_root()?;
+                                default_formatter(value_to_render, output)?;
+                            }
+                            _ => panic!(), // This should have been caught by the parser.
+                        }
+                    } else {
+                        let value_to_render = render_context.lookup(path)?;
+                        default_formatter(value_to_render, output)?;
+                    }
+                    program_counter += 1;
+                }
+                Instruction::FormattedValue(path, name) => {
+                    // The @ keywords aren't supported for formatted values. Should they be?
+                    let value_to_render = render_context.lookup(path)?;
+                    match formatter_registry.get(name) {
+                        Some(formatter) => {
+                            let formatter_result = formatter(value_to_render, output);
+                            if let Err(err) = formatter_result {
+                                return Err(called_formatter_error(self.original_text, name, err));
+                            }
+                        }
+                        None => return Err(unknown_formatter(self.original_text, name)),
+                    }
+                    program_counter += 1;
+                }
+                Instruction::Branch(path, negate, target) => {
+                    let first = path.first().unwrap();
+                    let mut truthy = if first.starts_with('@') {
+                        let first: &str = &*first;
+                        match &*first {
+                            "@index" => render_context.lookup_index()?.0 != 0,
+                            "@first" => render_context.lookup_index()?.0 == 0,
+                            "@last" => {
+                                let (index, length) = render_context.lookup_index()?;
+                                index == (length - 1)
+                            }
+                            "@root" => self.value_is_truthy(render_context.lookup_root()?, path)?,
+                            other => panic!("Unknown keyword {}", other), // This should have been caught by the parser.
+                        }
+                    } else {
+                        let value_to_render = render_context.lookup(path)?;
+                        self.value_is_truthy(value_to_render, path)?
+                    };
+                    if *negate {
+                        truthy = !truthy;
+                    }
+
+                    if truthy {
+                        program_counter = *target;
+                    } else {
+                        program_counter += 1;
+                    }
+                }
+                Instruction::PushNamedContext(path, name) => {
+                    let context_value = render_context.lookup(path)?;
+                    render_context
+                        .context_stack
+                        .push(ContextElement::Named(name, context_value));
+                    program_counter += 1;
+                }
+                Instruction::PushIterationContext(path, name) => {
+                    // We push a context with an invalid index and no value and then wait for the
+                    // following Iterate instruction to set the index and value properly.
+                    let first = path.first().unwrap();
+                    let context_value = match first {
+                        PathStep::Name("@root") => render_context.lookup_root()?,
+                        PathStep::Name(other) if other.starts_with('@') => {
+                            return Err(not_iterable_error(self.original_text, path))
+                        }
+                        _ => render_context.lookup(path)?,
+                    };
+                    match context_value {
+                        Value::Array(ref arr) => {
+                            render_context.context_stack.push(ContextElement::Iteration(
+                                name,
+                                &Value::Null,
+                                ::std::usize::MAX,
+                                arr.len(),
+                                arr.iter(),
+                            ))
+                        }
+                        _ => return Err(not_iterable_error(self.original_text, path)),
+                    };
+                    program_counter += 1;
+                }
+                Instruction::PopContext => {
+                    render_context.context_stack.pop();
+                    program_counter += 1;
+                }
+                Instruction::Goto(target) => {
+                    program_counter = *target;
+                }
+                Instruction::Iterate(target) => {
+                    match render_context.context_stack.last_mut() {
+                        Some(ContextElement::Iteration(_, val, index, _, iter)) => {
+                            match iter.next() {
+                                Some(new_val) => {
+                                    *val = new_val;
+                                    // On the first iteration, this will be usize::MAX so it will
+                                    // wrap around to zero.
+                                    *index = index.wrapping_add(1);
+                                    program_counter += 1;
+                                }
+                                None => {
+                                    program_counter = *target;
+                                }
+                            }
+                        }
+                        _ => panic!("Malformed program."),
+                    };
+                }
+                Instruction::Call(template_name, path) => {
+                    let context_value = render_context.lookup(path)?;
+                    match template_registry.get(template_name) {
+                        Some(templ) => {
+                            let called_templ_result = templ.render_into(
+                                context_value,
+                                template_registry,
+                                formatter_registry,
+                                default_formatter,
+                                output,
+                            );
+                            if let Err(err) = called_templ_result {
+                                return Err(called_template_error(
+                                    self.original_text,
+                                    template_name,
+                                    err,
+                                ));
+                            }
+                        }
+                        None => return Err(unknown_template(self.original_text, template_name)),
+                    }
+                    program_counter += 1;
+                }
+            }
+        }
+        Ok(())
+    }
+
+    fn value_is_truthy(&self, value: &Value, path: PathSlice) -> Result<bool> {
+        let truthy = match value {
+            Value::Null => false,
+            Value::Bool(b) => *b,
+            Value::Number(n) => match n.as_f64() {
+                Some(float) => float != 0.0,
+                None => {
+                    return Err(truthiness_error(self.original_text, path));
+                }
+            },
+            Value::String(s) => !s.is_empty(),
+            Value::Array(arr) => !arr.is_empty(),
+            Value::Object(_) => true,
+        };
+        Ok(truthy)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use compiler::TemplateCompiler;
+
+    fn compile(text: &'static str) -> Template<'static> {
+        Template {
+            original_text: text,
+            template_len: text.len(),
+            instructions: TemplateCompiler::new(text).compile().unwrap(),
+        }
+    }
+
+    #[derive(Serialize)]
+    struct NestedContext {
+        value: usize,
+    }
+
+    #[derive(Serialize)]
+    struct TestContext {
+        number: usize,
+        string: &'static str,
+        boolean: bool,
+        null: Option<usize>,
+        array: Vec<usize>,
+        nested: NestedContext,
+        escapes: &'static str,
+    }
+
+    fn context() -> Value {
+        let ctx = TestContext {
+            number: 5,
+            string: "test",
+            boolean: true,
+            null: None,
+            array: vec![1, 2, 3],
+            nested: NestedContext { value: 10 },
+            escapes: "1:< 2:> 3:& 4:' 5:\"",
+        };
+        ::serde_json::to_value(&ctx).unwrap()
+    }
+
+    fn other_templates() -> HashMap<&'static str, Template<'static>> {
+        let mut map = HashMap::new();
+        map.insert("my_macro", compile("{value}"));
+        map
+    }
+
+    fn format(value: &Value, output: &mut String) -> Result<()> {
+        output.push_str("{");
+        ::format(value, output)?;
+        output.push_str("}");
+        Ok(())
+    }
+
+    fn formatters() -> HashMap<&'static str, Box<ValueFormatter>> {
+        let mut map = HashMap::<&'static str, Box<ValueFormatter>>::new();
+        map.insert("my_formatter", Box::new(format));
+        map
+    }
+
+    pub fn default_formatter() -> &'static ValueFormatter {
+        &::format
+    }
+
+    #[test]
+    fn test_literal() {
+        let template = compile("Hello!");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("Hello!", &string);
+    }
+
+    #[test]
+    fn test_value() {
+        let template = compile("{ number }");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("5", &string);
+    }
+
+    #[test]
+    fn test_path() {
+        let template = compile("The number of the day is { nested.value }.");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("The number of the day is 10.", &string);
+    }
+
+    #[test]
+    fn test_if_taken() {
+        let template = compile("{{ if boolean }}Hello!{{ endif }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("Hello!", &string);
+    }
+
+    #[test]
+    fn test_if_untaken() {
+        let template = compile("{{ if null }}Hello!{{ endif }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("", &string);
+    }
+
+    #[test]
+    fn test_if_else_taken() {
+        let template = compile("{{ if boolean }}Hello!{{ else }}Goodbye!{{ endif }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("Hello!", &string);
+    }
+
+    #[test]
+    fn test_if_else_untaken() {
+        let template = compile("{{ if null }}Hello!{{ else }}Goodbye!{{ endif }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("Goodbye!", &string);
+    }
+
+    #[test]
+    fn test_ifnot_taken() {
+        let template = compile("{{ if not boolean }}Hello!{{ endif }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("", &string);
+    }
+
+    #[test]
+    fn test_ifnot_untaken() {
+        let template = compile("{{ if not null }}Hello!{{ endif }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("Hello!", &string);
+    }
+
+    #[test]
+    fn test_ifnot_else_taken() {
+        let template = compile("{{ if not boolean }}Hello!{{ else }}Goodbye!{{ endif }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("Goodbye!", &string);
+    }
+
+    #[test]
+    fn test_ifnot_else_untaken() {
+        let template = compile("{{ if not null }}Hello!{{ else }}Goodbye!{{ endif }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("Hello!", &string);
+    }
+
+    #[test]
+    fn test_nested_ifs() {
+        let template = compile(
+            "{{ if boolean }}Hi, {{ if null }}there!{{ else }}Hello!{{ endif }}{{ endif }}",
+        );
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("Hi, Hello!", &string);
+    }
+
+    #[test]
+    fn test_with() {
+        let template = compile("{{ with nested as n }}{ n.value } { number }{{endwith}}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("10 5", &string);
+    }
+
+    #[test]
+    fn test_for_loop() {
+        let template = compile("{{ for a in array }}{ a }{{ endfor }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("123", &string);
+    }
+
+    #[test]
+    fn test_for_loop_index() {
+        let template = compile("{{ for a in array }}{ @index }{{ endfor }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("012", &string);
+    }
+
+    #[test]
+    fn test_for_loop_first() {
+        let template =
+            compile("{{ for a in array }}{{if @first }}{ @index }{{ endif }}{{ endfor }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("0", &string);
+    }
+
+    #[test]
+    fn test_for_loop_last() {
+        let template =
+            compile("{{ for a in array }}{{ if @last}}{ @index }{{ endif }}{{ endfor }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("2", &string);
+    }
+
+    #[test]
+    fn test_whitespace_stripping_value() {
+        let template = compile("1  \n\t   {- number -}  \n   1");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("151", &string);
+    }
+
+    #[test]
+    fn test_call() {
+        let template = compile("{{ call my_macro with nested }}");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("10", &string);
+    }
+
+    #[test]
+    fn test_formatter() {
+        let template = compile("{ nested.value | my_formatter }");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("{10}", &string);
+    }
+
+    #[test]
+    fn test_unknown() {
+        let template = compile("{ foobar }");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap_err();
+    }
+
+    #[test]
+    fn test_escaping() {
+        let template = compile("{ escapes }");
+        let context = context();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("1:&lt; 2:&gt; 3:&amp; 4:&#39; 5:&quot;", &string);
+    }
+
+    #[test]
+    fn test_unescaped() {
+        let template = compile("{ escapes | unescaped }");
+        let context = context();
+        let template_registry = other_templates();
+        let mut formatter_registry = formatters();
+        formatter_registry.insert("unescaped", Box::new(::format_unescaped));
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("1:< 2:> 3:& 4:' 5:\"", &string);
+    }
+
+    #[test]
+    fn test_root_print() {
+        let template = compile("{ @root }");
+        let context = "Hello World!";
+        let context = ::serde_json::to_value(&context).unwrap();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("Hello World!", &string);
+    }
+
+    #[test]
+    fn test_root_branch() {
+        let template = compile("{{ if @root }}Hello World!{{ endif }}");
+        let context = true;
+        let context = ::serde_json::to_value(&context).unwrap();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("Hello World!", &string);
+    }
+
+    #[test]
+    fn test_root_iterate() {
+        let template = compile("{{ for a in @root }}{ a }{{ endfor }}");
+        let context = vec!["foo", "bar"];
+        let context = ::serde_json::to_value(&context).unwrap();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("foobar", &string);
+    }
+
+    #[test]
+    fn test_number_truthiness_zero() {
+        let template = compile("{{ if @root }}truthy{{else}}not truthy{{ endif }}");
+        let context = 0;
+        let context = ::serde_json::to_value(&context).unwrap();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("not truthy", &string);
+    }
+
+    #[test]
+    fn test_number_truthiness_one() {
+        let template = compile("{{ if @root }}truthy{{else}}not truthy{{ endif }}");
+        let context = 1;
+        let context = ::serde_json::to_value(&context).unwrap();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("truthy", &string);
+    }
+
+    #[test]
+    fn test_indexed_paths() {
+        #[derive(Serialize)]
+        struct Context {
+            foo: (usize, usize),
+        }
+
+        let template = compile("{ foo.1 }{ foo.0 }");
+        let context = Context { foo: (123, 456) };
+        let context = ::serde_json::to_value(&context).unwrap();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("456123", &string);
+    }
+
+    #[test]
+    fn test_indexed_paths_fall_back_to_string_lookup() {
+        #[derive(Serialize)]
+        struct Context {
+            foo: HashMap<&'static str, usize>,
+        }
+
+        let template = compile("{ foo.1 }{ foo.0 }");
+        let mut foo = HashMap::new();
+        foo.insert("0", 123);
+        foo.insert("1", 456);
+        let context = Context { foo };
+        let context = ::serde_json::to_value(&context).unwrap();
+        let template_registry = other_templates();
+        let formatter_registry = formatters();
+        let string = template
+            .render(
+                &context,
+                &template_registry,
+                &formatter_registry,
+                &default_formatter(),
+            )
+            .unwrap();
+        assert_eq!("456123", &string);
+    }
+}
diff --git a/crates/tokio-macros/.cargo-checksum.json b/crates/tokio-macros/.cargo-checksum.json
new file mode 100644
index 0000000..b0b0a0e
--- /dev/null
+++ b/crates/tokio-macros/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"0fa977ef55cfe5c26907f5f3c8700a777d205608ab9edf7fd98762b60ba95790","Cargo.toml":"8133ec808652d1d7aa1eb39b135c2afa559265bb46ebbaf2a3cfe8b552fa9202","LICENSE":"9797ea525350e8779aab3771e0276bbeea8b824893882172acfc94743b8d953d","README.md":"6094ea500349ce239a12b07d7dfd4ea965a7f14c993da2abc4b3c39a0479683a","src/entry.rs":"237a4a8e159e841aaa2b49ec1697a68fef6165542ac0197dba1ee963f688dc1d","src/lib.rs":"8ada51fda322e36e6b52a3e1fcbeab7c72332f79e54af111584f5a17b961afb5","src/select.rs":"e01c34fe0fdbc49a40b15b5b42816eea7d7b13db6e3a2774de92eb87f6e48231"},"package":"630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"}
\ No newline at end of file
diff --git a/crates/tokio-macros/Android.bp b/crates/tokio-macros/Android.bp
new file mode 100644
index 0000000..da4c48c
--- /dev/null
+++ b/crates/tokio-macros/Android.bp
@@ -0,0 +1,28 @@
+// This file is generated by cargo_embargo.
+// Do not modify this file because the changes will be overridden on upgrade.
+
+package {
+    default_applicable_licenses: ["external_rust_crates_tokio-macros_license"],
+    default_team: "trendy_team_android_rust",
+}
+
+license {
+    name: "external_rust_crates_tokio-macros_license",
+    visibility: [":__subpackages__"],
+    license_kinds: ["SPDX-license-identifier-MIT"],
+    license_text: ["LICENSE"],
+}
+
+rust_proc_macro {
+    name: "libtokio_macros",
+    crate_name: "tokio_macros",
+    cargo_env_compat: true,
+    cargo_pkg_version: "2.1.0",
+    crate_root: "src/lib.rs",
+    edition: "2018",
+    rustlibs: [
+        "libproc_macro2",
+        "libquote",
+        "libsyn",
+    ],
+}
diff --git a/crates/tokio-macros/CHANGELOG.md b/crates/tokio-macros/CHANGELOG.md
new file mode 100644
index 0000000..e9d58db
--- /dev/null
+++ b/crates/tokio-macros/CHANGELOG.md
@@ -0,0 +1,172 @@
+# 2.1.0 (April 25th, 2023)
+
+- macros: fix typo in `#[tokio::test]` docs ([#5636])
+- macros: make entrypoints more efficient ([#5621])
+
+[#5621]: https://github.com/tokio-rs/tokio/pull/5621
+[#5636]: https://github.com/tokio-rs/tokio/pull/5636
+
+# 2.0.0 (March 24th, 2023)
+
+This major release updates the dependency on the syn crate to 2.0.0, and
+increases the MSRV to 1.56.
+
+As part of this release, we are adopting a policy of depending on a specific minor
+release of tokio-macros. This prevents Tokio from being able to pull in many different
+versions of tokio-macros.
+
+- macros: update `syn` ([#5572])
+- macros: accept path as crate rename ([#5557])
+
+[#5572]: https://github.com/tokio-rs/tokio/pull/5572
+[#5557]: https://github.com/tokio-rs/tokio/pull/5557
+
+# 1.8.2 (November 30th, 2022)
+
+- fix a regression introduced in 1.8.1 ([#5244])
+
+[#5244]: https://github.com/tokio-rs/tokio/pull/5244
+
+# 1.8.1 (November 29th, 2022)
+
+(yanked)
+
+- macros: Pin Futures in `#[tokio::test]` to stack ([#5205])
+- macros: Reduce usage of last statement spans in proc-macros ([#5092])
+- macros: Improve the documentation for `#[tokio::test]` ([#4761])
+
+[#5205]: https://github.com/tokio-rs/tokio/pull/5205
+[#5092]: https://github.com/tokio-rs/tokio/pull/5092
+[#4761]: https://github.com/tokio-rs/tokio/pull/4761
+
+# 1.8.0 (June 4th, 2022)
+
+- macros: always emit return statement ([#4636])
+- macros: support setting a custom crate name for `#[tokio::main]` and `#[tokio::test]` ([#4613])
+
+[#4613]: https://github.com/tokio-rs/tokio/pull/4613
+[#4636]: https://github.com/tokio-rs/tokio/pull/4636
+
+# 1.7.0 (December 15th, 2021)
+
+- macros: address remaining `clippy::semicolon_if_nothing_returned` warning ([#4252])
+
+[#4252]: https://github.com/tokio-rs/tokio/pull/4252
+
+# 1.6.0 (November 16th, 2021)
+
+- macros: fix mut patterns in `select!` macro ([#4211])
+
+[#4211]: https://github.com/tokio-rs/tokio/pull/4211
+
+# 1.5.1 (October 29th, 2021)
+
+- macros: fix type resolution error in `#[tokio::main]` ([#4176])
+
+[#4176]: https://github.com/tokio-rs/tokio/pull/4176
+
+# 1.5.0 (October 13th, 2021)
+
+- macros: make tokio-macros attributes more IDE friendly ([#4162])
+
+[#4162]: https://github.com/tokio-rs/tokio/pull/4162
+
+# 1.4.1 (September 30th, 2021)
+
+Reverted: run `current_thread` inside `LocalSet` ([#4027])
+
+# 1.4.0 (September 29th, 2021)
+
+(yanked)
+
+### Changed
+
+- macros: run `current_thread` inside `LocalSet` ([#4027])
+- macros: explicitly relaxed clippy lint for `.expect()` in runtime entry macro ([#4030])
+
+### Fixed
+
+- macros: fix invalid error messages in functions wrapped with `#[main]` or `#[test]` ([#4067])
+
+[#4027]: https://github.com/tokio-rs/tokio/pull/4027
+[#4030]: https://github.com/tokio-rs/tokio/pull/4030
+[#4067]: https://github.com/tokio-rs/tokio/pull/4067
+
+# 1.3.0 (July 7, 2021)
+
+- macros: don't trigger `clippy::unwrap_used` ([#3926])
+
+[#3926]: https://github.com/tokio-rs/tokio/pull/3926
+
+# 1.2.0 (May 14, 2021)
+
+- macros: forward input arguments in `#[tokio::test]` ([#3691])
+- macros: improve diagnostics on type mismatch ([#3766])
+- macros: various error message improvements ([#3677])
+
+[#3677]: https://github.com/tokio-rs/tokio/pull/3677
+[#3691]: https://github.com/tokio-rs/tokio/pull/3691
+[#3766]: https://github.com/tokio-rs/tokio/pull/3766
+
+# 1.1.0 (February 5, 2021)
+
+- add `start_paused` option to macros ([#3492])
+
+# 1.0.0 (December 23, 2020)
+
+- track `tokio` 1.0 release.
+
+# 0.3.1 (October 25, 2020)
+
+### Fixed
+
+- fix incorrect docs regarding `max_threads` option ([#3038])
+
+# 0.3.0 (October 15, 2020)
+
+- Track `tokio` 0.3 release.
+
+### Changed
+- options are renamed to track `tokio` runtime builder fn names.
+- `#[tokio::main]` macro requires `rt-multi-thread` when no `flavor` is specified.
+
+# 0.2.5 (February 27, 2020)
+
+### Fixed
+- doc improvements ([#2225]).
+
+# 0.2.4 (January 27, 2020)
+
+### Fixed
+- generics on `#[tokio::main]` function ([#2177]).
+
+### Added
+- support for `tokio::select!` ([#2152]).
+
+# 0.2.3 (January 7, 2020)
+
+### Fixed
+- Revert breaking change.
+
+# 0.2.2 (January 7, 2020)
+
+### Added
+- General refactoring and inclusion of additional runtime options ([#2022] and [#2038])
+
+# 0.2.1 (December 18, 2019)
+
+### Fixes
+- inherit visibility when wrapping async fn ([#1954]).
+
+# 0.2.0 (November 26, 2019)
+
+- Initial release
+
+[#1954]: https://github.com/tokio-rs/tokio/pull/1954
+[#2022]: https://github.com/tokio-rs/tokio/pull/2022
+[#2038]: https://github.com/tokio-rs/tokio/pull/2038
+[#2152]: https://github.com/tokio-rs/tokio/pull/2152
+[#2177]: https://github.com/tokio-rs/tokio/pull/2177
+[#2225]: https://github.com/tokio-rs/tokio/pull/2225
+[#3038]: https://github.com/tokio-rs/tokio/pull/3038
+[#3492]: https://github.com/tokio-rs/tokio/pull/3492
diff --git a/crates/tokio-macros/Cargo.lock b/crates/tokio-macros/Cargo.lock
new file mode 100644
index 0000000..258c103
--- /dev/null
+++ b/crates/tokio-macros/Cargo.lock
@@ -0,0 +1,364 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "addr2line"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
+[[package]]
+name = "autocfg"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+
+[[package]]
+name = "backtrace"
+version = "0.3.73"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a"
+dependencies = [
+ "addr2line",
+ "cc",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+]
+
+[[package]]
+name = "bitflags"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+
+[[package]]
+name = "bytes"
+version = "1.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50"
+
+[[package]]
+name = "cc"
+version = "1.1.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6"
+dependencies = [
+ "shlex",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "gimli"
+version = "0.29.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd"
+
+[[package]]
+name = "hermit-abi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
+
+[[package]]
+name = "libc"
+version = "0.2.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+
+[[package]]
+name = "lock_api"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "miniz_oxide"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08"
+dependencies = [
+ "adler",
+]
+
+[[package]]
+name = "mio"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "wasi",
+ "windows-sys",
+]
+
+[[package]]
+name = "object"
+version = "0.36.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27"
+dependencies = [
+ "lock_api",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.9.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "windows-targets",
+]
+
+[[package]]
+name = "pin-project-lite"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
+
+[[package]]
+name = "scopeguard"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
+
+[[package]]
+name = "shlex"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
+
+[[package]]
+name = "signal-hook-registry"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
+
+[[package]]
+name = "socket2"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
+dependencies = [
+ "libc",
+ "windows-sys",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "tokio"
+version = "1.39.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5"
+dependencies = [
+ "backtrace",
+ "bytes",
+ "libc",
+ "mio",
+ "parking_lot",
+ "pin-project-lite",
+ "signal-hook-registry",
+ "socket2",
+ "tokio-macros 2.4.0",
+ "windows-sys",
+]
+
+[[package]]
+name = "tokio-macros"
+version = "2.1.0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-macros"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
diff --git a/crates/tokio-macros/Cargo.toml b/crates/tokio-macros/Cargo.toml
new file mode 100644
index 0000000..6bd8b81
--- /dev/null
+++ b/crates/tokio-macros/Cargo.toml
@@ -0,0 +1,47 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.56"
+name = "tokio-macros"
+version = "2.1.0"
+authors = ["Tokio Contributors <team@tokio.rs>"]
+description = """
+Tokio's proc macros.
+"""
+homepage = "https://tokio.rs"
+readme = "README.md"
+categories = ["asynchronous"]
+license = "MIT"
+repository = "https://github.com/tokio-rs/tokio"
+
+[package.metadata.docs.rs]
+all-features = true
+
+[lib]
+proc-macro = true
+
+[dependencies.proc-macro2]
+version = "1.0.7"
+
+[dependencies.quote]
+version = "1"
+
+[dependencies.syn]
+version = "2.0"
+features = ["full"]
+
+[dev-dependencies.tokio]
+version = "1.0.0"
+features = ["full"]
+
+[features]
diff --git a/crates/tokio-macros/LICENSE b/crates/tokio-macros/LICENSE
new file mode 100644
index 0000000..12d1037
--- /dev/null
+++ b/crates/tokio-macros/LICENSE
@@ -0,0 +1,47 @@
+Copyright (c) 2023 Tokio Contributors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+The MIT License (MIT)
+
+Copyright (c) 2019 Yoshua Wuyts
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/crates/tokio-macros/METADATA b/crates/tokio-macros/METADATA
new file mode 100644
index 0000000..d71aedd
--- /dev/null
+++ b/crates/tokio-macros/METADATA
@@ -0,0 +1,23 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/tokio-macros
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
+name: "tokio-macros"
+description: "Tokio\'s proc macros."
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/tokio-macros"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/tokio-macros/tokio-macros-2.1.0.crate"
+  }
+  version: "2.1.0"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2023
+    month: 11
+    day: 1
+  }
+}
diff --git a/crates/tokio-macros/MODULE_LICENSE_MIT b/crates/tokio-macros/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/crates/tokio-macros/MODULE_LICENSE_MIT
diff --git a/crates/tokio-macros/README.md b/crates/tokio-macros/README.md
new file mode 100644
index 0000000..988726f
--- /dev/null
+++ b/crates/tokio-macros/README.md
@@ -0,0 +1,13 @@
+# Tokio Macros
+
+Procedural macros for use with Tokio
+
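+## Usage
+
+These macros are normally consumed through the `tokio` crate's re-exports
+rather than by depending on `tokio-macros` directly; a minimal sketch,
+mirroring the examples in `src/lib.rs` (assumes a `tokio` dependency with
+the macros and a runtime feature enabled):
+
+```rust
+#[tokio::main]
+async fn main() {
+    println!("Hello world");
+}
+```
+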
+## License
+
+This project is licensed under the [MIT license](LICENSE).
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in Tokio by you, shall be licensed as MIT, without any additional
+terms or conditions.
diff --git a/crates/tokio-macros/TEST_MAPPING b/crates/tokio-macros/TEST_MAPPING
new file mode 100644
index 0000000..a32d61c
--- /dev/null
+++ b/crates/tokio-macros/TEST_MAPPING
@@ -0,0 +1,29 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+  "imports": [
+    {
+      "path": "external/rust/crates/async-stream"
+    },
+    {
+      "path": "external/rust/crates/futures-util"
+    },
+    {
+      "path": "external/rust/crates/tokio"
+    },
+    {
+      "path": "external/rust/crates/tokio-test"
+    },
+    {
+      "path": "external/uwb/src"
+    },
+    {
+      "path": "packages/modules/DnsResolver"
+    },
+    {
+      "path": "system/security/keystore2"
+    },
+    {
+      "path": "system/security/keystore2/legacykeystore"
+    }
+  ]
+}
diff --git a/crates/tokio-macros/cargo_embargo.json b/crates/tokio-macros/cargo_embargo.json
new file mode 100644
index 0000000..ab4e99c
--- /dev/null
+++ b/crates/tokio-macros/cargo_embargo.json
@@ -0,0 +1,8 @@
+{
+  "package": {
+    "tokio-macros": {
+      "device_supported": false
+    }
+  },
+  "run_cargo": false
+}
diff --git a/crates/tokio-macros/src/entry.rs b/crates/tokio-macros/src/entry.rs
new file mode 100644
index 0000000..edac530
--- /dev/null
+++ b/crates/tokio-macros/src/entry.rs
@@ -0,0 +1,591 @@
+use proc_macro2::{Span, TokenStream, TokenTree};
+use quote::{quote, quote_spanned, ToTokens};
+use syn::parse::{Parse, ParseStream, Parser};
+use syn::{braced, Attribute, Ident, Path, Signature, Visibility};
+
+// syn::AttributeArgs does not implement syn::Parse
+type AttributeArgs = syn::punctuated::Punctuated<syn::Meta, syn::Token![,]>;
+
+#[derive(Clone, Copy, PartialEq)]
+enum RuntimeFlavor {
+    CurrentThread,
+    Threaded,
+}
+
+impl RuntimeFlavor {
+    fn from_str(s: &str) -> Result<RuntimeFlavor, String> {
+        match s {
+            "current_thread" => Ok(RuntimeFlavor::CurrentThread),
+            "multi_thread" => Ok(RuntimeFlavor::Threaded),
+            "single_thread" => Err("The single threaded runtime flavor is called `current_thread`.".to_string()),
+            "basic_scheduler" => Err("The `basic_scheduler` runtime flavor has been renamed to `current_thread`.".to_string()),
+            "threaded_scheduler" => Err("The `threaded_scheduler` runtime flavor has been renamed to `multi_thread`.".to_string()),
+            _ => Err(format!("No such runtime flavor `{}`. The runtime flavors are `current_thread` and `multi_thread`.", s)),
+        }
+    }
+}
+
+struct FinalConfig {
+    flavor: RuntimeFlavor,
+    worker_threads: Option<usize>,
+    start_paused: Option<bool>,
+    crate_name: Option<Path>,
+}
+
+/// Config used in case of the attribute not being able to build a valid config
+const DEFAULT_ERROR_CONFIG: FinalConfig = FinalConfig {
+    flavor: RuntimeFlavor::CurrentThread,
+    worker_threads: None,
+    start_paused: None,
+    crate_name: None,
+};
+
+struct Configuration {
+    rt_multi_thread_available: bool,
+    default_flavor: RuntimeFlavor,
+    flavor: Option<RuntimeFlavor>,
+    worker_threads: Option<(usize, Span)>,
+    start_paused: Option<(bool, Span)>,
+    is_test: bool,
+    crate_name: Option<Path>,
+}
+
+impl Configuration {
+    fn new(is_test: bool, rt_multi_thread: bool) -> Self {
+        Configuration {
+            rt_multi_thread_available: rt_multi_thread,
+            default_flavor: match is_test {
+                true => RuntimeFlavor::CurrentThread,
+                false => RuntimeFlavor::Threaded,
+            },
+            flavor: None,
+            worker_threads: None,
+            start_paused: None,
+            is_test,
+            crate_name: None,
+        }
+    }
+
+    fn set_flavor(&mut self, runtime: syn::Lit, span: Span) -> Result<(), syn::Error> {
+        if self.flavor.is_some() {
+            return Err(syn::Error::new(span, "`flavor` set multiple times."));
+        }
+
+        let runtime_str = parse_string(runtime, span, "flavor")?;
+        let runtime =
+            RuntimeFlavor::from_str(&runtime_str).map_err(|err| syn::Error::new(span, err))?;
+        self.flavor = Some(runtime);
+        Ok(())
+    }
+
+    fn set_worker_threads(
+        &mut self,
+        worker_threads: syn::Lit,
+        span: Span,
+    ) -> Result<(), syn::Error> {
+        if self.worker_threads.is_some() {
+            return Err(syn::Error::new(
+                span,
+                "`worker_threads` set multiple times.",
+            ));
+        }
+
+        let worker_threads = parse_int(worker_threads, span, "worker_threads")?;
+        if worker_threads == 0 {
+            return Err(syn::Error::new(span, "`worker_threads` may not be 0."));
+        }
+        self.worker_threads = Some((worker_threads, span));
+        Ok(())
+    }
+
+    fn set_start_paused(&mut self, start_paused: syn::Lit, span: Span) -> Result<(), syn::Error> {
+        if self.start_paused.is_some() {
+            return Err(syn::Error::new(span, "`start_paused` set multiple times."));
+        }
+
+        let start_paused = parse_bool(start_paused, span, "start_paused")?;
+        self.start_paused = Some((start_paused, span));
+        Ok(())
+    }
+
+    fn set_crate_name(&mut self, name: syn::Lit, span: Span) -> Result<(), syn::Error> {
+        if self.crate_name.is_some() {
+            return Err(syn::Error::new(span, "`crate` set multiple times."));
+        }
+        let name_path = parse_path(name, span, "crate")?;
+        self.crate_name = Some(name_path);
+        Ok(())
+    }
+
+    fn macro_name(&self) -> &'static str {
+        if self.is_test {
+            "tokio::test"
+        } else {
+            "tokio::main"
+        }
+    }
+
+    fn build(&self) -> Result<FinalConfig, syn::Error> {
+        let flavor = self.flavor.unwrap_or(self.default_flavor);
+        use RuntimeFlavor::*;
+
+        let worker_threads = match (flavor, self.worker_threads) {
+            (CurrentThread, Some((_, worker_threads_span))) => {
+                let msg = format!(
+                    "The `worker_threads` option requires the `multi_thread` runtime flavor. Use `#[{}(flavor = \"multi_thread\")]`",
+                    self.macro_name(),
+                );
+                return Err(syn::Error::new(worker_threads_span, msg));
+            }
+            (CurrentThread, None) => None,
+            (Threaded, worker_threads) if self.rt_multi_thread_available => {
+                worker_threads.map(|(val, _span)| val)
+            }
+            (Threaded, _) => {
+                let msg = if self.flavor.is_none() {
+                    "The default runtime flavor is `multi_thread`, but the `rt-multi-thread` feature is disabled."
+                } else {
+                    "The runtime flavor `multi_thread` requires the `rt-multi-thread` feature."
+                };
+                return Err(syn::Error::new(Span::call_site(), msg));
+            }
+        };
+
+        let start_paused = match (flavor, self.start_paused) {
+            (Threaded, Some((_, start_paused_span))) => {
+                let msg = format!(
+                    "The `start_paused` option requires the `current_thread` runtime flavor. Use `#[{}(flavor = \"current_thread\")]`",
+                    self.macro_name(),
+                );
+                return Err(syn::Error::new(start_paused_span, msg));
+            }
+            (CurrentThread, Some((start_paused, _))) => Some(start_paused),
+            (_, None) => None,
+        };
+
+        Ok(FinalConfig {
+            crate_name: self.crate_name.clone(),
+            flavor,
+            worker_threads,
+            start_paused,
+        })
+    }
+}
+
+fn parse_int(int: syn::Lit, span: Span, field: &str) -> Result<usize, syn::Error> {
+    match int {
+        syn::Lit::Int(lit) => match lit.base10_parse::<usize>() {
+            Ok(value) => Ok(value),
+            Err(e) => Err(syn::Error::new(
+                span,
+                format!("Failed to parse value of `{}` as integer: {}", field, e),
+            )),
+        },
+        _ => Err(syn::Error::new(
+            span,
+            format!("Failed to parse value of `{}` as integer.", field),
+        )),
+    }
+}
+
+fn parse_string(int: syn::Lit, span: Span, field: &str) -> Result<String, syn::Error> {
+    match int {
+        syn::Lit::Str(s) => Ok(s.value()),
+        syn::Lit::Verbatim(s) => Ok(s.to_string()),
+        _ => Err(syn::Error::new(
+            span,
+            format!("Failed to parse value of `{}` as string.", field),
+        )),
+    }
+}
+
+fn parse_path(lit: syn::Lit, span: Span, field: &str) -> Result<Path, syn::Error> {
+    match lit {
+        syn::Lit::Str(s) => {
+            let err = syn::Error::new(
+                span,
+                format!(
+                    "Failed to parse value of `{}` as path: \"{}\"",
+                    field,
+                    s.value()
+                ),
+            );
+            s.parse::<syn::Path>().map_err(|_| err.clone())
+        }
+        _ => Err(syn::Error::new(
+            span,
+            format!("Failed to parse value of `{}` as path.", field),
+        )),
+    }
+}
+
+fn parse_bool(bool: syn::Lit, span: Span, field: &str) -> Result<bool, syn::Error> {
+    match bool {
+        syn::Lit::Bool(b) => Ok(b.value),
+        _ => Err(syn::Error::new(
+            span,
+            format!("Failed to parse value of `{}` as bool.", field),
+        )),
+    }
+}
+
+fn build_config(
+    input: &ItemFn,
+    args: AttributeArgs,
+    is_test: bool,
+    rt_multi_thread: bool,
+) -> Result<FinalConfig, syn::Error> {
+    if input.sig.asyncness.is_none() {
+        let msg = "the `async` keyword is missing from the function declaration";
+        return Err(syn::Error::new_spanned(input.sig.fn_token, msg));
+    }
+
+    let mut config = Configuration::new(is_test, rt_multi_thread);
+    let macro_name = config.macro_name();
+
+    for arg in args {
+        match arg {
+            syn::Meta::NameValue(namevalue) => {
+                let ident = namevalue
+                    .path
+                    .get_ident()
+                    .ok_or_else(|| {
+                        syn::Error::new_spanned(&namevalue, "Must have specified ident")
+                    })?
+                    .to_string()
+                    .to_lowercase();
+                let lit = match &namevalue.value {
+                    syn::Expr::Lit(syn::ExprLit { lit, .. }) => lit,
+                    expr => return Err(syn::Error::new_spanned(expr, "Must be a literal")),
+                };
+                match ident.as_str() {
+                    "worker_threads" => {
+                        config.set_worker_threads(lit.clone(), syn::spanned::Spanned::span(lit))?;
+                    }
+                    "flavor" => {
+                        config.set_flavor(lit.clone(), syn::spanned::Spanned::span(lit))?;
+                    }
+                    "start_paused" => {
+                        config.set_start_paused(lit.clone(), syn::spanned::Spanned::span(lit))?;
+                    }
+                    "core_threads" => {
+                        let msg = "Attribute `core_threads` is renamed to `worker_threads`";
+                        return Err(syn::Error::new_spanned(namevalue, msg));
+                    }
+                    "crate" => {
+                        config.set_crate_name(lit.clone(), syn::spanned::Spanned::span(lit))?;
+                    }
+                    name => {
+                        let msg = format!(
+                            "Unknown attribute {} is specified; expected one of: `flavor`, `worker_threads`, `start_paused`, `crate`",
+                            name,
+                        );
+                        return Err(syn::Error::new_spanned(namevalue, msg));
+                    }
+                }
+            }
+            syn::Meta::Path(path) => {
+                let name = path
+                    .get_ident()
+                    .ok_or_else(|| syn::Error::new_spanned(&path, "Must have specified ident"))?
+                    .to_string()
+                    .to_lowercase();
+                let msg = match name.as_str() {
+                    "threaded_scheduler" | "multi_thread" => {
+                        format!(
+                            "Set the runtime flavor with #[{}(flavor = \"multi_thread\")].",
+                            macro_name
+                        )
+                    }
+                    "basic_scheduler" | "current_thread" | "single_threaded" => {
+                        format!(
+                            "Set the runtime flavor with #[{}(flavor = \"current_thread\")].",
+                            macro_name
+                        )
+                    }
+                    "flavor" | "worker_threads" | "start_paused" => {
+                        format!("The `{}` attribute requires an argument.", name)
+                    }
+                    name => {
+                        format!("Unknown attribute {} is specified; expected one of: `flavor`, `worker_threads`, `start_paused`, `crate`", name)
+                    }
+                };
+                return Err(syn::Error::new_spanned(path, msg));
+            }
+            other => {
+                return Err(syn::Error::new_spanned(
+                    other,
+                    "Unknown attribute inside the macro",
+                ));
+            }
+        }
+    }
+
+    config.build()
+}
+
+fn parse_knobs(mut input: ItemFn, is_test: bool, config: FinalConfig) -> TokenStream {
+    input.sig.asyncness = None;
+
+    // If type mismatch occurs, the current rustc points to the last statement.
+    let (last_stmt_start_span, last_stmt_end_span) = {
+        let mut last_stmt = input.stmts.last().cloned().unwrap_or_default().into_iter();
+
+        // `Span` on stable Rust has a limitation that only points to the first
+        // token, not the whole tokens. We can work around this limitation by
+        // using the first/last span of the tokens like
+        // `syn::Error::new_spanned` does.
+        let start = last_stmt.next().map_or_else(Span::call_site, |t| t.span());
+        let end = last_stmt.last().map_or(start, |t| t.span());
+        (start, end)
+    };
+
+    let crate_path = config
+        .crate_name
+        .map(ToTokens::into_token_stream)
+        .unwrap_or_else(|| Ident::new("tokio", last_stmt_start_span).into_token_stream());
+
+    let mut rt = match config.flavor {
+        RuntimeFlavor::CurrentThread => quote_spanned! {last_stmt_start_span=>
+            #crate_path::runtime::Builder::new_current_thread()
+        },
+        RuntimeFlavor::Threaded => quote_spanned! {last_stmt_start_span=>
+            #crate_path::runtime::Builder::new_multi_thread()
+        },
+    };
+    if let Some(v) = config.worker_threads {
+        rt = quote! { #rt.worker_threads(#v) };
+    }
+    if let Some(v) = config.start_paused {
+        rt = quote! { #rt.start_paused(#v) };
+    }
+
+    let header = if is_test {
+        quote! {
+            #[::core::prelude::v1::test]
+        }
+    } else {
+        quote! {}
+    };
+
+    let body_ident = quote! { body };
+    let last_block = quote_spanned! {last_stmt_end_span=>
+        #[allow(clippy::expect_used, clippy::diverging_sub_expression)]
+        {
+            return #rt
+                .enable_all()
+                .build()
+                .expect("Failed building the Runtime")
+                .block_on(#body_ident);
+        }
+    };
+
+    let body = input.body();
+
+    // For test functions pin the body to the stack and use `Pin<&mut dyn
+    // Future>` to reduce the amount of `Runtime::block_on` (and related
+    // functions) copies we generate during compilation due to the generic
+    // parameter `F` (the future to block on). This could have an impact on
+    // performance, but because it's only for testing it's unlikely to be very
+    // large.
+    //
+    // We don't do this for the main function as it should only be used once so
+    // there will be no benefit.
+    let body = if is_test {
+        let output_type = match &input.sig.output {
+            // For functions with no return value syn doesn't print anything,
+            // but that doesn't work as `Output` for our boxed `Future`, so
+            // default to `()` (the same type as the function output).
+            syn::ReturnType::Default => quote! { () },
+            syn::ReturnType::Type(_, ret_type) => quote! { #ret_type },
+        };
+        quote! {
+            let body = async #body;
+            #crate_path::pin!(body);
+            let body: ::std::pin::Pin<&mut dyn ::std::future::Future<Output = #output_type>> = body;
+        }
+    } else {
+        quote! {
+            let body = async #body;
+        }
+    };
+
+    input.into_tokens(header, body, last_block)
+}
+
+fn token_stream_with_error(mut tokens: TokenStream, error: syn::Error) -> TokenStream {
+    tokens.extend(error.into_compile_error());
+    tokens
+}
+
+#[cfg(not(test))] // Work around for rust-lang/rust#62127
+pub(crate) fn main(args: TokenStream, item: TokenStream, rt_multi_thread: bool) -> TokenStream {
+    // If any of the steps for this macro fail, we still want to expand to an item that is as close
+    // to the expected output as possible. This helps out IDEs such that completions and other
+    // related features keep working.
+    let input: ItemFn = match syn::parse2(item.clone()) {
+        Ok(it) => it,
+        Err(e) => return token_stream_with_error(item, e),
+    };
+
+    let config = if input.sig.ident == "main" && !input.sig.inputs.is_empty() {
+        let msg = "the main function cannot accept arguments";
+        Err(syn::Error::new_spanned(&input.sig.ident, msg))
+    } else {
+        AttributeArgs::parse_terminated
+            .parse2(args)
+            .and_then(|args| build_config(&input, args, false, rt_multi_thread))
+    };
+
+    match config {
+        Ok(config) => parse_knobs(input, false, config),
+        Err(e) => token_stream_with_error(parse_knobs(input, false, DEFAULT_ERROR_CONFIG), e),
+    }
+}
+
+pub(crate) fn test(args: TokenStream, item: TokenStream, rt_multi_thread: bool) -> TokenStream {
+    // If any of the steps for this macro fail, we still want to expand to an item that is as close
+    // to the expected output as possible. This helps out IDEs such that completions and other
+    // related features keep working.
+    let input: ItemFn = match syn::parse2(item.clone()) {
+        Ok(it) => it,
+        Err(e) => return token_stream_with_error(item, e),
+    };
+    let config = if let Some(attr) = input.attrs().find(|attr| attr.meta.path().is_ident("test")) {
+        let msg = "second test attribute is supplied";
+        Err(syn::Error::new_spanned(attr, msg))
+    } else {
+        AttributeArgs::parse_terminated
+            .parse2(args)
+            .and_then(|args| build_config(&input, args, true, rt_multi_thread))
+    };
+
+    match config {
+        Ok(config) => parse_knobs(input, true, config),
+        Err(e) => token_stream_with_error(parse_knobs(input, true, DEFAULT_ERROR_CONFIG), e),
+    }
+}
+
+struct ItemFn {
+    outer_attrs: Vec<Attribute>,
+    vis: Visibility,
+    sig: Signature,
+    brace_token: syn::token::Brace,
+    inner_attrs: Vec<Attribute>,
+    stmts: Vec<proc_macro2::TokenStream>,
+}
+
+impl ItemFn {
+    /// Access all attributes of the function item.
+    fn attrs(&self) -> impl Iterator<Item = &Attribute> {
+        self.outer_attrs.iter().chain(self.inner_attrs.iter())
+    }
+
+    /// Get the body of the function item in a manner so that it can be
+    /// conveniently used with the `quote!` macro.
+    fn body(&self) -> Body<'_> {
+        Body {
+            brace_token: self.brace_token,
+            stmts: &self.stmts,
+        }
+    }
+
+    /// Convert our local function item into a token stream.
+    fn into_tokens(
+        self,
+        header: proc_macro2::TokenStream,
+        body: proc_macro2::TokenStream,
+        last_block: proc_macro2::TokenStream,
+    ) -> TokenStream {
+        let mut tokens = proc_macro2::TokenStream::new();
+        header.to_tokens(&mut tokens);
+
+        // Outer attributes are simply streamed as-is.
+        for attr in self.outer_attrs {
+            attr.to_tokens(&mut tokens);
+        }
+
+        // Inner attributes require extra care, since they're not supported on
+        // blocks (which is what we're expanded into) we instead lift them
+        // outside of the function. This matches the behaviour of `syn`.
+        for mut attr in self.inner_attrs {
+            attr.style = syn::AttrStyle::Outer;
+            attr.to_tokens(&mut tokens);
+        }
+
+        self.vis.to_tokens(&mut tokens);
+        self.sig.to_tokens(&mut tokens);
+
+        self.brace_token.surround(&mut tokens, |tokens| {
+            body.to_tokens(tokens);
+            last_block.to_tokens(tokens);
+        });
+
+        tokens
+    }
+}
+
+impl Parse for ItemFn {
+    #[inline]
+    fn parse(input: ParseStream<'_>) -> syn::Result<Self> {
+        // This parse implementation has been largely lifted from `syn`, with
+        // the exception of:
+        // * We don't have access to the plumbing necessary to parse inner
+        //   attributes in-place.
+        // * We do our own statements parsing to avoid recursively parsing
+        //   entire statements and only look for the parts we're interested in.
+
+        let outer_attrs = input.call(Attribute::parse_outer)?;
+        let vis: Visibility = input.parse()?;
+        let sig: Signature = input.parse()?;
+
+        let content;
+        let brace_token = braced!(content in input);
+        let inner_attrs = Attribute::parse_inner(&content)?;
+
+        let mut buf = proc_macro2::TokenStream::new();
+        let mut stmts = Vec::new();
+
+        while !content.is_empty() {
+            if let Some(semi) = content.parse::<Option<syn::Token![;]>>()? {
+                semi.to_tokens(&mut buf);
+                stmts.push(buf);
+                buf = proc_macro2::TokenStream::new();
+                continue;
+            }
+
+            // Parse a single token tree and extend our current buffer with it.
+            // This avoids parsing the entire content of the sub-tree.
+            buf.extend([content.parse::<TokenTree>()?]);
+        }
+
+        if !buf.is_empty() {
+            stmts.push(buf);
+        }
+
+        Ok(Self {
+            outer_attrs,
+            vis,
+            sig,
+            brace_token,
+            inner_attrs,
+            stmts,
+        })
+    }
+}
+
+struct Body<'a> {
+    brace_token: syn::token::Brace,
+    // Statements, with terminating `;`.
+    stmts: &'a [TokenStream],
+}
+
+impl ToTokens for Body<'_> {
+    fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
+        self.brace_token.surround(tokens, |tokens| {
+            for stmt in self.stmts {
+                stmt.to_tokens(tokens);
+            }
+        })
+    }
+}
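+
+// For reference, the expansion `parse_knobs` produces for a plain
+// `#[tokio::test] async fn my_test() { assert!(true); }` looks roughly like
+// the following (a sketch assembled from the fragments above, not literal
+// macro output):
+//
+// #[::core::prelude::v1::test]
+// fn my_test() {
+//     let body = async { assert!(true); };
+//     tokio::pin!(body);
+//     let body: ::std::pin::Pin<&mut dyn ::std::future::Future<Output = ()>> = body;
+//     #[allow(clippy::expect_used, clippy::diverging_sub_expression)]
+//     {
+//         return tokio::runtime::Builder::new_current_thread()
+//             .enable_all()
+//             .build()
+//             .expect("Failed building the Runtime")
+//             .block_on(body);
+//     }
+// }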
diff --git a/crates/tokio-macros/src/lib.rs b/crates/tokio-macros/src/lib.rs
new file mode 100644
index 0000000..1d024f5
--- /dev/null
+++ b/crates/tokio-macros/src/lib.rs
@@ -0,0 +1,489 @@
+#![allow(clippy::needless_doctest_main)]
+#![warn(
+    missing_debug_implementations,
+    missing_docs,
+    rust_2018_idioms,
+    unreachable_pub
+)]
+#![doc(test(
+    no_crate_inject,
+    attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
+))]
+
+//! Macros for use with Tokio
+
+// This `extern` is required for older `rustc` versions but newer `rustc`
+// versions warn about the unused `extern crate`.
+#[allow(unused_extern_crates)]
+extern crate proc_macro;
+
+mod entry;
+mod select;
+
+use proc_macro::TokenStream;
+
+/// Marks async function to be executed by the selected runtime. This macro
+/// helps set up a `Runtime` without requiring the user to use
+/// [Runtime](../tokio/runtime/struct.Runtime.html) or
+/// [Builder](../tokio/runtime/struct.Builder.html) directly.
+///
+/// Note: This macro is designed to be simplistic and targets applications that
+/// do not require a complex setup. If the provided functionality is not
+/// sufficient, you may be interested in using
+/// [Builder](../tokio/runtime/struct.Builder.html), which provides a more
+/// powerful interface.
+///
+/// Note: This macro can be used on any function and not just the `main`
+/// function. Using it on a non-main function makes the function behave as if it
+/// was synchronous by starting a new runtime each time it is called. If the
+/// function is called often, it is preferable to create the runtime using the
+/// runtime builder so the runtime can be reused across calls.
+///
+/// # Non-worker async function
+///
+/// Note that the async function marked with this macro does not run as a
+/// worker. The expectation is that other tasks are spawned by the function here.
+/// Awaiting on other futures from the function provided here will not
+/// perform as fast as those spawned as workers.
+///
+/// # Multi-threaded runtime
+///
+/// To use the multi-threaded runtime, the macro can be configured using
+///
+/// ```
+/// #[tokio::main(flavor = "multi_thread", worker_threads = 10)]
+/// # async fn main() {}
+/// ```
+///
+/// The `worker_threads` option configures the number of worker threads, and
+/// defaults to the number of cpus on the system. This is the default flavor.
+///
+/// Note: The multi-threaded runtime requires the `rt-multi-thread` feature
+/// flag.
+///
+/// # Current thread runtime
+///
+/// To use the single-threaded runtime known as the `current_thread` runtime,
+/// the macro can be configured using
+///
+/// ```
+/// #[tokio::main(flavor = "current_thread")]
+/// # async fn main() {}
+/// ```
+///
+/// ## Function arguments:
+///
+/// Arguments are allowed for any function aside from `main`, which is special
+///
+/// ## Usage
+///
+/// ### Using the multi-thread runtime
+///
+/// ```rust
+/// #[tokio::main]
+/// async fn main() {
+///     println!("Hello world");
+/// }
+/// ```
+///
+/// Equivalent code not using `#[tokio::main]`
+///
+/// ```rust
+/// fn main() {
+///     tokio::runtime::Builder::new_multi_thread()
+///         .enable_all()
+///         .build()
+///         .unwrap()
+///         .block_on(async {
+///             println!("Hello world");
+///         })
+/// }
+/// ```
+///
+/// ### Using current thread runtime
+///
+/// The basic scheduler is single-threaded.
+///
+/// ```rust
+/// #[tokio::main(flavor = "current_thread")]
+/// async fn main() {
+///     println!("Hello world");
+/// }
+/// ```
+///
+/// Equivalent code not using `#[tokio::main]`
+///
+/// ```rust
+/// fn main() {
+///     tokio::runtime::Builder::new_current_thread()
+///         .enable_all()
+///         .build()
+///         .unwrap()
+///         .block_on(async {
+///             println!("Hello world");
+///         })
+/// }
+/// ```
+///
+/// ### Set number of worker threads
+///
+/// ```rust
+/// #[tokio::main(worker_threads = 2)]
+/// async fn main() {
+///     println!("Hello world");
+/// }
+/// ```
+///
+/// Equivalent code not using `#[tokio::main]`
+///
+/// ```rust
+/// fn main() {
+///     tokio::runtime::Builder::new_multi_thread()
+///         .worker_threads(2)
+///         .enable_all()
+///         .build()
+///         .unwrap()
+///         .block_on(async {
+///             println!("Hello world");
+///         })
+/// }
+/// ```
+///
+/// ### Configure the runtime to start with time paused
+///
+/// ```rust
+/// #[tokio::main(flavor = "current_thread", start_paused = true)]
+/// async fn main() {
+///     println!("Hello world");
+/// }
+/// ```
+///
+/// Equivalent code not using `#[tokio::main]`
+///
+/// ```rust
+/// fn main() {
+///     tokio::runtime::Builder::new_current_thread()
+///         .enable_all()
+///         .start_paused(true)
+///         .build()
+///         .unwrap()
+///         .block_on(async {
+///             println!("Hello world");
+///         })
+/// }
+/// ```
+///
+/// Note that `start_paused` requires the `test-util` feature to be enabled.
+///
+/// ### Rename package
+///
+/// ```rust
+/// use tokio as tokio1;
+///
+/// #[tokio1::main(crate = "tokio1")]
+/// async fn main() {
+///     println!("Hello world");
+/// }
+/// ```
+///
+/// Equivalent code not using `#[tokio::main]`
+///
+/// ```rust
+/// use tokio as tokio1;
+///
+/// fn main() {
+///     tokio1::runtime::Builder::new_multi_thread()
+///         .enable_all()
+///         .build()
+///         .unwrap()
+///         .block_on(async {
+///             println!("Hello world");
+///         })
+/// }
+/// ```
+#[proc_macro_attribute]
+#[cfg(not(test))] // Work around for rust-lang/rust#62127
+pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
+    entry::main(args.into(), item.into(), true).into()
+}
+
+/// Marks async function to be executed by selected runtime. This macro helps set up a `Runtime`
+/// without requiring the user to use [Runtime](../tokio/runtime/struct.Runtime.html) or
+/// [Builder](../tokio/runtime/struct.Builder.html) directly.
+///
+/// ## Function arguments:
+///
+/// Arguments are allowed for any function aside from `main`, which is special
+///
+/// ## Usage
+///
+/// ### Using default
+///
+/// ```rust
+/// #[tokio::main(flavor = "current_thread")]
+/// async fn main() {
+///     println!("Hello world");
+/// }
+/// ```
+///
+/// Equivalent code not using `#[tokio::main]`
+///
+/// ```rust
+/// fn main() {
+///     tokio::runtime::Builder::new_current_thread()
+///         .enable_all()
+///         .build()
+///         .unwrap()
+///         .block_on(async {
+///             println!("Hello world");
+///         })
+/// }
+/// ```
+///
+/// ### Rename package
+///
+/// ```rust
+/// use tokio as tokio1;
+///
+/// #[tokio1::main(crate = "tokio1")]
+/// async fn main() {
+///     println!("Hello world");
+/// }
+/// ```
+///
+/// Equivalent code not using `#[tokio::main]`
+///
+/// ```rust
+/// use tokio as tokio1;
+///
+/// fn main() {
+///     tokio1::runtime::Builder::new_multi_thread()
+///         .enable_all()
+///         .build()
+///         .unwrap()
+///         .block_on(async {
+///             println!("Hello world");
+///         })
+/// }
+/// ```
+#[proc_macro_attribute]
+#[cfg(not(test))] // Work around for rust-lang/rust#62127
+pub fn main_rt(args: TokenStream, item: TokenStream) -> TokenStream {
+    entry::main(args.into(), item.into(), false).into()
+}
+
+/// Marks async function to be executed by runtime, suitable to test environment.
+/// This macro helps set up a `Runtime` without requiring the user to use
+/// [Runtime](../tokio/runtime/struct.Runtime.html) or
+/// [Builder](../tokio/runtime/struct.Builder.html) directly.
+///
+/// Note: This macro is designed to be simplistic and targets applications that
+/// do not require a complex setup. If the provided functionality is not
+/// sufficient, you may be interested in using
+/// [Builder](../tokio/runtime/struct.Builder.html), which provides a more
+/// powerful interface.
+///
+/// # Multi-threaded runtime
+///
+/// To use the multi-threaded runtime, the macro can be configured using
+///
+/// ```no_run
+/// #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+/// async fn my_test() {
+///     assert!(true);
+/// }
+/// ```
+///
+/// The `worker_threads` option configures the number of worker threads, and
+/// defaults to the number of cpus on the system.
+///
+/// Note: The multi-threaded runtime requires the `rt-multi-thread` feature
+/// flag.
+///
+/// # Current thread runtime
+///
+/// The default test runtime is single-threaded. Each test gets a
+/// separate current-thread runtime.
+///
+/// ```no_run
+/// #[tokio::test]
+/// async fn my_test() {
+///     assert!(true);
+/// }
+/// ```
+///
+/// ## Usage
+///
+/// ### Using the multi-thread runtime
+///
+/// ```no_run
+/// #[tokio::test(flavor = "multi_thread")]
+/// async fn my_test() {
+///     assert!(true);
+/// }
+/// ```
+///
+/// Equivalent code not using `#[tokio::test]`
+///
+/// ```no_run
+/// #[test]
+/// fn my_test() {
+///     tokio::runtime::Builder::new_multi_thread()
+///         .enable_all()
+///         .build()
+///         .unwrap()
+///         .block_on(async {
+///             assert!(true);
+///         })
+/// }
+/// ```
+///
+/// ### Using current thread runtime
+///
+/// ```no_run
+/// #[tokio::test]
+/// async fn my_test() {
+///     assert!(true);
+/// }
+/// ```
+///
+/// Equivalent code not using `#[tokio::test]`
+///
+/// ```no_run
+/// #[test]
+/// fn my_test() {
+///     tokio::runtime::Builder::new_current_thread()
+///         .enable_all()
+///         .build()
+///         .unwrap()
+///         .block_on(async {
+///             assert!(true);
+///         })
+/// }
+/// ```
+///
+/// ### Set number of worker threads
+///
+/// ```no_run
+/// #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+/// async fn my_test() {
+///     assert!(true);
+/// }
+/// ```
+///
+/// Equivalent code not using `#[tokio::test]`
+///
+/// ```no_run
+/// #[test]
+/// fn my_test() {
+///     tokio::runtime::Builder::new_multi_thread()
+///         .worker_threads(2)
+///         .enable_all()
+///         .build()
+///         .unwrap()
+///         .block_on(async {
+///             assert!(true);
+///         })
+/// }
+/// ```
+///
+/// ### Configure the runtime to start with time paused
+///
+/// ```no_run
+/// #[tokio::test(start_paused = true)]
+/// async fn my_test() {
+///     assert!(true);
+/// }
+/// ```
+///
+/// Equivalent code not using `#[tokio::test]`
+///
+/// ```no_run
+/// #[test]
+/// fn my_test() {
+///     tokio::runtime::Builder::new_current_thread()
+///         .enable_all()
+///         .start_paused(true)
+///         .build()
+///         .unwrap()
+///         .block_on(async {
+///             assert!(true);
+///         })
+/// }
+/// ```
+///
+/// Note that `start_paused` requires the `test-util` feature to be enabled.
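+///
+/// A minimal sketch (assuming the `test-util` feature is enabled) of what
+/// paused time makes possible: a long sleep resolves immediately because the
+/// paused clock auto-advances once the runtime has no other work to do.
+///
+/// ```no_run
+/// #[tokio::test(start_paused = true)]
+/// async fn long_sleep_is_instant() {
+///     // With the clock paused, this does not wait an hour of wall time.
+///     tokio::time::sleep(std::time::Duration::from_secs(3600)).await;
+/// }
+/// ```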
+///
+/// ### Rename package
+///
+/// ```rust
+/// use tokio as tokio1;
+///
+/// #[tokio1::test(crate = "tokio1")]
+/// async fn my_test() {
+///     println!("Hello world");
+/// }
+/// ```
+#[proc_macro_attribute]
+pub fn test(args: TokenStream, item: TokenStream) -> TokenStream {
+    entry::test(args.into(), item.into(), true).into()
+}
+
+/// Marks an async function to be executed by the runtime, suitable for the test environment.
+///
+/// ## Usage
+///
+/// ```no_run
+/// #[tokio::test]
+/// async fn my_test() {
+///     assert!(true);
+/// }
+/// ```
+#[proc_macro_attribute]
+pub fn test_rt(args: TokenStream, item: TokenStream) -> TokenStream {
+    entry::test(args.into(), item.into(), false).into()
+}
+
+/// Always fails with the error message below.
+/// ```text
+/// The #[tokio::main] macro requires rt or rt-multi-thread.
+/// ```
+#[proc_macro_attribute]
+pub fn main_fail(_args: TokenStream, _item: TokenStream) -> TokenStream {
+    syn::Error::new(
+        proc_macro2::Span::call_site(),
+        "The #[tokio::main] macro requires rt or rt-multi-thread.",
+    )
+    .to_compile_error()
+    .into()
+}
+
+/// Always fails with the error message below.
+/// ```text
+/// The #[tokio::test] macro requires rt or rt-multi-thread.
+/// ```
+#[proc_macro_attribute]
+pub fn test_fail(_args: TokenStream, _item: TokenStream) -> TokenStream {
+    syn::Error::new(
+        proc_macro2::Span::call_site(),
+        "The #[tokio::test] macro requires rt or rt-multi-thread.",
+    )
+    .to_compile_error()
+    .into()
+}
+
+/// Implementation detail of the `select!` macro. This macro is **not** intended
+/// to be used as part of the public API and is permitted to change.
+#[proc_macro]
+#[doc(hidden)]
+pub fn select_priv_declare_output_enum(input: TokenStream) -> TokenStream {
+    select::declare_output_enum(input)
+}
+
+/// Implementation detail of the `select!` macro. This macro is **not** intended
+/// to be used as part of the public API and is permitted to change.
+#[proc_macro]
+#[doc(hidden)]
+pub fn select_priv_clean_pattern(input: TokenStream) -> TokenStream {
+    select::clean_pattern_macro(input)
+}
diff --git a/crates/tokio-macros/src/select.rs b/crates/tokio-macros/src/select.rs
new file mode 100644
index 0000000..dd491f8
--- /dev/null
+++ b/crates/tokio-macros/src/select.rs
@@ -0,0 +1,109 @@
+use proc_macro::{TokenStream, TokenTree};
+use proc_macro2::Span;
+use quote::quote;
+use syn::{parse::Parser, Ident};
+
+pub(crate) fn declare_output_enum(input: TokenStream) -> TokenStream {
+    // passed in is: `(_ _ _)` with one `_` per branch
+    let branches = match input.into_iter().next() {
+        Some(TokenTree::Group(group)) => group.stream().into_iter().count(),
+        _ => panic!("unexpected macro input"),
+    };
+
+    let variants = (0..branches)
+        .map(|num| Ident::new(&format!("_{}", num), Span::call_site()))
+        .collect::<Vec<_>>();
+
+    // Use a bitfield to track which futures completed
+    let mask = Ident::new(
+        if branches <= 8 {
+            "u8"
+        } else if branches <= 16 {
+            "u16"
+        } else if branches <= 32 {
+            "u32"
+        } else if branches <= 64 {
+            "u64"
+        } else {
+            panic!("up to 64 branches supported");
+        },
+        Span::call_site(),
+    );
+
+    TokenStream::from(quote! {
+        pub(super) enum Out<#( #variants ),*> {
+            #( #variants(#variants), )*
+            // Include a `Disabled` variant signifying that all select branches
+            // failed to resolve.
+            Disabled,
+        }
+
+        pub(super) type Mask = #mask;
+    })
+}
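+
+// Illustrative sketch (not exercised by the code above): for three select
+// branches the macro expands to roughly the following, where each variant
+// carries the output of one branch and `Mask` is the smallest unsigned
+// integer wide enough to hold one bit per branch:
+//
+//     pub(super) enum Out<_0, _1, _2> {
+//         _0(_0),
+//         _1(_1),
+//         _2(_2),
+//         Disabled,
+//     }
+//
+//     pub(super) type Mask = u8;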
+
+pub(crate) fn clean_pattern_macro(input: TokenStream) -> TokenStream {
+    // If this isn't a pattern, we return the token stream as-is. The select!
+    // macro is using it in a location requiring a pattern, so an error will be
+    // emitted there.
+    let mut input: syn::Pat = match syn::Pat::parse_single.parse(input.clone()) {
+        Ok(it) => it,
+        Err(_) => return input,
+    };
+
+    clean_pattern(&mut input);
+    quote::ToTokens::into_token_stream(input).into()
+}
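+
+// Illustrative sketch: `clean_pattern` below rewrites a binding pattern such as
+//
+//     Some(ref mut value)
+//
+// into
+//
+//     Some(value)
+//
+// stripping the `ref`/`mut` binding modes while leaving the pattern's shape
+// unchanged.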
+
+// Removes any occurrences of ref or mut in the provided pattern.
+fn clean_pattern(pat: &mut syn::Pat) {
+    match pat {
+        syn::Pat::Lit(_literal) => {}
+        syn::Pat::Macro(_macro) => {}
+        syn::Pat::Path(_path) => {}
+        syn::Pat::Range(_range) => {}
+        syn::Pat::Rest(_rest) => {}
+        syn::Pat::Verbatim(_tokens) => {}
+        syn::Pat::Wild(_underscore) => {}
+        syn::Pat::Ident(ident) => {
+            ident.by_ref = None;
+            ident.mutability = None;
+            if let Some((_at, pat)) = &mut ident.subpat {
+                clean_pattern(&mut *pat);
+            }
+        }
+        syn::Pat::Or(or) => {
+            for case in or.cases.iter_mut() {
+                clean_pattern(case);
+            }
+        }
+        syn::Pat::Slice(slice) => {
+            for elem in slice.elems.iter_mut() {
+                clean_pattern(elem);
+            }
+        }
+        syn::Pat::Struct(struct_pat) => {
+            for field in struct_pat.fields.iter_mut() {
+                clean_pattern(&mut field.pat);
+            }
+        }
+        syn::Pat::Tuple(tuple) => {
+            for elem in tuple.elems.iter_mut() {
+                clean_pattern(elem);
+            }
+        }
+        syn::Pat::TupleStruct(tuple) => {
+            for elem in tuple.elems.iter_mut() {
+                clean_pattern(elem);
+            }
+        }
+        syn::Pat::Reference(reference) => {
+            reference.mutability = None;
+            clean_pattern(&mut reference.pat);
+        }
+        syn::Pat::Type(type_pat) => {
+            clean_pattern(&mut type_pat.pat);
+        }
+        _ => {}
+    }
+}
diff --git a/pseudo_crate/Cargo.lock b/pseudo_crate/Cargo.lock
index e33cc20..74a059f 100644
--- a/pseudo_crate/Cargo.lock
+++ b/pseudo_crate/Cargo.lock
@@ -216,6 +216,31 @@
  "rustc-hash",
  "rustversion",
  "scopeguard",
+ "sec1",
+ "semver",
+ "serde_cbor",
+ "serde_derive",
+ "shared_library",
+ "slab",
+ "smallvec",
+ "smccc",
+ "socket2",
+ "spin",
+ "spki",
+ "strsim 0.11.0",
+ "strum",
+ "strum_macros",
+ "syn-mid",
+ "sync_wrapper 1.0.1",
+ "synstructure",
+ "tempfile",
+ "termcolor",
+ "termtree",
+ "textwrap",
+ "thiserror",
+ "thread_local",
+ "tinytemplate",
+ "tokio-macros",
 ]
 
 [[package]]
@@ -399,7 +424,7 @@
  "serde_json",
  "serde_path_to_error",
  "serde_urlencoded",
- "sync_wrapper",
+ "sync_wrapper 0.1.2",
  "tokio",
  "tower",
  "tower-layer",
@@ -439,6 +464,12 @@
 ]
 
 [[package]]
+name = "base16ct"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf"
+
+[[package]]
 name = "base64"
 version = "0.22.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -614,7 +645,7 @@
 checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
 dependencies = [
  "ciborium-io",
- "half",
+ "half 2.4.1",
 ]
 
 [[package]]
@@ -868,7 +899,7 @@
  "ident_case",
  "proc-macro2",
  "quote",
- "strsim",
+ "strsim 0.10.0",
  "syn 2.0.43",
 ]
 
@@ -909,6 +940,7 @@
 checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c"
 dependencies = [
  "const-oid",
+ "zeroize",
 ]
 
 [[package]]
@@ -1371,6 +1403,12 @@
 
 [[package]]
 name = "half"
+version = "1.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403"
+
+[[package]]
+name = "half"
 version = "2.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888"
@@ -2087,7 +2125,7 @@
 dependencies = [
  "cfg-if",
  "libc",
- "redox_syscall",
+ "redox_syscall 0.4.1",
  "smallvec",
  "windows-targets 0.48.5",
 ]
@@ -2430,6 +2468,15 @@
 
 [[package]]
 name = "redox_syscall"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
+dependencies = [
+ "bitflags 1.3.2",
+]
+
+[[package]]
+name = "redox_syscall"
 version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"
@@ -2461,6 +2508,15 @@
 checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
 
 [[package]]
+name = "remove_dir_all"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
 name = "rusqlite"
 version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2588,19 +2644,47 @@
 checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 
 [[package]]
-name = "serde"
-version = "1.0.193"
+name = "sec1"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89"
+checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc"
+dependencies = [
+ "base16ct",
+ "der",
+ "generic-array",
+ "zeroize",
+]
+
+[[package]]
+name = "semver"
+version = "1.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0"
+
+[[package]]
+name = "serde"
+version = "1.0.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "771d4d9c4163ee138805e12c710dd365e4f44be8be0503cb1bb9eb989425d9c9"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
-name = "serde_derive"
-version = "1.0.193"
+name = "serde_cbor"
+version = "0.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
+checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
+dependencies = [
+ "half 1.8.3",
+ "serde",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e801c1712f48475582b7696ac71e0ca34ebb30e09338425384269d9717c62cad"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -2609,9 +2693,9 @@
 
 [[package]]
 name = "serde_json"
-version = "1.0.109"
+version = "1.0.99"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb0652c533506ad7a2e353cce269330d6afd8bdfb6d75e0ace5b35aacbd7b9e9"
+checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3"
 dependencies = [
  "itoa",
  "ryu",
@@ -2620,9 +2704,9 @@
 
 [[package]]
 name = "serde_path_to_error"
-version = "0.1.14"
+version = "0.1.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335"
+checksum = "0b1b6471d7496b051e03f1958802a73f88b947866f5146f329e47e36554f4e55"
 dependencies = [
  "itoa",
  "serde",
@@ -2658,6 +2742,16 @@
 checksum = "5f179d4e11094a893b82fff208f74d448a7512f99f5a0acbd5c679b705f83ed9"
 
 [[package]]
+name = "shared_library"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11"
+dependencies = [
+ "lazy_static",
+ "libc",
+]
+
+[[package]]
 name = "shlex"
 version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2674,18 +2768,30 @@
 
 [[package]]
 name = "smallvec"
-version = "1.13.2"
+version = "1.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
+checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7"
+
+[[package]]
+name = "smawk"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c"
+
+[[package]]
+name = "smccc"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "617d17f088ec733e5a6b86da6ce4cce1414e6e856d6061c16dda51cceae6f68c"
 
 [[package]]
 name = "socket2"
-version = "0.5.7"
+version = "0.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
+checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9"
 dependencies = [
  "libc",
- "windows-sys 0.52.0",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -2753,6 +2859,31 @@
 checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
 
 [[package]]
+name = "strsim"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01"
+
+[[package]]
+name = "strum"
+version = "0.25.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
+
+[[package]]
+name = "strum_macros"
+version = "0.25.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "rustversion",
+ "syn 2.0.43",
+]
+
+[[package]]
 name = "syn"
 version = "1.0.109"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2775,12 +2906,55 @@
 ]
 
 [[package]]
+name = "syn-mid"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5dc35bb08dd1ca3dfb09dce91fd2d13294d6711c88897d9a9d60acf39bce049"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.43",
+]
+
+[[package]]
 name = "sync_wrapper"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
 
 [[package]]
+name = "sync_wrapper"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394"
+
+[[package]]
+name = "synstructure"
+version = "0.12.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+ "unicode-xid",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"
+dependencies = [
+ "cfg-if",
+ "fastrand",
+ "libc",
+ "redox_syscall 0.2.16",
+ "remove_dir_all",
+ "winapi",
+]
+
+[[package]]
 name = "termcolor"
 version = "1.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2797,24 +2971,29 @@
 
 [[package]]
 name = "textwrap"
-version = "0.16.1"
+version = "0.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9"
+checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
+dependencies = [
+ "smawk",
+ "unicode-linebreak",
+ "unicode-width",
+]
 
 [[package]]
 name = "thiserror"
-version = "1.0.55"
+version = "1.0.49"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e3de26b0965292219b4287ff031fcba86837900fe9cd2b34ea8ad893c0953d2"
+checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4"
 dependencies = [
  "thiserror-impl",
 ]
 
 [[package]]
 name = "thiserror-impl"
-version = "1.0.55"
+version = "1.0.49"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "268026685b2be38d7103e9e507c938a1fcb3d7e6eb15e87870b617bf37b6d581"
+checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -2822,6 +3001,16 @@
 ]
 
 [[package]]
+name = "thread_local"
+version = "1.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+]
+
+[[package]]
 name = "tinytemplate"
 version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2862,6 +3051,17 @@
 ]
 
 [[package]]
+name = "tokio-macros"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.43",
+]
+
+[[package]]
 name = "tokio-util"
 version = "0.7.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2980,6 +3180,12 @@
 checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
 
 [[package]]
+name = "unicode-linebreak"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f"
+
+[[package]]
 name = "unicode-normalization"
 version = "0.1.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2995,6 +3201,12 @@
 checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d"
 
 [[package]]
+name = "unicode-xid"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a"
+
+[[package]]
 name = "uninit"
 version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3211,6 +3423,15 @@
 
 [[package]]
 name = "windows-sys"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
+dependencies = [
+ "windows-targets 0.48.5",
+]
+
+[[package]]
+name = "windows-sys"
 version = "0.52.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
diff --git a/pseudo_crate/Cargo.toml b/pseudo_crate/Cargo.toml
index e40f43a..4192296 100644
--- a/pseudo_crate/Cargo.toml
+++ b/pseudo_crate/Cargo.toml
@@ -168,3 +168,28 @@
 rustc-hash = "=1.1.0"
 rustversion = "=1.0.14"
 scopeguard = "=1.2.0"
+sec1 = "=0.7.3"
+semver = "=1.0.21"
+serde_cbor = "=0.11.2"
+serde_derive = "=1.0.158"
+shared_library = "=0.1.9"
+slab = "=0.4.9"
+smallvec = "=1.13.1"
+smccc = "=0.1.1"
+socket2 = "=0.5.5"
+spin = "=0.9.8"
+spki = "=0.7.3"
+strsim = "=0.11.0"
+strum = "=0.25.0"
+strum_macros = "=0.25.3"
+syn-mid = "=0.6.0"
+sync_wrapper = "=1.0.1"
+synstructure = "=0.12.6"
+tempfile = "=3.3.0"
+termcolor = "=1.4.1"
+termtree = "=0.4.1"
+textwrap = "=0.16.0"
+thiserror = "=1.0.49"
+thread_local = "=1.1.7"
+tinytemplate = "=1.2.1"
+tokio-macros = "=2.1.0"