Import standback-0.2.11

Bug: 169611678
Test: make
Change-Id: I067953808a7082793faf1fd3419e7c91fd2e199d
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
new file mode 100644
index 0000000..d11bde3
--- /dev/null
+++ b/.cargo_vcs_info.json
@@ -0,0 +1,5 @@
+{
+  "git": {
+    "sha1": "552677461211a255da1538b089aef3bf35a87f0c"
+  }
+}
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..86fd733
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,65 @@
+# Changelog
+
+All notable changes to the standback project will be documented in this file.
+
+The format is based on [Keep a Changelog]. This project adheres to [Semantic
+Versioning].
+
+---
+
+## 0.2.10 [2020-08-23]
+
+- Support for Rust 1.45.0 and 1.46.0 APIs has been added.
+- `float::to_int_unchecked` has been added (stabilized in Rust 1.44.0).
+
+## 0.2.9 [2020-06-04]
+
+- Support for Rust 1.44.0 APIs has been added.
+- Non-stable releases are now supported. All releases are treated as equivalent
+  to the stable that was out at the time.
+
+## 0.2.8 [2020-04-27]
+
+Removed the `primitive` module, as it caused internal compiler errors in older
+compilers.
+
+## 0.2.7 [2020-04-25]
+
+_This version has been yanked from crates.io._
+
+Additional modules and constants were added for Rust 1.43.0.
+
+## 0.2.6 [2020-04-21]
+
+Support for Rust 1.43.0 APIs has been added.
+
+## 0.2.5 [2020-04-20]
+
+- `TryFrom` and `TryInto` are now correctly re-exported.
+- `TryFromIntError` has been moved to `mod num`.
+
+## 0.2.4 [2020-04-20]
+
+- `TryFrom` identity is implemented for some primitives.
+
+## 0.2.3 [2020-04-18]
+
+Embedded and `#![no_std]` now have full support and are checked in CI.
+
+## 0.2.2 [2020-04-02]
+
+The version of rustc being used will now respect the $RUSTC environment
+variable.
+
+## 0.2.1 [2020-03-13]
+
+Support for Rust 1.42.0 APIs has been added.
+
+## 0.2.0 [2020-03-10]
+
+- `MaybeUninit` is restricted to `Copy` types, eliminating undefined behavior.
+- `todo!` has been moved to the prelude.
+
+## 0.1.0 [2020-03-05]
+
+Initial release.
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..acd124b
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,31 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "standback"
+version = "0.2.11"
+authors = ["Jacob Pratt <the.z.cuber@gmail.com>", "The Rust Project Developers"]
+build = "build.rs"
+include = ["src/**/*", "LICENSE-*", "README.md", "CHANGELOG.md", "build.rs"]
+description = "New standard library, old compiler."
+readme = "README.md"
+keywords = ["std", "back-compatible", "polyfill"]
+categories = ["no-std"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/jhpratt/standback"
+[build-dependencies.version_check]
+version = "0.9.2"
+
+[features]
+default = ["std"]
+std = []
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
new file mode 100644
index 0000000..7182dc6
--- /dev/null
+++ b/Cargo.toml.orig
@@ -0,0 +1,20 @@
+[package]
+name = "standback"
+version = "0.2.11"
+authors = ["Jacob Pratt <the.z.cuber@gmail.com>", "The Rust Project Developers"]
+edition = "2018"
+repository = "https://github.com/jhpratt/standback"
+keywords = ["std", "back-compatible", "polyfill"]
+categories = ["no-std"]
+readme = "README.md"
+license = "MIT OR Apache-2.0"
+description = "New standard library, old compiler."
+build = "build.rs"
+include = ["src/**/*", "LICENSE-*", "README.md", "CHANGELOG.md", "build.rs"]
+
+[features]
+default = ["std"]
+std = []
+
+[build-dependencies]
+version_check = "0.9.2"
diff --git a/LICENSE b/LICENSE
new file mode 120000
index 0000000..a1c76e3
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1 @@
+LICENSE-Apache
\ No newline at end of file
diff --git a/LICENSE-Apache b/LICENSE-Apache
new file mode 100644
index 0000000..3d2fb46
--- /dev/null
+++ b/LICENSE-Apache
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2019 Jacob Pratt
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/LICENSE-MIT b/LICENSE-MIT
new file mode 100644
index 0000000..1ddffb9
--- /dev/null
+++ b/LICENSE-MIT
@@ -0,0 +1,19 @@
+Copyright (c) 2019 Jacob Pratt
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..9de191f
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,19 @@
+name: "standback"
+description: "New standard library, old compiler."
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://crates.io/crates/standback"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://static.crates.io/crates/standback/standback-0.2.11.crate"
+  }
+  version: "0.2.11"
+  license_type: NOTICE
+  last_upgrade_date {
+    year: 2020
+    month: 10
+    day: 21
+  }
+}
diff --git a/MODULE_LICENSE_APACHE2 b/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_APACHE2
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..46fc303
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1 @@
+include platform/prebuilts/rust:/OWNERS
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..2b306c8
--- /dev/null
+++ b/README.md
@@ -0,0 +1,34 @@
+# Standback
+
+![build status](https://github.com/jhpratt/standback/workflows/Build/badge.svg?branch=master&event=push)
+![license](https://img.shields.io/badge/license-MIT%20or%20Apache--2-brightgreen)
+![version](https://img.shields.io/crates/v/standback)
+![rustc 1.31.0](https://img.shields.io/badge/rustc-1.31.0-blue)
+
+[Documentation](https://docs.rs/standback)
+
+Standback exists to allow the usage of various APIs that have been stabilized
+since rustc 1.31.0 _without_ having to require users to upgrade their compiler.
+The best part? Only old features are built from scratch; anything stable on the
+compiler in use will just be re-exported.
+
+Note that it is sometimes the case that newly stabilized methods would require
+internal methods, direct access to fields, or nightly features to work. As such,
+not every feature is backported. Found a neat way to implement a method or type
+that is possible on stable? Pull requests are accepted!
+
+## License
+
+A majority of this code comes directly from the Rust standard library, where its
+license is the following. All new code is also released under this license.
+
+This project is licensed under either of
+
+- [Apache License, Version 2.0](https://github.com/jhpratt/standback/blob/master/LICENSE-Apache)
+- [MIT license](https://github.com/jhpratt/standback/blob/master/LICENSE-MIT)
+
+at your option.
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/build.rs b/build.rs
new file mode 100644
index 0000000..6754aac
--- /dev/null
+++ b/build.rs
@@ -0,0 +1,40 @@
+use version_check::{Channel, Version};
+
+// We assume that features are never stabilized in patch versions.
+// If a "Rust 2.0" is ever released, we'll have to handle that explicitly.
+const MSRV_MINOR: u16 = 31;
+const CURRENT_MINOR: u16 = 47;
+
+fn main() {
+    let msrv = Version::from_mmp(1, MSRV_MINOR, 0); // fallback used when rustc's version cannot be read
+
+    let mut minor_used = match Version::read() {
+        Some(version) => version,
+        None => {
+            println!(
+                "cargo:warning=Unable to determine rustc version. Assuming rustc {}.",
+                msrv
+            );
+            msrv // conservative: assume the minimum supported compiler
+        }
+    }
+    .to_mmp()
+    .1; // keep only the minor version; per the note above, patch releases never stabilize features
+
+    // Treat pre-stable channels as the stable release that was current at the time.
+    let channel = Channel::read();
+    match channel {
+        Some(channel) if channel.is_beta() => minor_used -= 1, // beta is one release behind stable
+        Some(channel) if channel.is_nightly() => minor_used -= 2, // nightly is two releases behind
+        Some(channel) if channel.is_dev() => minor_used -= 3, // dev: three behind -- NOTE(review): could underflow past MSRV on very old toolchains; confirm
+        _ => {}
+    }
+
+    for minor in (MSRV_MINOR + 1)..=CURRENT_MINOR {
+        if minor <= minor_used {
+            println!("cargo:rustc-cfg=__standback_since_1_{}", minor); // compiler already has these APIs
+        } else {
+            println!("cargo:rustc-cfg=__standback_before_1_{}", minor); // gate in the backport module (see src/lib.rs)
+        }
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..cf20a59
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,724 @@
+#![allow(non_camel_case_types, unstable_name_collisions)]
+#![cfg_attr(not(feature = "std"), no_std)]
+
+//! Standback backports a number of methods, structs, and macros that have been
+//! stabilized in the Rust standard library since 1.31.0. This allows crate
+//! authors to depend on Standback rather than forcing downstream users to
+//! upgrade their compiler (or not use the new feature at all).
+//!
+//! Due to a variety of restrictions in the Rust language, it is not possible to
+//! implement everything that has been stabilized.
+//!
+//! # Usage
+//!
+//! If you are using methods on already-existing structs, you should use the
+//! following:
+//!
+//! ```rust,no_run
+//! use standback::prelude::*;
+//! ```
+//!
+//! Additionally, if you are using newly stabilized structs, types, or anything
+//! else that would normally have to be imported, use `standback` instead of
+//! `std`:
+//!
+//! ```rust,no_run
+//! use standback::mem::take;
+//! ```
+//!
+//! It is _highly_ recommended to use `#![allow(unstable_name_collisions)]`, as
+//! that's the whole point of this crate. Just be extra-careful to not do it for
+//! anything that _can't_ be backported.
+//!
+//! # `#![no_std]` support
+//!
+//! By default, the standard library is used where necessary. If support for
+//! `#![no_std]` is required, use `default-features = false`.
+//!
+//! An allocator is not required for any backported item. If any require an
+//! allocator in the future, it will be gated under an `alloc` feature.
+//!
+//! # Methods on existing structs
+//!
+//! The following methods and constants are available via the prelude:
+//!
+//! ```rust,ignore
+//! // 1.47
+//! Range::is_empty
+//! Result::as_deref
+//! Result::as_deref_mut
+//! Vec::leak
+//! f32::TAU
+//! f64::TAU
+//!
+//! // 1.46
+//! i8::leading_ones
+//! i8::trailing_ones
+//! i16::leading_ones
+//! i16::trailing_ones
+//! i32::leading_ones
+//! i32::trailing_ones
+//! i64::leading_ones
+//! i64::trailing_ones
+//! i128::leading_ones
+//! i128::trailing_ones
+//! isize::leading_ones
+//! isize::trailing_ones
+//! u8::leading_ones
+//! u8::trailing_ones
+//! u16::leading_ones
+//! u16::trailing_ones
+//! u32::leading_ones
+//! u32::trailing_ones
+//! u64::leading_ones
+//! u64::trailing_ones
+//! u128::leading_ones
+//! u128::trailing_ones
+//! usize::leading_ones
+//! usize::trailing_ones
+//! Option::zip
+//!
+//! // 1.45
+//! i8::saturating_abs
+//! i8::saturating_neg
+//! i16::saturating_abs
+//! i16::saturating_neg
+//! i32::saturating_abs
+//! i32::saturating_neg
+//! i64::saturating_abs
+//! i64::saturating_neg
+//! i128::saturating_abs
+//! i128::saturating_neg
+//! isize::saturating_abs
+//! isize::saturating_neg
+//!
+//! // 1.44
+//! PathBuf::with_capacity
+//! PathBuf::capacity
+//! PathBuf::clear
+//! PathBuf::reserve
+//! PathBuf::reserve_exact
+//! PathBuf::shrink_to_fit
+//! Layout::align_to
+//! Layout::pad_to_align
+//! Layout::array
+//! Layout::extend
+//! f32::to_int_unchecked
+//! f64::to_int_unchecked
+//!
+//! // 1.43
+//! f32::RADIX
+//! f32::MANTISSA_DIGITS
+//! f32::DIGITS
+//! f32::EPSILON
+//! f32::MIN
+//! f32::MIN_POSITIVE
+//! f32::MAX
+//! f32::MIN_EXP
+//! f32::MAX_EXP
+//! f32::MIN_10_EXP
+//! f32::MAX_10_EXP
+//! f32::NAN
+//! f32::INFINITY
+//! f32::NEG_INFINITY
+//! f64::RADIX
+//! f64::MANTISSA_DIGITS
+//! f64::DIGITS
+//! f64::EPSILON
+//! f64::MIN
+//! f64::MIN_POSITIVE
+//! f64::MAX
+//! f64::MIN_EXP
+//! f64::MAX_EXP
+//! f64::MIN_10_EXP
+//! f64::MAX_10_EXP
+//! f64::NAN
+//! f64::INFINITY
+//! f64::NEG_INFINITY
+//! u8::MIN
+//! u8::MAX
+//! u16::MIN
+//! u16::MAX
+//! u32::MIN
+//! u32::MAX
+//! u64::MIN
+//! u64::MAX
+//! u128::MIN
+//! u128::MAX
+//! usize::MIN
+//! usize::MAX
+//! i8::MIN
+//! i8::MAX
+//! i16::MIN
+//! i16::MAX
+//! i32::MIN
+//! i32::MAX
+//! i64::MIN
+//! i64::MAX
+//! i128::MIN
+//! i128::MAX
+//! isize::MIN
+//! isize::MAX
+//!
+//! // 1.42
+//! CondVar::wait_while
+//! CondVar::wait_timeout_while
+//! ManuallyDrop::take
+//!
+//! // 1.41
+//! Result::map_or
+//! Result::map_or_else
+//!
+//! // 1.40
+//! Option::as_deref
+//! Option::as_deref_mut
+//! f32::to_be_bytes
+//! f32::to_le_bytes
+//! f32::to_ne_bytes
+//! f64::to_be_bytes
+//! f64::to_le_bytes
+//! f64::to_ne_bytes
+//! f32::from_be_bytes
+//! f32::from_le_bytes
+//! f32::from_ne_bytes
+//! f64::from_be_bytes
+//! f64::from_le_bytes
+//! f64::from_ne_bytes
+//! slice::repeat
+//!
+//! // 1.39
+//! // None :(
+//!
+//! // 1.38
+//! <*const T>::cast
+//! <*mut T>::cast
+//! Duration::as_secs_f32
+//! Duration::as_secs_f64
+//! Duration::div_f32
+//! Duration::div_f64
+//! Duration::from_secs_f32
+//! Duration::from_secs_f64
+//! Duration::mul_f32
+//! Duration::mul_f64
+//! i8::rem_euclid
+//! i8::checked_rem_euclid
+//! i8::wrapping_rem_euclid
+//! i8::overflowing_rem_euclid
+//! i8::div_euclid
+//! i8::checked_div_euclid
+//! i8::wrapping_div_euclid
+//! i8::overflowing_div_euclid
+//! i16::rem_euclid
+//! i16::checked_rem_euclid
+//! i16::wrapping_rem_euclid
+//! i16::overflowing_rem_euclid
+//! i16::div_euclid
+//! i16::checked_div_euclid
+//! i16::wrapping_div_euclid
+//! i16::overflowing_div_euclid
+//! i32::rem_euclid
+//! i32::checked_rem_euclid
+//! i32::wrapping_rem_euclid
+//! i32::overflowing_rem_euclid
+//! i32::div_euclid
+//! i32::checked_div_euclid
+//! i32::wrapping_div_euclid
+//! i32::overflowing_div_euclid
+//! i64::rem_euclid
+//! i64::checked_rem_euclid
+//! i64::wrapping_rem_euclid
+//! i64::overflowing_rem_euclid
+//! i64::div_euclid
+//! i64::checked_div_euclid
+//! i64::wrapping_div_euclid
+//! i64::overflowing_div_euclid
+//! i128::rem_euclid
+//! i128::checked_rem_euclid
+//! i128::wrapping_rem_euclid
+//! i128::overflowing_rem_euclid
+//! i128::div_euclid
+//! i128::checked_div_euclid
+//! i128::wrapping_div_euclid
+//! i128::overflowing_div_euclid
+//! isize::rem_euclid
+//! isize::checked_rem_euclid
+//! isize::wrapping_rem_euclid
+//! isize::overflowing_rem_euclid
+//! isize::div_euclid
+//! isize::checked_div_euclid
+//! isize::wrapping_div_euclid
+//! isize::overflowing_div_euclid
+//! u8::rem_euclid
+//! u8::checked_rem_euclid
+//! u8::wrapping_rem_euclid
+//! u8::overflowing_rem_euclid
+//! u8::div_euclid
+//! u8::checked_div_euclid
+//! u8::wrapping_div_euclid
+//! u8::overflowing_div_euclid
+//! u16::rem_euclid
+//! u16::checked_rem_euclid
+//! u16::wrapping_rem_euclid
+//! u16::overflowing_rem_euclid
+//! u16::div_euclid
+//! u16::checked_div_euclid
+//! u16::wrapping_div_euclid
+//! u16::overflowing_div_euclid
+//! u32::rem_euclid
+//! u32::checked_rem_euclid
+//! u32::wrapping_rem_euclid
+//! u32::overflowing_rem_euclid
+//! u32::div_euclid
+//! u32::checked_div_euclid
+//! u32::wrapping_div_euclid
+//! u32::overflowing_div_euclid
+//! u64::rem_euclid
+//! u64::checked_rem_euclid
+//! u64::wrapping_rem_euclid
+//! u64::overflowing_rem_euclid
+//! u64::div_euclid
+//! u64::checked_div_euclid
+//! u64::wrapping_div_euclid
+//! u64::overflowing_div_euclid
+//! u128::rem_euclid
+//! u128::checked_rem_euclid
+//! u128::wrapping_rem_euclid
+//! u128::overflowing_rem_euclid
+//! u128::div_euclid
+//! u128::checked_div_euclid
+//! u128::wrapping_div_euclid
+//! u128::overflowing_div_euclid
+//! usize::rem_euclid
+//! usize::checked_rem_euclid
+//! usize::wrapping_rem_euclid
+//! usize::overflowing_rem_euclid
+//! usize::div_euclid
+//! usize::checked_div_euclid
+//! usize::wrapping_div_euclid
+//! usize::overflowing_div_euclid
+//! f32::rem_euclid
+//! f32::div_euclid
+//! f64::rem_euclid
+//! f64::div_euclid
+//!
+//! // 1.37
+//! Cell::from_mut
+//! Cell<[T]>::as_slice_of_cells
+//! DoubleEndedIterator::nth_back
+//! Option::xor
+//! slice::copy_within
+//!
+//! // 1.36
+//! Iterator::copied
+//! mem::MaybeUninit
+//! task::Context
+//! task::RawWaker
+//! task::RawWakerVTable
+//! task::Waker
+//! task::Poll
+//!
+//! // 1.35
+//! RefCell::replace_with
+//! ptr::hash
+//! Range::contains
+//! RangeFrom::contains
+//! RangeTo::contains
+//! RangeInclusive::contains
+//! RangeToInclusive::contains
+//! Option::copied
+//!
+//! // 1.34
+//! slice::sort_by_cached_key
+//! i8::checked_pow
+//! i8::saturating_pow
+//! i8::wrapping_pow
+//! i8::overflowing_pow
+//! i16::checked_pow
+//! i16::saturating_pow
+//! i16::wrapping_pow
+//! i16::overflowing_pow
+//! i32::checked_pow
+//! i32::saturating_pow
+//! i32::wrapping_pow
+//! i32::overflowing_pow
+//! i64::checked_pow
+//! i64::saturating_pow
+//! i64::wrapping_pow
+//! i64::overflowing_pow
+//! i128::checked_pow
+//! i128::saturating_pow
+//! i128::wrapping_pow
+//! i128::overflowing_pow
+//! isize::checked_pow
+//! isize::saturating_pow
+//! isize::wrapping_pow
+//! isize::overflowing_pow
+//! u8::checked_pow
+//! u8::saturating_pow
+//! u8::wrapping_pow
+//! u8::overflowing_pow
+//! u16::checked_pow
+//! u16::saturating_pow
+//! u16::wrapping_pow
+//! u16::overflowing_pow
+//! u32::checked_pow
+//! u32::saturating_pow
+//! u32::wrapping_pow
+//! u32::overflowing_pow
+//! u64::checked_pow
+//! u64::saturating_pow
+//! u64::wrapping_pow
+//! u64::overflowing_pow
+//! u128::checked_pow
+//! u128::saturating_pow
+//! u128::wrapping_pow
+//! u128::overflowing_pow
+//! usize::checked_pow
+//! usize::saturating_pow
+//! usize::wrapping_pow
+//! usize::overflowing_pow
+//!
+//! // 1.33
+//! os::unix::fs::FileExt::read_exact_at
+//! os::unix::fs::FileExt::write_all_at
+//! Option::transpose
+//! Result::transpose
+//! VecDeque::resize_with
+//! Duration::as_millis
+//! Duration::as_micros
+//! Duration::as_nanos
+//!
+//! // 1.32
+//! i8::to_be_bytes
+//! i8::to_le_bytes
+//! i8::to_ne_bytes
+//! i8::from_be_bytes
+//! i8::from_le_bytes
+//! i8::from_ne_bytes
+//! i16::to_be_bytes
+//! i16::to_le_bytes
+//! i16::to_ne_bytes
+//! i16::from_be_bytes
+//! i16::from_le_bytes
+//! i16::from_ne_bytes
+//! i32::to_be_bytes
+//! i32::to_le_bytes
+//! i32::to_ne_bytes
+//! i32::from_be_bytes
+//! i32::from_le_bytes
+//! i32::from_ne_bytes
+//! i64::to_be_bytes
+//! i64::to_le_bytes
+//! i64::to_ne_bytes
+//! i64::from_be_bytes
+//! i64::from_le_bytes
+//! i64::from_ne_bytes
+//! i128::to_be_bytes
+//! i128::to_le_bytes
+//! i128::to_ne_bytes
+//! i128::from_be_bytes
+//! i128::from_le_bytes
+//! i128::from_ne_bytes
+//! isize::to_be_bytes
+//! isize::to_le_bytes
+//! isize::to_ne_bytes
+//! isize::from_be_bytes
+//! isize::from_le_bytes
+//! isize::from_ne_bytes
+//! u8::to_be_bytes
+//! u8::to_le_bytes
+//! u8::to_ne_bytes
+//! u8::from_be_bytes
+//! u8::from_le_bytes
+//! u8::from_ne_bytes
+//! u16::to_be_bytes
+//! u16::to_le_bytes
+//! u16::to_ne_bytes
+//! u16::from_be_bytes
+//! u16::from_le_bytes
+//! u16::from_ne_bytes
+//! u32::to_be_bytes
+//! u32::to_le_bytes
+//! u32::to_ne_bytes
+//! u32::from_be_bytes
+//! u32::from_le_bytes
+//! u32::from_ne_bytes
+//! u64::to_be_bytes
+//! u64::to_le_bytes
+//! u64::to_ne_bytes
+//! u64::from_be_bytes
+//! u64::from_le_bytes
+//! u64::from_ne_bytes
+//! u128::to_be_bytes
+//! u128::to_le_bytes
+//! u128::to_ne_bytes
+//! u128::from_be_bytes
+//! u128::from_le_bytes
+//! u128::from_ne_bytes
+//! usize::to_be_bytes
+//! usize::to_le_bytes
+//! usize::to_ne_bytes
+//! usize::from_be_bytes
+//! usize::from_le_bytes
+//! usize::from_ne_bytes
+//! ```
+//!
+//! # Other APIs implemented
+//!
+//! ```rust,ignore
+//! char::UNICODE_VERSION // 1.45
+//! f32::LOG10_2 // 1.43
+//! f32::LOG2_10 // 1.43
+//! f64::LOG10_2 // 1.43
+//! f64::LOG2_10 // 1.43
+//! iter::once_with // 1.43
+//! mem::take // 1.40
+//! iterator::Copied // 1.36
+//! array::TryFromSliceError // 1.36
+//! iter::from_fn // 1.34
+//! iter::successors // 1.34
+//! convert::TryFrom // 1.34
+//! convert::TryInto // 1.34
+//! num::TryFromIntError // 1.34
+//! convert::identity // 1.33
+//! pin::Pin // 1.33
+//! marker::Unpin // 1.33
+//! ```
+//!
+//! # Macros
+//!
+//! Macros should not be imported directly, but rather through the prelude.
+//!
+//! ```rust,ignore
+//! todo! // 1.39
+//! matches! // 1.42
+//! ```
+
+#![deny(rust_2018_idioms, unused_qualifications)]
+
+// A few traits to make sealing other traits simpler.
+mod traits {
+    pub trait Sealed<T: ?Sized> {}
+    impl<T: ?Sized> Sealed<T> for T {}
+
+    macro_rules! impl_trait_for_all {
+        ($trait:ident => $($type:ty)+) => {$(
+            impl $trait for $type {}
+        )+};
+    }
+
+    pub trait Integer: Sized {}
+    impl_trait_for_all!(Integer => i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize);
+
+    pub trait SignedInteger {}
+    impl_trait_for_all!(SignedInteger => i8 i16 i32 i64 i128 isize);
+
+    pub trait UnsignedInteger {}
+    impl_trait_for_all!(UnsignedInteger => u8 u16 u32 u64 u128 usize);
+
+    pub trait Float {}
+    impl_trait_for_all!(Float => f32 f64);
+}
+
+#[cfg(__standback_before_1_32)]
+mod v1_32;
+#[cfg(__standback_before_1_33)]
+mod v1_33;
+#[cfg(__standback_before_1_34)]
+mod v1_34;
+#[cfg(__standback_before_1_35)]
+mod v1_35;
+#[cfg(__standback_before_1_36)]
+mod v1_36;
+#[cfg(__standback_before_1_37)]
+mod v1_37;
+#[cfg(__standback_before_1_38)]
+mod v1_38;
+#[cfg(__standback_before_1_40)]
+mod v1_40;
+#[cfg(__standback_before_1_41)]
+mod v1_41;
+#[cfg(__standback_before_1_42)]
+mod v1_42;
+#[cfg(__standback_before_1_43)]
+mod v1_43;
+#[cfg(__standback_before_1_44)]
+mod v1_44;
+#[cfg(__standback_before_1_45)]
+mod v1_45;
+#[cfg(__standback_before_1_46)]
+mod v1_46;
+#[cfg(__standback_before_1_47)]
+mod v1_47;
+
+pub mod prelude {
+    #[cfg(__standback_before_1_42)]
+    pub use crate::matches;
+    #[cfg(__standback_before_1_32)]
+    pub use crate::v1_32::{
+        i128_v1_32, i16_v1_32, i32_v1_32, i64_v1_32, i8_v1_32, isize_v1_32, u128_v1_32, u16_v1_32,
+        u32_v1_32, u64_v1_32, u8_v1_32, usize_v1_32,
+    };
+    #[cfg(all(feature = "std", __standback_before_1_33, target_family = "unix"))]
+    pub use crate::v1_33::UnixFileExt_v1_33;
+    #[cfg(all(feature = "std", __standback_before_1_33))]
+    pub use crate::v1_33::VecDeque_v1_33;
+    #[cfg(__standback_before_1_33)]
+    pub use crate::v1_33::{Duration_v1_33, Option_v1_33, Result_v1_33};
+    #[cfg(__standback_before_1_34)]
+    pub use crate::v1_34::{Pow_v1_34, Slice_v1_34};
+    #[cfg(__standback_before_1_35)]
+    pub use crate::v1_35::{Option_v1_35, RangeBounds_v1_35, RefCell_v1_35};
+    #[cfg(__standback_before_1_36)]
+    pub use crate::v1_36::{str_v1_36, Iterator_v1_36};
+    #[cfg(__standback_before_1_37)]
+    pub use crate::v1_37::{
+        Cell_v1_37, Cell_v1_37_, DoubleEndedIterator_v1_37, Option_v1_37, Slice_v1_37,
+    };
+    #[cfg(__standback_before_1_38)]
+    pub use crate::v1_38::{
+        ConstPtr_v1_38, Duration_v1_38, EuclidFloat_v1_38, Euclid_v1_38, MutPtr_v1_38,
+    };
+    #[cfg(all(feature = "std", __standback_before_1_40))]
+    pub use crate::v1_40::slice_v1_40;
+    #[cfg(__standback_before_1_40)]
+    pub use crate::v1_40::{f32_v1_40, f64_v1_40, Option_v1_40, Option_v1_40_};
+    #[cfg(__standback_before_1_41)]
+    pub use crate::v1_41::Result_v1_41;
+    #[cfg(all(__standback_before_1_42, feature = "std"))]
+    pub use crate::v1_42::Condvar_v1_42;
+    #[cfg(__standback_before_1_42)]
+    pub use crate::v1_42::ManuallyDrop_v1_42;
+    #[cfg(__standback_before_1_43)]
+    pub use crate::v1_43::{float_v1_43, int_v1_43};
+    #[cfg(__standback_before_1_44)]
+    pub use crate::v1_44::Layout_v1_44;
+    #[cfg(all(__standback_before_1_44, feature = "std"))]
+    pub use crate::v1_44::PathBuf_v1_44;
+    #[cfg(__standback_before_1_45)]
+    pub use crate::v1_45::int_v1_45;
+    #[cfg(__standback_before_1_46)]
+    pub use crate::v1_46::{int_v1_46, Option_v1_46};
+    #[cfg(all(feature = "std", __standback_before_1_47))]
+    pub use crate::v1_47::Vec_v1_47;
+    #[cfg(__standback_before_1_47)]
+    pub use crate::v1_47::{Range_v1_47, Result_v1_47};
+    #[cfg(__standback_before_1_39)]
+    pub use core::unimplemented as todo;
+}
+
+pub mod mem {
+    #[cfg(__standback_before_1_40)]
+    pub use crate::v1_40::take;
+    #[cfg(__standback_since_1_40)]
+    pub use core::mem::take;
+
+    #[cfg(__standback_before_1_36)]
+    pub use crate::v1_36::MaybeUninit;
+    #[cfg(__standback_since_1_36)]
+    pub use core::mem::MaybeUninit;
+}
+pub mod convert {
+    #[cfg(__standback_before_1_33)]
+    pub use crate::v1_33::identity;
+    #[cfg(__standback_since_1_33)]
+    pub use core::convert::identity;
+
+    #[cfg(__standback_before_1_34)]
+    pub use crate::v1_34::Infallible;
+    #[cfg(__standback_since_1_34)]
+    pub use core::convert::Infallible;
+
+    #[cfg(__standback_before_1_34)]
+    pub use crate::v1_34::{TryFrom, TryInto};
+    #[cfg(__standback_since_1_34)]
+    pub use core::convert::{TryFrom, TryInto};
+}
+pub mod num {
+    #[cfg(__standback_before_1_34)]
+    pub use crate::v1_34::TryFromIntError;
+    #[cfg(__standback_since_1_34)]
+    pub use core::num::TryFromIntError;
+}
+pub mod iter {
+    #[cfg(__standback_before_1_36)]
+    pub use crate::v1_36::Copied;
+    #[cfg(__standback_since_1_36)]
+    pub use core::iter::Copied;
+
+    #[cfg(__standback_before_1_34)]
+    pub use crate::v1_34::{from_fn, successors};
+    #[cfg(__standback_since_1_34)]
+    pub use core::iter::{from_fn, successors};
+
+    #[cfg(__standback_before_1_43)]
+    pub use crate::v1_43::{once_with, OnceWith};
+    #[cfg(__standback_since_1_43)]
+    pub use core::iter::{once_with, OnceWith};
+}
+pub mod marker {
+    #[cfg(__standback_before_1_33)]
+    pub use crate::v1_33::Unpin;
+    #[cfg(__standback_since_1_33)]
+    pub use core::marker::Unpin;
+}
+pub mod pin {
+    #[cfg(__standback_before_1_33)]
+    pub use crate::v1_33::Pin;
+    #[cfg(__standback_since_1_33)]
+    pub use core::pin::Pin;
+}
+pub mod task {
+    #[cfg(__standback_before_1_36)]
+    pub use crate::v1_36::{Context, Poll, RawWaker, RawWakerVTable, Waker};
+    #[cfg(__standback_since_1_36)]
+    pub use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
+}
+pub mod ptr {
+    #[cfg(__standback_before_1_35)]
+    pub use crate::v1_35::hash;
+    #[cfg(__standback_since_1_35)]
+    pub use core::ptr::hash;
+}
+pub mod array {
+    #[cfg(__standback_before_1_36)]
+    pub use crate::v1_36::TryFromSliceError;
+    #[cfg(__standback_since_1_36)]
+    pub use core::array::TryFromSliceError;
+}
+pub mod f32 {
+    pub mod consts {
+        #[cfg(__standback_before_1_43)]
+        pub use crate::v1_43::f32::{LOG10_2, LOG2_10};
+        #[cfg(__standback_since_1_43)]
+        pub use core::f32::consts::{LOG10_2, LOG2_10};
+
+        #[cfg(__standback_before_1_47)]
+        pub use crate::v1_47::f32::TAU;
+        #[cfg(__standback_since_1_47)]
+        pub use core::f32::consts::TAU;
+    }
+}
+pub mod f64 {
+    pub mod consts {
+        #[cfg(__standback_before_1_43)]
+        pub use crate::v1_43::f64::{LOG10_2, LOG2_10};
+        #[cfg(__standback_since_1_43)]
+        pub use core::f64::consts::{LOG10_2, LOG2_10};
+
+        #[cfg(__standback_before_1_47)]
+        pub use crate::v1_47::f64::TAU;
+        #[cfg(__standback_since_1_47)]
+        pub use core::f64::consts::TAU;
+    }
+}
+pub mod char {
+    #[cfg(__standback_before_1_38)]
+    pub const UNICODE_VERSION: (u8, u8, u8) = (11, 0, 0);
+    #[cfg(all(__standback_since_1_38, __standback_before_1_44))]
+    pub const UNICODE_VERSION: (u8, u8, u8) = (12, 1, 0);
+    #[cfg(all(__standback_since_1_44, __standback_before_1_45))]
+    pub const UNICODE_VERSION: (u8, u8, u8) = (13, 0, 0);
+    #[cfg(__standback_since_1_45)]
+    pub use core::char::UNICODE_VERSION;
+}
diff --git a/src/v1_32.rs b/src/v1_32.rs
new file mode 100644
index 0000000..8488e9e
--- /dev/null
+++ b/src/v1_32.rs
@@ -0,0 +1,62 @@
+use crate::traits::Sealed;
+use core::mem::{size_of, transmute};
+
+macro_rules! impl_int_v1_32 {
+    ($(($trait:ident, $type:ty)),+) => {$(
+        pub trait $trait: Sealed<$type> {
+            fn to_be_bytes(self) -> [u8; size_of::<$type>()];
+            fn to_le_bytes(self) -> [u8; size_of::<$type>()];
+            fn to_ne_bytes(self) -> [u8; size_of::<$type>()];
+            fn from_be_bytes(bytes: [u8; size_of::<$type>()]) -> Self;
+            fn from_le_bytes(bytes: [u8; size_of::<$type>()]) -> Self;
+            fn from_ne_bytes(bytes: [u8; size_of::<$type>()]) -> Self;
+        }
+
+        impl $trait for $type {
+            #[inline]
+            fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
+                self.to_be().to_ne_bytes()
+            }
+
+            #[inline]
+            fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
+                self.to_le().to_ne_bytes()
+            }
+
+            #[inline]
+            fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
+                unsafe { transmute(self) }
+            }
+
+            #[inline]
+            fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
+                Self::from_be(Self::from_ne_bytes(bytes))
+            }
+
+            #[inline]
+            fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
+                Self::from_le(Self::from_ne_bytes(bytes))
+            }
+
+            #[inline]
+            fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
+                unsafe { transmute(bytes) }
+            }
+        }
+    )+};
+}
+
+impl_int_v1_32![
+    (u8_v1_32, u8),
+    (u16_v1_32, u16),
+    (u32_v1_32, u32),
+    (u64_v1_32, u64),
+    (u128_v1_32, u128),
+    (usize_v1_32, usize),
+    (i8_v1_32, i8),
+    (i16_v1_32, i16),
+    (i32_v1_32, i32),
+    (i64_v1_32, i64),
+    (i128_v1_32, i128),
+    (isize_v1_32, isize)
+];
diff --git a/src/v1_33.rs b/src/v1_33.rs
new file mode 100644
index 0000000..12a4b38
--- /dev/null
+++ b/src/v1_33.rs
@@ -0,0 +1,138 @@
+mod pin;
+
+pub use self::pin::Pin;
+use crate::traits::Sealed;
+use core::time::Duration;
+#[cfg(feature = "std")]
+use std::collections::VecDeque;
+#[cfg(all(feature = "std", target_family = "unix"))]
+use std::{io, os::unix};
+
+#[inline]
+pub const fn identity<T>(x: T) -> T {
+    x
+}
+
+pub trait Unpin {}
+impl<'a, T: ?Sized + 'a> Unpin for &'a T {}
+impl<'a, T: ?Sized + 'a> Unpin for &'a mut T {}
+
+#[cfg(all(feature = "std", target_family = "unix"))]
+pub trait UnixFileExt_v1_33: unix::fs::FileExt {
+    fn read_exact_at(&self, mut buf: &mut [u8], mut offset: u64) -> io::Result<()> {
+        while !buf.is_empty() {
+            match self.read_at(buf, offset) {
+                Ok(0) => break,
+                Ok(n) => {
+                    let tmp = buf;
+                    buf = &mut tmp[n..];
+                    offset += n as u64;
+                }
+                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+                Err(e) => return Err(e),
+            }
+        }
+        if !buf.is_empty() {
+            Err(io::Error::new(
+                io::ErrorKind::UnexpectedEof,
+                "failed to fill whole buffer",
+            ))
+        } else {
+            Ok(())
+        }
+    }
+
+    fn write_all_at(&self, mut buf: &[u8], mut offset: u64) -> io::Result<()> {
+        while !buf.is_empty() {
+            match self.write_at(buf, offset) {
+                Ok(0) => {
+                    return Err(io::Error::new(
+                        io::ErrorKind::WriteZero,
+                        "failed to write whole buffer",
+                    ));
+                }
+                Ok(n) => {
+                    buf = &buf[n..];
+                    offset += n as u64
+                }
+                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+                Err(e) => return Err(e),
+            }
+        }
+        Ok(())
+    }
+}
+
+#[cfg(all(feature = "std", target_family = "unix"))]
+impl<F: unix::fs::FileExt> UnixFileExt_v1_33 for F {}
+
+pub trait Option_v1_33<T, E>: Sealed<Option<Result<T, E>>> {
+    fn transpose(self) -> Result<Option<T>, E>;
+}
+
+impl<T, E> Option_v1_33<T, E> for Option<Result<T, E>> {
+    #[inline]
+    fn transpose(self) -> Result<Option<T>, E> {
+        match self {
+            Some(Ok(x)) => Ok(Some(x)),
+            Some(Err(e)) => Err(e),
+            None => Ok(None),
+        }
+    }
+}
+
+pub trait Result_v1_33<T, E>: Sealed<Result<Option<T>, E>> {
+    fn transpose(self) -> Option<Result<T, E>>;
+}
+
+impl<T, E> Result_v1_33<T, E> for Result<Option<T>, E> {
+    #[inline]
+    fn transpose(self) -> Option<Result<T, E>> {
+        match self {
+            Ok(Some(x)) => Some(Ok(x)),
+            Ok(None) => None,
+            Err(e) => Some(Err(e)),
+        }
+    }
+}
+
+#[cfg(feature = "std")]
+pub trait VecDeque_v1_33<T>: Sealed<VecDeque<T>> {
+    fn resize_with(&mut self, new_len: usize, generator: impl FnMut() -> T);
+}
+
+#[cfg(feature = "std")]
+impl<T> VecDeque_v1_33<T> for VecDeque<T> {
+    fn resize_with(&mut self, new_len: usize, generator: impl FnMut() -> T) {
+        let len = self.len();
+
+        if new_len > len {
+            self.extend(core::iter::repeat_with(generator).take(new_len - len))
+        } else {
+            self.truncate(new_len);
+        }
+    }
+}
+
+pub trait Duration_v1_33: Sealed<Duration> {
+    fn as_millis(&self) -> u128;
+    fn as_micros(&self) -> u128;
+    fn as_nanos(&self) -> u128;
+}
+
+impl Duration_v1_33 for Duration {
+    #[inline]
+    fn as_millis(&self) -> u128 {
+        self.as_secs() as u128 * 1_000 + (self.subsec_nanos() / 1_000_000) as u128
+    }
+
+    #[inline]
+    fn as_micros(&self) -> u128 {
+        self.as_secs() as u128 * 1_000_000 + (self.subsec_nanos() / 1_000) as u128
+    }
+
+    #[inline]
+    fn as_nanos(&self) -> u128 {
+        self.as_secs() as u128 * 1_000_000_000 + self.subsec_nanos() as u128
+    }
+}
diff --git a/src/v1_33/pin.rs b/src/v1_33/pin.rs
new file mode 100644
index 0000000..6dc56a5
--- /dev/null
+++ b/src/v1_33/pin.rs
@@ -0,0 +1,126 @@
+use crate::marker::Unpin;
+use core::{
+    fmt,
+    ops::{Deref, DerefMut},
+};
+
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub struct Pin<P> {
+    pointer: P,
+}
+
+impl<P: Deref<Target = T>, T: Unpin> Pin<P> {
+    #[inline(always)]
+    pub fn new(pointer: P) -> Pin<P> {
+        unsafe { Pin::new_unchecked(pointer) }
+    }
+}
+
+impl<P: Deref> Pin<P> {
+    #[inline(always)]
+    pub unsafe fn new_unchecked(pointer: P) -> Pin<P> {
+        Pin { pointer }
+    }
+
+    #[inline(always)]
+    pub fn as_ref(&self) -> Pin<&P::Target> {
+        unsafe { Pin::new_unchecked(&*self.pointer) }
+    }
+}
+
+impl<P: DerefMut> Pin<P> {
+    #[inline(always)]
+    pub fn as_mut(&mut self) -> Pin<&mut P::Target> {
+        unsafe { Pin::new_unchecked(&mut *self.pointer) }
+    }
+
+    #[inline(always)]
+    pub fn set(&mut self, value: P::Target)
+    where
+        P::Target: Sized,
+    {
+        *(self.pointer) = value;
+    }
+}
+
+impl<'a, T: ?Sized> Pin<&'a T> {
+    pub unsafe fn map_unchecked<U, F>(self, func: F) -> Pin<&'a U>
+    where
+        U: ?Sized,
+        F: FnOnce(&T) -> &U,
+    {
+        let pointer = &*self.pointer;
+        let new_pointer = func(pointer);
+        Pin::new_unchecked(new_pointer)
+    }
+
+    #[inline(always)]
+    pub fn get_ref(self) -> &'a T {
+        self.pointer
+    }
+}
+
+impl<'a, T: ?Sized> Pin<&'a mut T> {
+    #[inline(always)]
+    pub fn into_ref(self) -> Pin<&'a T> {
+        Pin {
+            pointer: self.pointer,
+        }
+    }
+
+    #[inline(always)]
+    pub fn get_mut(self) -> &'a mut T
+    where
+        T: Unpin,
+    {
+        self.pointer
+    }
+
+    #[inline(always)]
+    pub unsafe fn get_unchecked_mut(self) -> &'a mut T {
+        self.pointer
+    }
+
+    pub unsafe fn map_unchecked_mut<U, F>(self, func: F) -> Pin<&'a mut U>
+    where
+        U: ?Sized,
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let pointer = Pin::get_unchecked_mut(self);
+        let new_pointer = func(pointer);
+        Pin::new_unchecked(new_pointer)
+    }
+}
+
+impl<P: Deref> Deref for Pin<P> {
+    type Target = P::Target;
+
+    fn deref(&self) -> &P::Target {
+        Pin::get_ref(Pin::as_ref(self))
+    }
+}
+
+impl<P: DerefMut<Target = T>, T: Unpin> DerefMut for Pin<P> {
+    fn deref_mut(&mut self) -> &mut P::Target {
+        Pin::get_mut(Pin::as_mut(self))
+    }
+}
+
+impl<P: fmt::Debug> fmt::Debug for Pin<P> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&self.pointer, f)
+    }
+}
+
+impl<P: fmt::Display> fmt::Display for Pin<P> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(&self.pointer, f)
+    }
+}
+
+impl<P: fmt::Pointer> fmt::Pointer for Pin<P> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Pointer::fmt(&self.pointer, f)
+    }
+}
diff --git a/src/v1_34.rs b/src/v1_34.rs
new file mode 100644
index 0000000..bee28e8
--- /dev/null
+++ b/src/v1_34.rs
@@ -0,0 +1,349 @@
+mod try_from;
+
+pub use self::try_from::{TryFrom, TryFromIntError, TryInto};
+pub use crate::array::TryFromSliceError;
+use crate::traits::{Integer, Sealed};
+#[cfg(feature = "std")]
+use core::mem;
+use core::{
+    fmt,
+    hash::{Hash, Hasher},
+    iter::FusedIterator,
+};
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Infallible {}
+
+impl fmt::Debug for Infallible {
+    fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {}
+    }
+}
+
+impl fmt::Display for Infallible {
+    fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {}
+    }
+}
+
+impl Hash for Infallible {
+    fn hash<H: Hasher>(&self, _: &mut H) {
+        match *self {}
+    }
+}
+
+#[inline]
+pub fn from_fn<T, F>(f: F) -> FromFn<F>
+where
+    F: FnMut() -> Option<T>,
+{
+    FromFn(f)
+}
+
+#[derive(Clone)]
+pub struct FromFn<F>(F);
+
+impl<T, F> Iterator for FromFn<F>
+where
+    F: FnMut() -> Option<T>,
+{
+    type Item = T;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        (self.0)()
+    }
+}
+
+impl<F> fmt::Debug for FromFn<F> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("FromFn").finish()
+    }
+}
+
+pub fn successors<T, F>(first: Option<T>, succ: F) -> Successors<T, F>
+where
+    F: FnMut(&T) -> Option<T>,
+{
+    Successors { next: first, succ }
+}
+
+#[derive(Clone)]
+pub struct Successors<T, F> {
+    next: Option<T>,
+    succ: F,
+}
+
+impl<T, F> Iterator for Successors<T, F>
+where
+    F: FnMut(&T) -> Option<T>,
+{
+    type Item = T;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        let item = self.next.take()?;
+        self.next = (self.succ)(&item);
+        Some(item)
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        if self.next.is_some() {
+            (1, None)
+        } else {
+            (0, Some(0))
+        }
+    }
+}
+
+impl<T, F> FusedIterator for Successors<T, F> where F: FnMut(&T) -> Option<T> {}
+
+impl<T: fmt::Debug, F> fmt::Debug for Successors<T, F> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Successors")
+            .field("next", &self.next)
+            .finish()
+    }
+}
+
+pub trait Slice_v1_34<T>: Sealed<[T]> {
+    fn sort_by_cached_key<K, F>(&mut self, f: F)
+    where
+        F: FnMut(&T) -> K,
+        K: Ord;
+}
+
+#[cfg(feature = "std")]
+impl<T> Slice_v1_34<T> for [T] {
+    #[inline]
+    fn sort_by_cached_key<K, F>(&mut self, f: F)
+    where
+        F: FnMut(&T) -> K,
+        K: Ord,
+    {
+        macro_rules! sort_by_key {
+            ($t:ty, $slice:ident, $f:ident) => {{
+                let mut indices: Vec<_> = $slice
+                    .iter()
+                    .map($f)
+                    .enumerate()
+                    .map(|(i, k)| (k, i as $t))
+                    .collect();
+                indices.sort_unstable();
+                for i in 0..$slice.len() {
+                    let mut index = indices[i].1;
+                    while (index as usize) < i {
+                        index = indices[index as usize].1;
+                    }
+                    indices[i].1 = index;
+                    $slice.swap(i, index as usize);
+                }
+            }};
+        }
+
+        let sz_u8 = mem::size_of::<(K, u8)>();
+        let sz_u16 = mem::size_of::<(K, u16)>();
+        let sz_u32 = mem::size_of::<(K, u32)>();
+        let sz_usize = mem::size_of::<(K, usize)>();
+
+        let len = self.len();
+        if len < 2 {
+            return;
+        }
+        if sz_u8 < sz_u16 && len <= (u8::max_value() as usize) {
+            return sort_by_key!(u8, self, f);
+        }
+        if sz_u16 < sz_u32 && len <= (u16::max_value() as usize) {
+            return sort_by_key!(u16, self, f);
+        }
+        if sz_u32 < sz_usize && len <= (u32::max_value() as usize) {
+            return sort_by_key!(u32, self, f);
+        }
+        sort_by_key!(usize, self, f)
+    }
+}
+
+pub trait Pow_v1_34: Integer {
+    fn checked_pow(self, exp: u32) -> Option<Self>;
+    fn saturating_pow(self, exp: u32) -> Self;
+    fn wrapping_pow(self, exp: u32) -> Self;
+    fn overflowing_pow(self, exp: u32) -> (Self, bool);
+}
+
+macro_rules! impl_pow_for_signed {
+    ($($type:ty)+) => {$(
+        impl Pow_v1_34 for $type {
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn checked_pow(self, mut exp: u32) -> Option<Self> {
+                let mut base = self;
+                let mut acc: Self = 1;
+
+                while exp > 1 {
+                    if (exp & 1) == 1 {
+                        acc = acc.checked_mul(base)?;
+                    }
+                    exp /= 2;
+                    base = base.checked_mul(base)?;
+                }
+
+                if exp == 1 {
+                    acc = acc.checked_mul(base)?;
+                }
+
+                Some(acc)
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn saturating_pow(self, exp: u32) -> Self {
+                match self.checked_pow(exp) {
+                    Some(x) => x,
+                    None if self < 0 && exp % 2 == 1 => Self::min_value(),
+                    None => Self::max_value(),
+                }
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn wrapping_pow(self, mut exp: u32) -> Self {
+                let mut base = self;
+                let mut acc: Self = 1;
+
+                while exp > 1 {
+                    if (exp & 1) == 1 {
+                        acc = acc.wrapping_mul(base);
+                    }
+                    exp /= 2;
+                    base = base.wrapping_mul(base);
+                }
+
+                if exp == 1 {
+                    acc = acc.wrapping_mul(base);
+                }
+
+                acc
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn overflowing_pow(self, mut exp: u32) -> (Self, bool) {
+                let mut base = self;
+                let mut acc: Self = 1;
+                let mut overflown = false;
+                let mut r;
+
+                while exp > 1 {
+                    if (exp & 1) == 1 {
+                        r = acc.overflowing_mul(base);
+                        acc = r.0;
+                        overflown |= r.1;
+                    }
+                    exp /= 2;
+                    r = base.overflowing_mul(base);
+                    base = r.0;
+                    overflown |= r.1;
+                }
+
+                if exp == 1 {
+                    r = acc.overflowing_mul(base);
+                    acc = r.0;
+                    overflown |= r.1;
+                }
+
+                (acc, overflown)
+            }
+        }
+    )+};
+}
+
+impl_pow_for_signed![i8 i16 i32 i64 i128 isize];
+
+macro_rules! impl_pow_for_unsigned {
+    ($($type:ty)+) => {$(
+        impl Pow_v1_34 for $type {
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn checked_pow(self, mut exp: u32) -> Option<Self> {
+                let mut base = self;
+                let mut acc: Self = 1;
+
+                while exp > 1 {
+                    if (exp & 1) == 1 {
+                        acc = acc.checked_mul(base)?;
+                    }
+                    exp /= 2;
+                    base = base.checked_mul(base)?;
+                }
+
+                if exp == 1 {
+                    acc = acc.checked_mul(base)?;
+                }
+
+                Some(acc)
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn saturating_pow(self, exp: u32) -> Self {
+                match self.checked_pow(exp) {
+                    Some(x) => x,
+                    None => Self::max_value(),
+                }
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn wrapping_pow(self, mut exp: u32) -> Self {
+                let mut base = self;
+                let mut acc: Self = 1;
+
+                while exp > 1 {
+                    if (exp & 1) == 1 {
+                        acc = acc.wrapping_mul(base);
+                    }
+                    exp /= 2;
+                    base = base.wrapping_mul(base);
+                }
+
+                if exp == 1 {
+                    acc = acc.wrapping_mul(base);
+                }
+
+                acc
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn overflowing_pow(self, mut exp: u32) -> (Self, bool) {
+                let mut base = self;
+                let mut acc: Self = 1;
+                let mut overflown = false;
+                let mut r;
+
+                while exp > 1 {
+                    if (exp & 1) == 1 {
+                        r = acc.overflowing_mul(base);
+                        acc = r.0;
+                        overflown |= r.1;
+                    }
+                    exp /= 2;
+                    r = base.overflowing_mul(base);
+                    base = r.0;
+                    overflown |= r.1;
+                }
+
+                if exp == 1 {
+                    r = acc.overflowing_mul(base);
+                    acc = r.0;
+                    overflown |= r.1;
+                }
+
+                (acc, overflown)
+            }
+        }
+    )+};
+}
+
+impl_pow_for_unsigned![u8 u16 u32 u64 u128 usize];
diff --git a/src/v1_34/try_from.rs b/src/v1_34/try_from.rs
new file mode 100644
index 0000000..957ca55
--- /dev/null
+++ b/src/v1_34/try_from.rs
@@ -0,0 +1,300 @@
+use crate::{array::TryFromSliceError, convert::Infallible};
+use core::fmt;
+
+// Backport of `core::num::TryFromIntError` (integral `TryFrom` conversions,
+// stable since Rust 1.34). The `pub(crate)` unit field keeps the error
+// constructible only inside this crate, matching std.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct TryFromIntError(pub(crate) ());
+
+impl TryFromIntError {
+    // Mirrors the hidden std helper; single source of the error text used by
+    // the `Display` impl below.
+    #[doc(hidden)]
+    pub fn __description(&self) -> &str {
+        "out of range integral type conversion attempted"
+    }
+}
+
+impl fmt::Display for TryFromIntError {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.__description().fmt(fmt)
+    }
+}
+
+// `Infallible` is uninhabited, so matching on it with no arms is exhaustive:
+// this conversion can never actually run.
+impl From<Infallible> for TryFromIntError {
+    fn from(x: Infallible) -> TryFromIntError {
+        match x {}
+    }
+}
+
+// Backports of `core::convert::{TryFrom, TryInto}` (stable since Rust 1.34).
+pub trait TryFrom<T>: Sized {
+    type Error;
+    fn try_from(value: T) -> Result<Self, Self::Error>;
+}
+
+pub trait TryInto<T>: Sized {
+    type Error;
+    fn try_into(self) -> Result<T, Self::Error>;
+}
+
+// Blanket impl mirroring std: `TryInto` is derived from `TryFrom`, so only
+// `TryFrom` ever needs a manual implementation.
+impl<T, U> TryInto<U> for T
+where
+    U: TryFrom<T>,
+{
+    type Error = U::Error;
+
+    fn try_into(self) -> Result<U, U::Error> {
+        U::try_from(self)
+    }
+}
+
+// The target type can represent every value of the source type, so the
+// conversion is a plain cast and can never fail.
+macro_rules! try_from_unbounded {
+    ($source:ty, $($target:ty),*) => {$(
+        impl TryFrom<$source> for $target {
+            type Error = TryFromIntError;
+
+            #[inline]
+            fn try_from(value: $source) -> Result<Self, Self::Error> {
+                Ok(value as $target)
+            }
+        }
+    )*}
+}
+
+// Signed source into a target that can hold all of its non-negative values:
+// only negative inputs are out of range.
+macro_rules! try_from_lower_bounded {
+    ($source:ty, $($target:ty),*) => {$(
+        impl TryFrom<$source> for $target {
+            type Error = TryFromIntError;
+
+            #[inline]
+            fn try_from(u: $source) -> Result<$target, TryFromIntError> {
+                if u >= 0 {
+                    Ok(u as $target)
+                } else {
+                    Err(TryFromIntError(()))
+                }
+            }
+        }
+    )*}
+}
+
+// Non-negative source into a narrower target: only the target's maximum can
+// be exceeded, so a single upper-bound check suffices.
+macro_rules! try_from_upper_bounded {
+    ($source:ty, $($target:ty),*) => {$(
+        impl TryFrom<$source> for $target {
+            type Error = TryFromIntError;
+
+            #[inline]
+            fn try_from(u: $source) -> Result<$target, TryFromIntError> {
+                if u > (<$target>::max_value() as $source) {
+                    Err(TryFromIntError(()))
+                } else {
+                    Ok(u as $target)
+                }
+            }
+        }
+    )*}
+}
+
+// Signed source into a strictly narrower target: both the minimum and the
+// maximum of the target can be violated.
+macro_rules! try_from_both_bounded {
+    ($source:ty, $($target:ty),*) => {$(
+        impl TryFrom<$source> for $target {
+            type Error = TryFromIntError;
+
+            #[inline]
+            fn try_from(u: $source) -> Result<$target, TryFromIntError> {
+                let min = <$target>::min_value() as $source;
+                let max = <$target>::max_value() as $source;
+                if u < min || u > max {
+                    Err(TryFromIntError(()))
+                } else {
+                    Ok(u as $target)
+                }
+            }
+        }
+    )*}
+}
+
+// Invokes `$mac` with source and target swapped, for each target; used by the
+// pointer-width modules below to generate conversions *into* usize/isize.
+macro_rules! rev {
+    ($mac:ident, $source:ty, $($target:ty),*) => {$(
+        $mac!($target, $source);
+    )*}
+}
+
+// unsigned -> narrower unsigned: only the upper bound can be exceeded.
+try_from_upper_bounded!(u16, u8);
+try_from_upper_bounded!(u32, u16, u8);
+try_from_upper_bounded!(u64, u32, u16, u8);
+try_from_upper_bounded!(u128, u64, u32, u16, u8);
+
+// signed -> narrower signed: both bounds can be exceeded.
+try_from_both_bounded!(i16, i8);
+try_from_both_bounded!(i32, i16, i8);
+try_from_both_bounded!(i64, i32, i16, i8);
+try_from_both_bounded!(i128, i64, i32, i16, i8);
+
+// unsigned -> signed of the same or smaller width: check the signed maximum.
+try_from_upper_bounded!(u8, i8);
+try_from_upper_bounded!(u16, i8, i16);
+try_from_upper_bounded!(u32, i8, i16, i32);
+try_from_upper_bounded!(u64, i8, i16, i32, i64);
+try_from_upper_bounded!(u128, i8, i16, i32, i64, i128);
+
+// signed -> unsigned of equal or greater width: reject negatives only.
+try_from_lower_bounded!(i8, u8, u16, u32, u64, u128);
+try_from_lower_bounded!(i16, u16, u32, u64, u128);
+try_from_lower_bounded!(i32, u32, u64, u128);
+try_from_lower_bounded!(i64, u64, u128);
+try_from_lower_bounded!(i128, u128);
+// signed -> narrower unsigned: both bounds apply.
+try_from_both_bounded!(i16, u8);
+try_from_both_bounded!(i32, u16, u8);
+try_from_both_bounded!(i64, u32, u16, u8);
+try_from_both_bounded!(i128, u64, u32, u16, u8);
+
+// usize and isize always share a width, so only sign-related checks remain.
+try_from_upper_bounded!(usize, isize);
+try_from_lower_bounded!(isize, usize);
+
+// Conversions involving usize/isize depend on the target's pointer width, so
+// each width gets its own set of impls. The `rev!` invocations generate the
+// conversions *into* usize/isize from the fixed-width integer types.
+#[cfg(target_pointer_width = "16")]
+mod ptr_try_from_impls {
+    use super::{TryFrom, TryFromIntError};
+
+    try_from_upper_bounded!(usize, u8);
+    try_from_unbounded!(usize, u16, u32, u64, u128);
+    try_from_upper_bounded!(usize, i8, i16);
+    try_from_unbounded!(usize, i32, i64, i128);
+
+    try_from_both_bounded!(isize, u8);
+    try_from_lower_bounded!(isize, u16, u32, u64, u128);
+    try_from_both_bounded!(isize, i8);
+    try_from_unbounded!(isize, i16, i32, i64, i128);
+
+    rev!(try_from_upper_bounded, usize, u32, u64, u128);
+    rev!(try_from_lower_bounded, usize, i8, i16);
+    rev!(try_from_both_bounded, usize, i32, i64, i128);
+
+    rev!(try_from_upper_bounded, isize, u16, u32, u64, u128);
+    rev!(try_from_both_bounded, isize, i32, i64, i128);
+}
+
+#[cfg(target_pointer_width = "32")]
+mod ptr_try_from_impls {
+    use super::{TryFrom, TryFromIntError};
+
+    try_from_upper_bounded!(usize, u8, u16);
+    try_from_unbounded!(usize, u32, u64, u128);
+    try_from_upper_bounded!(usize, i8, i16, i32);
+    try_from_unbounded!(usize, i64, i128);
+
+    try_from_both_bounded!(isize, u8, u16);
+    try_from_lower_bounded!(isize, u32, u64, u128);
+    try_from_both_bounded!(isize, i8, i16);
+    try_from_unbounded!(isize, i32, i64, i128);
+
+    rev!(try_from_unbounded, usize, u32);
+    rev!(try_from_upper_bounded, usize, u64, u128);
+    rev!(try_from_lower_bounded, usize, i8, i16, i32);
+    rev!(try_from_both_bounded, usize, i64, i128);
+
+    rev!(try_from_unbounded, isize, u16);
+    rev!(try_from_upper_bounded, isize, u32, u64, u128);
+    rev!(try_from_unbounded, isize, i32);
+    rev!(try_from_both_bounded, isize, i64, i128);
+}
+
+#[cfg(target_pointer_width = "64")]
+mod ptr_try_from_impls {
+    use super::{TryFrom, TryFromIntError};
+
+    try_from_upper_bounded!(usize, u8, u16, u32);
+    try_from_unbounded!(usize, u64, u128);
+    try_from_upper_bounded!(usize, i8, i16, i32, i64);
+    try_from_unbounded!(usize, i128);
+
+    try_from_both_bounded!(isize, u8, u16, u32);
+    try_from_lower_bounded!(isize, u64, u128);
+    try_from_both_bounded!(isize, i8, i16, i32);
+    try_from_unbounded!(isize, i64, i128);
+
+    rev!(try_from_unbounded, usize, u32, u64);
+    rev!(try_from_upper_bounded, usize, u128);
+    rev!(try_from_lower_bounded, usize, i8, i16, i32, i64);
+    rev!(try_from_both_bounded, usize, i128);
+
+    rev!(try_from_unbounded, isize, u16, u32);
+    rev!(try_from_upper_bounded, isize, u64, u128);
+    rev!(try_from_unbounded, isize, i32, i64);
+    rev!(try_from_both_bounded, isize, i128);
+}
+
+// `TryFrom` between slices and arrays, one impl per length 0..=32 (the same
+// explicit-length approach std used before const generics).
+macro_rules! impl_length_at_most_32 {
+    ($($n:expr),+) => {$(
+        impl<T> TryFrom<&[T]> for [T; $n]
+        where
+            T: Copy,
+        {
+            type Error = TryFromSliceError;
+
+            // Delegates to the by-reference impl below, then copies the array
+            // out (hence the `T: Copy` bound).
+            fn try_from(slice: &[T]) -> Result<[T; $n], TryFromSliceError> {
+                <&Self>::try_from(slice).map(|r| *r)
+            }
+        }
+
+        impl<'a, T> TryFrom<&'a [T]> for &'a [T; $n] {
+            type Error = TryFromSliceError;
+
+            fn try_from(slice: &[T]) -> Result<&[T; $n], TryFromSliceError> {
+                if slice.len() == $n {
+                    let ptr = slice.as_ptr() as *const [T; $n];
+                    // SAFETY: the length check above guarantees the slice
+                    // holds exactly $n contiguous elements, so reinterpreting
+                    // its data pointer as `&[T; $n]` (same lifetime) is valid.
+                    unsafe { Ok(&*ptr) }
+                } else {
+                    Err(TryFromSliceError(()))
+                }
+            }
+        }
+
+        impl<'a, T> TryFrom<&'a mut [T]> for &'a mut [T; $n] {
+            type Error = TryFromSliceError;
+
+            fn try_from(slice: &mut [T]) -> Result<&mut [T; $n], TryFromSliceError> {
+                if slice.len() == $n {
+                    let ptr = slice.as_mut_ptr() as *mut [T; $n];
+                    // SAFETY: as above, plus the unique `&mut` borrow is
+                    // carried over to the returned array reference.
+                    unsafe { Ok(&mut *ptr) }
+                } else {
+                    Err(TryFromSliceError(()))
+                }
+            }
+        }
+    )+}
+}
+
+impl_length_at_most_32![
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+    26, 27, 28, 29, 30, 31, 32
+];
+
+// Although we aren't able to `impl<T, U: Into<T>> TryFrom<U> for T`, we are
+// able to implement it for any given type.
+// `Error = Infallible` signals to generic code that these identity
+// conversions can never fail.
+macro_rules! impl_identity {
+    ($($type:ty),*) => {$(
+        impl TryFrom<$type> for $type {
+            type Error = Infallible;
+
+            fn try_from(value: $type) -> Result<Self, Self::Error> {
+                Ok(value)
+            }
+        }
+    )*}
+}
+
+// Implement for some primitives. Other types can be trivially added upon
+// request.
+impl_identity![
+    (),
+    bool,
+    char,
+    i8,
+    i16,
+    i32,
+    i64,
+    i128,
+    isize,
+    u8,
+    u16,
+    u32,
+    u64,
+    u128,
+    usize,
+    f32,
+    f64
+];
diff --git a/src/v1_35.rs b/src/v1_35.rs
new file mode 100644
index 0000000..d99a4db
--- /dev/null
+++ b/src/v1_35.rs
@@ -0,0 +1,67 @@
+use crate::traits::Sealed;
+use core::{
+    cell::RefCell,
+    hash::{Hash, Hasher},
+    mem,
+    ops::{Bound, RangeBounds},
+};
+
+// Backport of `RefCell::replace_with` (stable since Rust 1.35).
+pub trait RefCell_v1_35<T>: Sealed<RefCell<T>> {
+    fn replace_with<F: FnOnce(&mut T) -> T>(&self, f: F) -> T;
+}
+
+impl<T> RefCell_v1_35<T> for RefCell<T> {
+    #[inline]
+    fn replace_with<F: FnOnce(&mut T) -> T>(&self, f: F) -> T {
+        // `borrow_mut` panics if the value is currently borrowed, matching
+        // the documented behavior of the std method.
+        let mut_borrow = &mut *self.borrow_mut();
+        let replacement = f(mut_borrow);
+        // Returns the old value, leaving the computed replacement in place.
+        mem::replace(mut_borrow, replacement)
+    }
+}
+
+// Backport of `Option::<&T>::copied` (stable since Rust 1.35).
+pub trait Option_v1_35<'a, T: Copy + 'a>: Sealed<Option<&'a T>> {
+    fn copied(self) -> Option<T>;
+}
+
+impl<'a, T: Copy + 'a> Option_v1_35<'a, T> for Option<&'a T> {
+    fn copied(self) -> Option<T> {
+        self.map(|&t| t)
+    }
+}
+
+// Backport of `core::ptr::hash` (stable since Rust 1.35): hashes the pointer
+// value itself, not the pointee.
+pub fn hash<T: ?Sized, S: Hasher>(hashee: *const T, into: &mut S) {
+    hashee.hash(into);
+}
+
+// Backport of `RangeBounds::contains` (stable since Rust 1.35).
+pub trait RangeBounds_v1_35<T>: RangeBounds<T> {
+    fn contains<U>(&self, item: &U) -> bool
+    where
+        T: PartialOrd<U>,
+        U: ?Sized + PartialOrd<T>;
+}
+
+impl<T: PartialOrd<T>, RB: RangeBounds<T>> RangeBounds_v1_35<T> for RB {
+    fn contains<U>(&self, item: &U) -> bool
+    where
+        T: PartialOrd<U>,
+        U: ?Sized + PartialOrd<T>,
+    {
+        contains(self, item)
+    }
+}
+
+// `item` is inside the range iff it satisfies both the start and the end
+// bound; an unbounded side is always satisfied.
+fn contains<T, U>(range: &impl RangeBounds<T>, item: &U) -> bool
+where
+    T: ?Sized + PartialOrd<U>,
+    U: ?Sized + PartialOrd<T>,
+{
+    (match range.start_bound() {
+        Bound::Included(ref start) => *start <= item,
+        Bound::Excluded(ref start) => *start < item,
+        Bound::Unbounded => true,
+    }) && (match range.end_bound() {
+        Bound::Included(ref end) => item <= *end,
+        Bound::Excluded(ref end) => item < *end,
+        Bound::Unbounded => true,
+    })
+}
diff --git a/src/v1_36.rs b/src/v1_36.rs
new file mode 100644
index 0000000..0631da9
--- /dev/null
+++ b/src/v1_36.rs
@@ -0,0 +1,43 @@
+mod iterator_copied;
+mod maybe_uninit;
+mod poll;
+mod waker;
+
+use crate::traits::Sealed;
+use core::fmt;
+
+// Flatten the submodule items into the `v1_36` namespace so callers import
+// everything for this release from one path.
+pub use self::{
+    iterator_copied::{Copied, Iterator_v1_36},
+    maybe_uninit::MaybeUninit,
+    poll::Poll,
+    waker::{Context, RawWaker, RawWakerVTable, Waker},
+};
+
+// Backport of `str::as_mut_ptr` (stable since Rust 1.36).
+pub trait str_v1_36: Sealed<str> {
+    fn as_mut_ptr(&mut self) -> *mut u8;
+}
+
+impl str_v1_36 for str {
+    #[inline]
+    fn as_mut_ptr(&mut self) -> *mut u8 {
+        self as *mut str as *mut u8
+    }
+}
+
+// Backport of `core::array::TryFromSliceError`, the error returned by the
+// slice-to-array `TryFrom` impls in `v1_34::try_from`.
+#[derive(Debug, Copy, Clone)]
+pub struct TryFromSliceError(pub(crate) ());
+
+impl fmt::Display for TryFromSliceError {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self.__description(), f)
+    }
+}
+
+impl TryFromSliceError {
+    #[inline]
+    #[doc(hidden)]
+    pub fn __description(&self) -> &str {
+        "could not convert slice to array"
+    }
+}
diff --git a/src/v1_36/iterator_copied.rs b/src/v1_36/iterator_copied.rs
new file mode 100644
index 0000000..d5fcc78
--- /dev/null
+++ b/src/v1_36/iterator_copied.rs
@@ -0,0 +1,93 @@
+#[cfg(__standback_before_1_35)]
+use crate::prelude::*;
+use core::iter::FusedIterator;
+
+// Backport of the adaptor returned by `Iterator::copied` (stable since
+// Rust 1.36): turns an iterator over `&T` into one over `T` by copying.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[derive(Clone, Debug)]
+pub struct Copied<I> {
+    it: I,
+}
+
+impl<I> Copied<I> {
+    fn new(it: I) -> Copied<I> {
+        Copied { it }
+    }
+}
+
+// Adapts a fold closure over values into one over references, so `fold`/
+// `rfold` can be forwarded to the inner iterator without an extra `map`.
+fn copy_fold<T: Copy, Acc>(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, &T) -> Acc {
+    move |acc, &elt| f(acc, elt)
+}
+
+impl<'a, I, T: 'a> Iterator for Copied<I>
+where
+    I: Iterator<Item = &'a T>,
+    T: Copy,
+{
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        self.it.next().copied()
+    }
+
+    // Copying elements never changes how many there are, so the inner
+    // iterator's hint is forwarded unchanged.
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.it.size_hint()
+    }
+
+    fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        self.it.fold(init, copy_fold(f))
+    }
+}
+
+impl<'a, I, T: 'a> DoubleEndedIterator for Copied<I>
+where
+    I: DoubleEndedIterator<Item = &'a T>,
+    T: Copy,
+{
+    fn next_back(&mut self) -> Option<T> {
+        self.it.next_back().copied()
+    }
+
+    fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        self.it.rfold(init, copy_fold(f))
+    }
+}
+
+impl<'a, I, T: 'a> ExactSizeIterator for Copied<I>
+where
+    I: ExactSizeIterator<Item = &'a T>,
+    T: Copy,
+{
+    fn len(&self) -> usize {
+        self.it.len()
+    }
+}
+
+// `Copied` is fused exactly when the underlying iterator is.
+impl<'a, I, T: 'a> FusedIterator for Copied<I>
+where
+    I: FusedIterator<Item = &'a T>,
+    T: Copy,
+{
+}
+
+// Extension trait providing `Iterator::copied` on all iterators over `&T`.
+pub trait Iterator_v1_36: Iterator {
+    fn copied<'a, T: 'a>(self) -> Copied<Self>
+    where
+        Self: Sized + Iterator<Item = &'a T>,
+        T: Copy;
+}
+
+impl<Iter: Iterator> Iterator_v1_36 for Iter {
+    fn copied<'a, T: 'a>(self) -> Copied<Self>
+    where
+        Self: Sized + Iterator<Item = &'a T>,
+        T: Copy,
+    {
+        Copied::new(self)
+    }
+}
diff --git a/src/v1_36/maybe_uninit.rs b/src/v1_36/maybe_uninit.rs
new file mode 100644
index 0000000..91f35ee
--- /dev/null
+++ b/src/v1_36/maybe_uninit.rs
@@ -0,0 +1,52 @@
+use core::mem::ManuallyDrop;
+
+// Backport of `core::mem::MaybeUninit` (stable since Rust 1.36). Unlike the
+// std type, this one is restricted to `T: Copy`, so no variant ever carries
+// drop glue (per this crate's changelog, the restriction eliminates
+// undefined behavior the unrestricted backport had).
+#[derive(Copy)]
+pub union MaybeUninit<T: Copy> {
+    uninit: (),
+    value: ManuallyDrop<T>,
+}
+
+impl<T: Copy> Clone for MaybeUninit<T> {
+    #[inline(always)]
+    fn clone(&self) -> Self {
+        // Bitwise copy is fine: `T: Copy` guarantees no ownership semantics.
+        *self
+    }
+}
+
+impl<T: Copy> MaybeUninit<T> {
+    #[inline(always)]
+    pub fn new(val: T) -> MaybeUninit<T> {
+        MaybeUninit {
+            value: ManuallyDrop::new(val),
+        }
+    }
+
+    // Creates an uninitialized value via the zero-sized `uninit` variant.
+    #[inline(always)]
+    pub fn uninit() -> MaybeUninit<T> {
+        MaybeUninit { uninit: () }
+    }
+
+    // NOTE(review): as with the std method, this is only meaningful when an
+    // all-zero bit pattern is a valid `T`.
+    #[inline]
+    pub fn zeroed() -> MaybeUninit<T> {
+        let mut u = MaybeUninit::<T>::uninit();
+        unsafe {
+            u.as_mut_ptr().write_bytes(0u8, 1);
+        }
+        u
+    }
+
+    #[inline(always)]
+    pub fn as_ptr(&self) -> *const T {
+        unsafe { &*self.value as *const T }
+    }
+
+    #[inline(always)]
+    pub fn as_mut_ptr(&mut self) -> *mut T {
+        unsafe { &mut *self.value as *mut T }
+    }
+
+    // Caller must guarantee the value has been fully initialized; reading an
+    // uninitialized value is undefined behavior (same contract as std).
+    #[inline(always)]
+    pub unsafe fn assume_init(self) -> T {
+        ManuallyDrop::into_inner(self.value)
+    }
+}
diff --git a/src/v1_36/poll.rs b/src/v1_36/poll.rs
new file mode 100644
index 0000000..d07c04b
--- /dev/null
+++ b/src/v1_36/poll.rs
@@ -0,0 +1,61 @@
+// Backport of `core::task::Poll` (stable since Rust 1.36).
+#[must_use = "this `Poll` may be a `Pending` variant, which should be handled"]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
+pub enum Poll<T> {
+    Ready(T),
+    Pending,
+}
+
+impl<T> Poll<T> {
+    // Applies `f` to the ready value, leaving `Pending` untouched.
+    pub fn map<U, F>(self, f: F) -> Poll<U>
+    where
+        F: FnOnce(T) -> U,
+    {
+        match self {
+            Poll::Ready(t) => Poll::Ready(f(t)),
+            Poll::Pending => Poll::Pending,
+        }
+    }
+
+    #[inline]
+    pub fn is_ready(&self) -> bool {
+        match *self {
+            Poll::Ready(_) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_pending(&self) -> bool {
+        !self.is_ready()
+    }
+}
+
+// Combinators for the common `Poll<Result<..>>` shape used by futures.
+impl<T, E> Poll<Result<T, E>> {
+    pub fn map_ok<U, F>(self, f: F) -> Poll<Result<U, E>>
+    where
+        F: FnOnce(T) -> U,
+    {
+        match self {
+            Poll::Ready(Ok(t)) => Poll::Ready(Ok(f(t))),
+            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
+            Poll::Pending => Poll::Pending,
+        }
+    }
+
+    pub fn map_err<U, F>(self, f: F) -> Poll<Result<T, U>>
+    where
+        F: FnOnce(E) -> U,
+    {
+        match self {
+            Poll::Ready(Ok(t)) => Poll::Ready(Ok(t)),
+            Poll::Ready(Err(e)) => Poll::Ready(Err(f(e))),
+            Poll::Pending => Poll::Pending,
+        }
+    }
+}
+
+// Any plain value converts to an immediately-ready poll.
+impl<T> From<T> for Poll<T> {
+    fn from(t: T) -> Poll<T> {
+        Poll::Ready(t)
+    }
+}
diff --git a/src/v1_36/waker.rs b/src/v1_36/waker.rs
new file mode 100644
index 0000000..1fb8ba9
--- /dev/null
+++ b/src/v1_36/waker.rs
@@ -0,0 +1,128 @@
+use crate::marker::Unpin;
+use core::{fmt, marker::PhantomData, mem};
+
+// Backport of the `core::task` waker types (stable since Rust 1.36).
+// A `RawWaker` pairs an opaque data pointer with a manually-built vtable.
+#[derive(PartialEq, Debug)]
+pub struct RawWaker {
+    data: *const (),
+    vtable: &'static RawWakerVTable,
+}
+
+impl RawWaker {
+    pub const fn new(data: *const (), vtable: &'static RawWakerVTable) -> RawWaker {
+        RawWaker { data, vtable }
+    }
+}
+
+// Function table backing a `Waker`. `wake` consumes the data pointer;
+// `wake_by_ref` must leave it usable; `drop` releases it (see `Waker` below
+// for how each entry is invoked).
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub struct RawWakerVTable {
+    clone: unsafe fn(*const ()) -> RawWaker,
+    wake: unsafe fn(*const ()),
+    wake_by_ref: unsafe fn(*const ()),
+    drop: unsafe fn(*const ()),
+}
+
+impl RawWakerVTable {
+    pub fn new(
+        clone: unsafe fn(*const ()) -> RawWaker,
+        wake: unsafe fn(*const ()),
+        wake_by_ref: unsafe fn(*const ()),
+        drop: unsafe fn(*const ()),
+    ) -> Self {
+        Self {
+            clone,
+            wake,
+            wake_by_ref,
+            drop,
+        }
+    }
+}
+
+// Task context handed to `Future::poll`; currently only carries the waker.
+pub struct Context<'a> {
+    waker: &'a Waker,
+    // Invariant over 'a, matching std's definition.
+    _marker: PhantomData<fn(&'a ()) -> &'a ()>,
+}
+
+impl<'a> Context<'a> {
+    #[inline]
+    pub fn from_waker(waker: &'a Waker) -> Self {
+        Context {
+            waker,
+            _marker: PhantomData,
+        }
+    }
+
+    #[inline]
+    pub fn waker(&self) -> &'a Waker {
+        &self.waker
+    }
+}
+
+impl fmt::Debug for Context<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Context")
+            .field("waker", &self.waker)
+            .finish()
+    }
+}
+
+#[repr(transparent)]
+pub struct Waker {
+    waker: RawWaker,
+}
+
+impl Unpin for Waker {}
+unsafe impl Send for Waker {}
+unsafe impl Sync for Waker {}
+
+impl Waker {
+    #[inline]
+    pub fn wake(self) {
+        let wake = self.waker.vtable.wake;
+        let data = self.waker.data;
+
+        // `wake` consumes the data pointer; forget `self` so `Drop` doesn't
+        // run the vtable's `drop` on the already-consumed data afterwards.
+        mem::forget(self);
+
+        unsafe { (wake)(data) };
+    }
+
+    #[inline]
+    pub fn wake_by_ref(&self) {
+        unsafe { (self.waker.vtable.wake_by_ref)(self.waker.data) }
+    }
+
+    // Two wakers "will wake" the same task when both the data pointer and
+    // the vtable compare equal.
+    #[inline]
+    pub fn will_wake(&self, other: &Waker) -> bool {
+        self.waker == other.waker
+    }
+
+    // Caller must ensure the RawWaker's vtable functions uphold the waker
+    // contract (same requirement as `std::task::Waker::from_raw`).
+    #[inline]
+    pub unsafe fn from_raw(waker: RawWaker) -> Waker {
+        Waker { waker }
+    }
+}
+
+impl Clone for Waker {
+    #[inline]
+    fn clone(&self) -> Self {
+        Waker {
+            waker: unsafe { (self.waker.vtable.clone)(self.waker.data) },
+        }
+    }
+}
+
+impl Drop for Waker {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe { (self.waker.vtable.drop)(self.waker.data) }
+    }
+}
+
+impl fmt::Debug for Waker {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Print the vtable by address; the function pointers themselves are
+        // not meaningfully printable.
+        let vtable_ptr = self.waker.vtable as *const RawWakerVTable;
+        f.debug_struct("Waker")
+            .field("data", &self.waker.data)
+            .field("vtable", &vtable_ptr)
+            .finish()
+    }
+}
diff --git a/src/v1_37.rs b/src/v1_37.rs
new file mode 100644
index 0000000..70d477b
--- /dev/null
+++ b/src/v1_37.rs
@@ -0,0 +1,104 @@
+use crate::traits::Sealed;
+use core::{
+    cell::Cell,
+    ops::{Bound, RangeBounds},
+    ptr,
+};
+
+// Backport of `Cell::from_mut` (stable since Rust 1.37).
+pub trait Cell_v1_37<T>: Sealed<Cell<T>> {
+    fn from_mut(t: &mut T) -> &Cell<T>;
+}
+
+impl<T> Cell_v1_37<T> for Cell<T> {
+    #[inline]
+    fn from_mut(t: &mut T) -> &Cell<T> {
+        // SAFETY: relies on `Cell<T>` having the same memory layout as `T`
+        // (as in std), and the `&mut` guarantees exclusive access for the
+        // lifetime of the returned shared reference.
+        unsafe { &*(t as *mut T as *const Cell<T>) }
+    }
+}
+
+// Backport of `Cell::<[T]>::as_slice_of_cells` (stable since Rust 1.37).
+pub trait Cell_v1_37_<T>: Sealed<Cell<[T]>> {
+    fn as_slice_of_cells(&self) -> &[Cell<T>];
+}
+
+impl<T> Cell_v1_37_<T> for Cell<[T]> {
+    fn as_slice_of_cells(&self) -> &[Cell<T>] {
+        // SAFETY: same layout argument as `from_mut`, applied element-wise.
+        unsafe { &*(self as *const Cell<[T]> as *const [Cell<T>]) }
+    }
+}
+
+// Backport of `Option::xor` (stable since Rust 1.37): `Some` iff exactly one
+// of the two options is `Some`.
+pub trait Option_v1_37<T>: Sealed<Option<T>> {
+    fn xor(self, optb: Option<T>) -> Option<T>;
+}
+
+impl<T> Option_v1_37<T> for Option<T> {
+    #[inline]
+    fn xor(self, optb: Option<T>) -> Option<T> {
+        match (self, optb) {
+            (Some(a), None) => Some(a),
+            (None, Some(b)) => Some(b),
+            _ => None,
+        }
+    }
+}
+
+// Backport of `slice::copy_within` (stable since Rust 1.37): copies the
+// elements in `src` to the region starting at `dest`, within one slice.
+pub trait Slice_v1_37<T>: Sealed<[T]> {
+    fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dest: usize)
+    where
+        T: Copy;
+}
+
+impl<T> Slice_v1_37<T> for [T] {
+    fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dest: usize)
+    where
+        T: Copy,
+    {
+        // Resolve the generic bounds into a concrete half-open [start, end)
+        // range, panicking on index-arithmetic overflow like std does.
+        let src_start = match src.start_bound() {
+            Bound::Included(&n) => n,
+            Bound::Excluded(&n) => n
+                .checked_add(1)
+                .unwrap_or_else(|| slice_index_overflow_fail()),
+            Bound::Unbounded => 0,
+        };
+        let src_end = match src.end_bound() {
+            Bound::Included(&n) => n
+                .checked_add(1)
+                .unwrap_or_else(|| slice_index_overflow_fail()),
+            Bound::Excluded(&n) => n,
+            Bound::Unbounded => self.len(),
+        };
+        assert!(src_start <= src_end, "src end is before src start");
+        assert!(src_end <= self.len(), "src is out of bounds");
+        let count = src_end - src_start;
+        assert!(dest <= self.len() - count, "dest is out of bounds");
+        // SAFETY: both regions were bounds-checked above; `ptr::copy` has
+        // memmove semantics, so overlapping src/dest regions are fine.
+        unsafe {
+            ptr::copy(
+                self.as_ptr().add(src_start),
+                self.as_mut_ptr().add(dest),
+                count,
+            );
+        }
+    }
+}
+
+// Shared panic path, kept out-of-line and cold like std's equivalent.
+#[inline(never)]
+#[cold]
+fn slice_index_overflow_fail() -> ! {
+    panic!("attempted to index slice up to maximum usize");
+}
+
+// Backport of `DoubleEndedIterator::nth_back` (stable since Rust 1.37):
+// returns the n-th element from the back, consuming everything after it.
+pub trait DoubleEndedIterator_v1_37: DoubleEndedIterator {
+    fn nth_back(&mut self, n: usize) -> Option<Self::Item>;
+}
+
+impl<Iter: DoubleEndedIterator> DoubleEndedIterator_v1_37 for Iter {
+    #[inline]
+    fn nth_back(&mut self, mut n: usize) -> Option<Self::Item> {
+        for x in self.rev() {
+            if n == 0 {
+                return Some(x);
+            }
+            n -= 1;
+        }
+        None
+    }
+}
diff --git a/src/v1_38.rs b/src/v1_38.rs
new file mode 100644
index 0000000..9c06f6a
--- /dev/null
+++ b/src/v1_38.rs
@@ -0,0 +1,320 @@
+use crate::traits::{Float, Integer, Sealed};
+use core::time::Duration;
+
+// Backports of `<*const T>::cast` and `<*mut T>::cast` (stable since
+// Rust 1.38): pointer type changes that, unlike `as`, cannot silently change
+// mutability or convert to a non-pointer type.
+pub trait ConstPtr_v1_38<T>: Sealed<*const T> {
+    fn cast<U>(self) -> *const U;
+}
+
+impl<T> ConstPtr_v1_38<T> for *const T {
+    #[inline]
+    fn cast<U>(self) -> *const U {
+        self as _
+    }
+}
+
+pub trait MutPtr_v1_38<T>: Sealed<*mut T> {
+    fn cast<U>(self) -> *mut U;
+}
+
+impl<T> MutPtr_v1_38<T> for *mut T {
+    #[inline]
+    fn cast<U>(self) -> *mut U {
+        self as _
+    }
+}
+
+// Backports of the `Duration` float APIs (stable since Rust 1.38).
+pub trait Duration_v1_38: Sealed<Duration> {
+    fn as_secs_f32(&self) -> f32;
+    fn as_secs_f64(&self) -> f64;
+    fn div_f32(&self, rhs: f32) -> Self;
+    fn div_f64(&self, rhs: f64) -> Self;
+    fn from_secs_f32(secs: f32) -> Self;
+    fn from_secs_f64(secs: f64) -> Self;
+    fn mul_f32(&self, rhs: f32) -> Self;
+    fn mul_f64(&self, rhs: f64) -> Self;
+}
+
+impl Duration_v1_38 for Duration {
+    #[inline]
+    fn as_secs_f32(&self) -> f32 {
+        (self.as_secs() as f32) + (self.subsec_nanos() as f32) / 1_000_000_000.
+    }
+
+    #[inline]
+    fn as_secs_f64(&self) -> f64 {
+        (self.as_secs() as f64) + (self.subsec_nanos() as f64) / 1_000_000_000.
+    }
+
+    #[inline]
+    fn div_f32(&self, rhs: f32) -> Self {
+        Self::from_secs_f32(self.as_secs_f32() / rhs)
+    }
+
+    #[inline]
+    fn div_f64(&self, rhs: f64) -> Self {
+        Self::from_secs_f64(self.as_secs_f64() / rhs)
+    }
+
+    // Panics on non-finite, too-large, or negative `secs`, mirroring the
+    // panic behavior of the std method.
+    #[inline]
+    fn from_secs_f32(secs: f32) -> Self {
+        // One past the largest representable nanosecond count (u64 seconds).
+        const MAX_NANOS_F32: f32 = ((u64::max_value() as u128 + 1) * 1_000_000_000) as f32;
+        let nanos = secs * 1_000_000_000.;
+        if !nanos.is_finite() {
+            panic!("got non-finite value when converting float to duration");
+        }
+        if nanos >= MAX_NANOS_F32 {
+            panic!("overflow when converting float to duration");
+        }
+        if nanos < 0.0 {
+            panic!("underflow when converting float to duration");
+        }
+        let nanos = nanos as u128;
+        Self::new(
+            (nanos / 1_000_000_000) as u64,
+            (nanos % 1_000_000_000) as u32,
+        )
+    }
+
+    // Same contract as `from_secs_f32`, with f64 precision.
+    #[inline]
+    fn from_secs_f64(secs: f64) -> Self {
+        const MAX_NANOS_F64: f64 = ((u64::max_value() as u128 + 1) * 1_000_000_000) as f64;
+        let nanos = secs * 1_000_000_000.;
+        if !nanos.is_finite() {
+            panic!("got non-finite value when converting float to duration");
+        }
+        if nanos >= MAX_NANOS_F64 {
+            panic!("overflow when converting float to duration");
+        }
+        if nanos < 0.0 {
+            panic!("underflow when converting float to duration");
+        }
+        let nanos = nanos as u128;
+        Self::new(
+            (nanos / 1_000_000_000) as u64,
+            (nanos % 1_000_000_000) as u32,
+        )
+    }
+
+    #[inline]
+    fn mul_f32(&self, rhs: f32) -> Self {
+        Self::from_secs_f32(rhs * self.as_secs_f32())
+    }
+
+    #[inline]
+    fn mul_f64(&self, rhs: f64) -> Self {
+        Self::from_secs_f64(rhs * self.as_secs_f64())
+    }
+}
+
+// Backports of the Euclidean division/remainder integer APIs (stable since
+// Rust 1.38). Euclidean remainder is always non-negative for a nonzero rhs.
+pub trait Euclid_v1_38: Integer {
+    fn rem_euclid(self, rhs: Self) -> Self;
+    fn checked_rem_euclid(self, rhs: Self) -> Option<Self>;
+    fn wrapping_rem_euclid(self, rhs: Self) -> Self;
+    fn overflowing_rem_euclid(self, rhs: Self) -> (Self, bool);
+    fn div_euclid(self, rhs: Self) -> Self;
+    fn checked_div_euclid(self, rhs: Self) -> Option<Self>;
+    fn wrapping_div_euclid(self, rhs: Self) -> Self;
+    fn overflowing_div_euclid(self, rhs: Self) -> (Self, bool);
+}
+
+macro_rules! impl_euclid_for_signed {
+    ($($type:ty)+) => {$(
+        impl Euclid_v1_38 for $type {
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn rem_euclid(self, rhs: Self) -> Self {
+                let r = self % rhs;
+                if r < 0 {
+                    // Shift a negative truncated remainder up by |rhs| so the
+                    // result lands in [0, |rhs|).
+                    if rhs < 0 {
+                        r - rhs
+                    } else {
+                        r + rhs
+                    }
+                } else {
+                    r
+                }
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn checked_rem_euclid(self, rhs: Self) -> Option<Self> {
+                // Division by zero, and MIN % -1 (whose quotient overflows),
+                // are the only failing cases.
+                if rhs == 0 || (self == Self::min_value() && rhs == -1) {
+                    None
+                } else {
+                    Some(self.rem_euclid(rhs))
+                }
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn wrapping_rem_euclid(self, rhs: Self) -> Self {
+                self.overflowing_rem_euclid(rhs).0
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn overflowing_rem_euclid(self, rhs: Self) -> (Self, bool) {
+                if self == Self::min_value() && rhs == -1 {
+                    (0, true)
+                } else {
+                    (self.rem_euclid(rhs), false)
+                }
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn div_euclid(self, rhs: Self) -> Self {
+                let q = self / rhs;
+                // Truncating division rounded toward zero; when the remainder
+                // is negative, step the quotient one unit toward -infinity.
+                if self % rhs < 0 {
+                    return if rhs > 0 { q - 1 } else { q + 1 };
+                }
+                q
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn checked_div_euclid(self, rhs: Self) -> Option<Self> {
+                if rhs == 0 || (self == Self::min_value() && rhs == -1) {
+                    None
+                } else {
+                    Some(self.div_euclid(rhs))
+                }
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn wrapping_div_euclid(self, rhs: Self) -> Self {
+                self.overflowing_div_euclid(rhs).0
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn overflowing_div_euclid(self, rhs: Self) -> (Self, bool) {
+                // MIN / -1 overflows; wrap to MIN (self), flagging overflow.
+                if self == Self::min_value() && rhs == -1 {
+                    (self, true)
+                } else {
+                    (self.div_euclid(rhs), false)
+                }
+            }
+        }
+    )+};
+}
+
+impl_euclid_for_signed![i8 i16 i32 i64 i128 isize];
+
+// For unsigned types Euclidean and truncating division coincide, so every
+// method reduces to plain `/` and `%`; only division by zero can fail, and
+// overflow is impossible.
+macro_rules! impl_euclid_for_unsigned {
+    ($($type:ty)+) => {$(
+        impl Euclid_v1_38 for $type {
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn rem_euclid(self, rhs: Self) -> Self {
+                self % rhs
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn checked_rem_euclid(self, rhs: Self) -> Option<Self> {
+                if rhs == 0 {
+                    None
+                } else {
+                    Some(self.rem_euclid(rhs))
+                }
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn wrapping_rem_euclid(self, rhs: Self) -> Self {
+                self % rhs
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn overflowing_rem_euclid(self, rhs: Self) -> (Self, bool) {
+                (self % rhs, false)
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn div_euclid(self, rhs: Self) -> Self {
+                self / rhs
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn checked_div_euclid(self, rhs: Self) -> Option<Self> {
+                if rhs == 0 {
+                    None
+                } else {
+                    Some(self.div_euclid(rhs))
+                }
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn wrapping_div_euclid(self, rhs: Self) -> Self {
+                self / rhs
+            }
+
+            #[must_use = "this returns the result of the operation, without modifying the original"]
+            #[inline]
+            fn overflowing_div_euclid(self, rhs: Self) -> (Self, bool) {
+                (self / rhs, false)
+            }
+        }
+    )+};
+}
+
+impl_euclid_for_unsigned![u8 u16 u32 u64 u128 usize];
+
+// Float Euclidean division/remainder backports (stable since Rust 1.38).
+// Gated on `std` because the `abs`/`trunc` float methods used below are not
+// available in `core`.
+pub trait EuclidFloat_v1_38: Float {
+    fn rem_euclid(self, rhs: Self) -> Self;
+    fn div_euclid(self, rhs: Self) -> Self;
+}
+
+#[cfg(feature = "std")]
+impl EuclidFloat_v1_38 for f32 {
+    #[must_use = "method returns a new number and does not mutate the original value"]
+    #[inline]
+    fn rem_euclid(self, rhs: f32) -> f32 {
+        // Shift a negative truncated remainder up by |rhs| so the result is
+        // non-negative (in [0, |rhs|) for finite inputs).
+        let r = self % rhs;
+        if r < 0.0 {
+            r + rhs.abs()
+        } else {
+            r
+        }
+    }
+
+    #[must_use = "method returns a new number and does not mutate the original value"]
+    #[inline]
+    fn div_euclid(self, rhs: f32) -> f32 {
+        // Truncated quotient, corrected one unit toward -infinity whenever
+        // the remainder is negative.
+        let q = (self / rhs).trunc();
+        if self % rhs < 0.0 {
+            return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
+        }
+        q
+    }
+}
+
+#[cfg(feature = "std")]
+impl EuclidFloat_v1_38 for f64 {
+    #[must_use = "method returns a new number and does not mutate the original value"]
+    #[inline]
+    fn rem_euclid(self, rhs: f64) -> f64 {
+        let r = self % rhs;
+        if r < 0.0 {
+            r + rhs.abs()
+        } else {
+            r
+        }
+    }
+
+    #[must_use = "method returns a new number and does not mutate the original value"]
+    #[inline]
+    fn div_euclid(self, rhs: f64) -> f64 {
+        let q = (self / rhs).trunc();
+        if self % rhs < 0.0 {
+            return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
+        }
+        q
+    }
+}
diff --git a/src/v1_40.rs b/src/v1_40.rs
new file mode 100644
index 0000000..c7767d7
--- /dev/null
+++ b/src/v1_40.rs
@@ -0,0 +1,183 @@
+use crate::traits::Sealed;
+#[cfg(__standback_before_1_32)]
+use crate::v1_32::{u32_v1_32, u64_v1_32};
+use core::ops::DerefMut;
+
+#[cfg(feature = "std")]
+use core::ptr;
+
+/// Backport of `Option::as_deref`/`as_deref_mut` (stabilized in Rust 1.40.0).
+/// Bounded on `DerefMut` so both the shared and mutable forms are available.
+pub trait Option_v1_40<T: DerefMut>: Sealed<Option<T>> {
+    fn as_deref_mut(&mut self) -> Option<&mut T::Target>;
+    fn as_deref(&self) -> Option<&T::Target>;
+}
+
+impl<T: DerefMut> Option_v1_40<T> for Option<T> {
+    fn as_deref_mut(&mut self) -> Option<&mut T::Target> {
+        self.as_mut().map(|t| t.deref_mut())
+    }
+
+    fn as_deref(&self) -> Option<&T::Target> {
+        // `deref` is available through the `Deref` supertrait of `DerefMut`.
+        self.as_ref().map(|t| t.deref())
+    }
+}
+
+/// Backport of `Option::flatten` (stabilized in Rust 1.40.0). A separate trait
+/// is needed because it is only defined on nested `Option<Option<T>>`.
+pub trait Option_v1_40_<T>: Sealed<Option<Option<T>>> {
+    fn flatten(self) -> Option<T>;
+}
+
+impl<T> Option_v1_40_<T> for Option<Option<T>> {
+    fn flatten(self) -> Option<T> {
+        // `and_then(identity)` collapses one level of nesting.
+        self.and_then(crate::convert::identity)
+    }
+}
+
+/// Backport of the `f32` byte-conversion methods (stabilized in Rust 1.40.0).
+/// All conversions round-trip through the integer bit representation
+/// (`to_bits`/`from_bits`) and reuse the existing `u32` byte-order methods.
+pub trait f32_v1_40: Sealed<f32> {
+    fn to_be_bytes(self) -> [u8; 4];
+    fn to_le_bytes(self) -> [u8; 4];
+    fn to_ne_bytes(self) -> [u8; 4];
+    fn from_be_bytes(bytes: [u8; 4]) -> Self;
+    fn from_le_bytes(bytes: [u8; 4]) -> Self;
+    fn from_ne_bytes(bytes: [u8; 4]) -> Self;
+}
+
+impl f32_v1_40 for f32 {
+    #[inline]
+    fn to_be_bytes(self) -> [u8; 4] {
+        self.to_bits().to_be_bytes()
+    }
+
+    #[inline]
+    fn to_le_bytes(self) -> [u8; 4] {
+        self.to_bits().to_le_bytes()
+    }
+
+    #[inline]
+    fn to_ne_bytes(self) -> [u8; 4] {
+        self.to_bits().to_ne_bytes()
+    }
+
+    #[inline]
+    fn from_be_bytes(bytes: [u8; 4]) -> Self {
+        Self::from_bits(u32::from_be_bytes(bytes))
+    }
+
+    #[inline]
+    fn from_le_bytes(bytes: [u8; 4]) -> Self {
+        Self::from_bits(u32::from_le_bytes(bytes))
+    }
+
+    #[inline]
+    fn from_ne_bytes(bytes: [u8; 4]) -> Self {
+        Self::from_bits(u32::from_ne_bytes(bytes))
+    }
+}
+
+/// Backport of the `f64` byte-conversion methods (stabilized in Rust 1.40.0).
+/// Mirrors the `f32` implementation above, using `u64` as the bit carrier.
+pub trait f64_v1_40: Sealed<f64> {
+    fn to_be_bytes(self) -> [u8; 8];
+    fn to_le_bytes(self) -> [u8; 8];
+    fn to_ne_bytes(self) -> [u8; 8];
+    fn from_be_bytes(bytes: [u8; 8]) -> Self;
+    fn from_le_bytes(bytes: [u8; 8]) -> Self;
+    fn from_ne_bytes(bytes: [u8; 8]) -> Self;
+}
+
+impl f64_v1_40 for f64 {
+    #[inline]
+    fn to_be_bytes(self) -> [u8; 8] {
+        self.to_bits().to_be_bytes()
+    }
+
+    #[inline]
+    fn to_le_bytes(self) -> [u8; 8] {
+        self.to_bits().to_le_bytes()
+    }
+
+    #[inline]
+    fn to_ne_bytes(self) -> [u8; 8] {
+        self.to_bits().to_ne_bytes()
+    }
+
+    #[inline]
+    fn from_be_bytes(bytes: [u8; 8]) -> Self {
+        Self::from_bits(u64::from_be_bytes(bytes))
+    }
+
+    #[inline]
+    fn from_le_bytes(bytes: [u8; 8]) -> Self {
+        Self::from_bits(u64::from_le_bytes(bytes))
+    }
+
+    #[inline]
+    fn from_ne_bytes(bytes: [u8; 8]) -> Self {
+        Self::from_bits(u64::from_ne_bytes(bytes))
+    }
+}
+
+/// Backport of `mem::take` (stabilized in Rust 1.40.0): moves the current
+/// value out of `dest`, leaving `T::default()` in its place.
+pub fn take<T: Default>(dest: &mut T) -> T {
+    core::mem::replace(dest, T::default())
+}
+
+/// Backport of `[T]::repeat` (stabilized in Rust 1.40.0). Restricted to
+/// `T: Copy` here (std allows `T: Clone`), which is what makes the raw
+/// `ptr::copy_nonoverlapping` byte duplication below sound.
+#[cfg(feature = "std")]
+pub trait slice_v1_40<T>: Sealed<[T]> {
+    fn repeat(&self, n: usize) -> Vec<T>
+    where
+        T: Copy;
+}
+
+#[cfg(feature = "std")]
+impl<T: Copy> slice_v1_40<T> for [T] {
+    fn repeat(&self, n: usize) -> Vec<T> {
+        if n == 0 {
+            return Vec::new();
+        }
+
+        // If `n` is larger than zero, it can be split as
+        // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
+        // `2^expn` is the number represented by the leftmost '1' bit of `n`,
+        // and `rem` is the remaining part of `n`.
+
+        // Using `Vec` to access `set_len()`.
+        // The full `self.len() * n` capacity is reserved up front, so every
+        // raw copy below writes strictly within this single allocation.
+        let mut buf = Vec::with_capacity(self.len().checked_mul(n).expect("capacity overflow"));
+
+        // `2^expn` repetition is done by doubling `buf` `expn`-times.
+        buf.extend(self);
+        {
+            let mut m = n >> 1;
+            // If `m > 0`, there are remaining bits up to the leftmost '1'.
+            while m > 0 {
+                // `buf.extend(buf)`:
+                // SAFETY (upheld by the capacity reservation above): source and
+                // destination halves cannot overlap, and `T: Copy` means a
+                // bitwise duplicate is a valid value.
+                unsafe {
+                    ptr::copy_nonoverlapping(
+                        buf.as_ptr(),
+                        (buf.as_mut_ptr() as *mut T).add(buf.len()),
+                        buf.len(),
+                    );
+                    // `buf` has capacity of `self.len() * n`.
+                    let buf_len = buf.len();
+                    buf.set_len(buf_len * 2);
+                }
+
+                m >>= 1;
+            }
+        }
+
+        // `rem` (`= n - 2^expn`) repetition is done by copying
+        // first `rem` repetitions from `buf` itself.
+        let rem_len = self.len() * n - buf.len(); // `self.len() * rem`
+        if rem_len > 0 {
+            // `buf.extend(buf[0 .. rem_len])`:
+            unsafe {
+                // This is non-overlapping since `2^expn > rem`.
+                ptr::copy_nonoverlapping(
+                    buf.as_ptr(),
+                    (buf.as_mut_ptr() as *mut T).add(buf.len()),
+                    rem_len,
+                );
+                // `buf.len() + rem_len` equals to `buf.capacity()` (`= self.len() * n`).
+                let buf_cap = buf.capacity();
+                buf.set_len(buf_cap);
+            }
+        }
+        buf
+    }
+}
diff --git a/src/v1_41.rs b/src/v1_41.rs
new file mode 100644
index 0000000..1fa883c
--- /dev/null
+++ b/src/v1_41.rs
@@ -0,0 +1,24 @@
+use crate::traits::Sealed;
+
+/// Backport of `Result::map_or`/`map_or_else` (stabilized in Rust 1.41.0).
+pub trait Result_v1_41<T, E>: Sealed<Result<T, E>> {
+    fn map_or<U, F: FnOnce(T) -> U>(self, default: U, f: F) -> U;
+    fn map_or_else<U, D: FnOnce(E) -> U, F: FnOnce(T) -> U>(self, default: D, f: F) -> U;
+}
+
+impl<T, E> Result_v1_41<T, E> for Result<T, E> {
+    /// Applies `f` to an `Ok` value, or returns `default` for `Err`.
+    /// Note `default` is eagerly evaluated by the caller, as in std.
+    #[inline]
+    fn map_or<U, F: FnOnce(T) -> U>(self, default: U, f: F) -> U {
+        match self {
+            Ok(t) => f(t),
+            Err(_) => default,
+        }
+    }
+
+    /// Applies `f` to an `Ok` value, or the fallback closure `default` to the
+    /// `Err` value (lazily evaluated variant).
+    #[inline]
+    fn map_or_else<U, D: FnOnce(E) -> U, F: FnOnce(T) -> U>(self, default: D, f: F) -> U {
+        match self {
+            Ok(t) => f(t),
+            Err(e) => default(e),
+        }
+    }
+}
diff --git a/src/v1_42.rs b/src/v1_42.rs
new file mode 100644
index 0000000..bceba2f
--- /dev/null
+++ b/src/v1_42.rs
@@ -0,0 +1,104 @@
+use crate::traits::Sealed;
+use core::{mem::ManuallyDrop, ptr};
+#[cfg(feature = "std")]
+use std::{
+    sync::{Condvar, LockResult, MutexGuard, WaitTimeoutResult},
+    time::{Duration, Instant},
+};
+
+/// Constructs a `std::sync::WaitTimeoutResult` from a bool (`true` = timed
+/// out). std provides no public constructor, hence the transmute below.
+#[cfg(feature = "std")]
+#[inline(always)]
+fn new_wait_timeout_result(value: bool) -> WaitTimeoutResult {
+    // Safety: WaitTimeoutResult is a thin wrapper around a boolean. As the
+    // structure is not public, we must transmute the provided boolean to
+    // construct the desired value. If the internal size changes in the future,
+    // this will stop compiling.
+    unsafe { core::mem::transmute(value) }
+}
+
+/// Backport of `Condvar::wait_while`/`wait_timeout_while` (stabilized in
+/// Rust 1.42.0): condition-checked waiting built on `wait`/`wait_timeout`.
+#[cfg(feature = "std")]
+pub trait Condvar_v1_42: Sealed<Condvar> {
+    fn wait_while<'a, T, F>(
+        &self,
+        guard: MutexGuard<'a, T>,
+        condition: F,
+    ) -> LockResult<MutexGuard<'a, T>>
+    where
+        F: FnMut(&mut T) -> bool;
+    fn wait_timeout_while<'a, T, F>(
+        &self,
+        guard: MutexGuard<'a, T>,
+        dur: Duration,
+        condition: F,
+    ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)>
+    where
+        F: FnMut(&mut T) -> bool;
+}
+
+#[cfg(feature = "std")]
+impl Condvar_v1_42 for Condvar {
+    /// Blocks while `condition` returns true, re-waiting after every wakeup
+    /// (this also absorbs spurious wakeups). Errors from `wait` propagate
+    /// via `?`.
+    fn wait_while<'a, T, F>(
+        &self,
+        mut guard: MutexGuard<'a, T>,
+        mut condition: F,
+    ) -> LockResult<MutexGuard<'a, T>>
+    where
+        F: FnMut(&mut T) -> bool,
+    {
+        while condition(&mut *guard) {
+            guard = self.wait(guard)?;
+        }
+        Ok(guard)
+    }
+
+    /// Like `wait_while`, but gives up once `dur` has elapsed overall. The
+    /// remaining timeout is recomputed from `start` on each loop iteration.
+    fn wait_timeout_while<'a, T, F>(
+        &self,
+        mut guard: MutexGuard<'a, T>,
+        dur: Duration,
+        mut condition: F,
+    ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)>
+    where
+        F: FnMut(&mut T) -> bool,
+    {
+        let start = Instant::now();
+        loop {
+            if !condition(&mut *guard) {
+                return Ok((guard, new_wait_timeout_result(false)));
+            }
+            // `checked_sub` returning `None` means the deadline has passed.
+            let timeout = match dur.checked_sub(start.elapsed()) {
+                Some(timeout) => timeout,
+                None => return Ok((guard, new_wait_timeout_result(true))),
+            };
+            guard = self.wait_timeout(guard, timeout)?.0;
+        }
+    }
+}
+
+/// Backport of `ManuallyDrop::take` (stabilized in Rust 1.42.0).
+pub trait ManuallyDrop_v1_42<T>: Sealed<ManuallyDrop<T>> {
+    /// # Safety
+    /// After calling this, the value in `slot` must never be used or dropped
+    /// again — `ptr::read` below leaves a bitwise duplicate behind.
+    unsafe fn take(slot: &mut ManuallyDrop<T>) -> T;
+}
+
+impl<T> ManuallyDrop_v1_42<T> for ManuallyDrop<T> {
+    #[must_use = "if you don't need the value, you can use `ManuallyDrop::drop` instead"]
+    #[inline]
+    unsafe fn take(slot: &mut ManuallyDrop<T>) -> T {
+        // Reads the inner `T` out by pointer cast; relies on `ManuallyDrop<T>`
+        // having the same layout as `T`.
+        ptr::read(slot as *mut _ as *const _)
+    }
+}
+
+/// Backport of the `matches!` macro (stabilized in Rust 1.42.0): returns
+/// whether an expression matches any of the given patterns, with an optional
+/// trailing `if` guard handled by the second rule.
+#[macro_export]
+macro_rules! matches {
+    ($expression:expr, $( $pattern:pat )|+) => {
+        match $expression {
+            $( $pattern )|+ => true,
+            _ => false,
+        }
+    };
+
+    ($expression:expr, $( $pattern:pat )|+ if $guard:expr) => {
+        match $expression {
+            $( $pattern )|+ if $guard => true,
+            _ => false
+        }
+    }
+}
diff --git a/src/v1_43.rs b/src/v1_43.rs
new file mode 100644
index 0000000..a13b384
--- /dev/null
+++ b/src/v1_43.rs
@@ -0,0 +1,123 @@
+use crate::traits::{Float, Integer};
+use core::iter::FusedIterator;
+
+// Float constants stabilized in Rust 1.43.0 (log10(2) and log2(10)).
+pub mod f32 {
+    pub const LOG10_2: f32 = 0.301029995663981195213738894724493027_f32;
+    pub const LOG2_10: f32 = 3.32192809488736234787031942948939018_f32;
+}
+
+pub mod f64 {
+    pub const LOG10_2: f64 = 0.301029995663981195213738894724493027_f64;
+    pub const LOG2_10: f64 = 3.32192809488736234787031942948939018_f64;
+}
+
+/// Backport of `iter::once_with` (stabilized in Rust 1.43.0): an iterator
+/// that lazily calls `gen` exactly once to produce its single item.
+#[inline]
+pub fn once_with<A, F: FnOnce() -> A>(gen: F) -> OnceWith<F> {
+    OnceWith { gen: Some(gen) }
+}
+
+/// Iterator returned by [`once_with`]. Holds the closure in an `Option` so
+/// that `take()` marks it consumed after the first `next()`.
+#[derive(Copy, Clone, Debug)]
+pub struct OnceWith<F> {
+    gen: Option<F>,
+}
+
+impl<A, F: FnOnce() -> A> Iterator for OnceWith<F> {
+    type Item = A;
+
+    #[inline]
+    fn next(&mut self) -> Option<A> {
+        // `?` returns `None` once the closure has already been taken.
+        let f = self.gen.take()?;
+        Some(f())
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // Delegates to the inner Option's iterator: (1, Some(1)) or (0, Some(0)).
+        self.gen.iter().size_hint()
+    }
+}
+
+impl<A, F: FnOnce() -> A> DoubleEndedIterator for OnceWith<F> {
+    fn next_back(&mut self) -> Option<A> {
+        // With at most one item, front and back iteration are identical.
+        self.next()
+    }
+}
+
+impl<A, F: FnOnce() -> A> ExactSizeIterator for OnceWith<F> {
+    fn len(&self) -> usize {
+        self.gen.iter().len()
+    }
+}
+
+impl<A, F: FnOnce() -> A> FusedIterator for OnceWith<F> {}
+
+/// Backport of the float associated constants moved onto the types in
+/// Rust 1.43.0 (previously found in `std::f32`/`std::f64` and their `consts`
+/// modules). Values below are literal copies of those module-level constants.
+pub trait float_v1_43: Float {
+    const RADIX: u32;
+    const MANTISSA_DIGITS: u32;
+    const DIGITS: u32;
+    const EPSILON: Self;
+    const MIN: Self;
+    const MIN_POSITIVE: Self;
+    const MAX: Self;
+    const MIN_EXP: i32;
+    const MAX_EXP: i32;
+    const MIN_10_EXP: i32;
+    const MAX_10_EXP: i32;
+    const NAN: Self;
+    const INFINITY: Self;
+    const NEG_INFINITY: Self;
+}
+
+impl float_v1_43 for f32 {
+    const DIGITS: u32 = 6;
+    const EPSILON: f32 = 1.19209290e-07_f32;
+    // NAN/INFINITY are produced via constant division, as `f32::NAN` etc.
+    // may not exist as associated constants on older compilers.
+    const INFINITY: f32 = 1.0_f32 / 0.0_f32;
+    const MANTISSA_DIGITS: u32 = 24;
+    const MAX: f32 = 3.40282347e+38_f32;
+    const MAX_10_EXP: i32 = 38;
+    const MAX_EXP: i32 = 128;
+    const MIN: f32 = -3.40282347e+38_f32;
+    const MIN_10_EXP: i32 = -37;
+    const MIN_EXP: i32 = -125;
+    const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
+    const NAN: f32 = 0.0_f32 / 0.0_f32;
+    const NEG_INFINITY: f32 = -1.0_f32 / 0.0_f32;
+    const RADIX: u32 = 2;
+}
+
+impl float_v1_43 for f64 {
+    const DIGITS: u32 = 15;
+    const EPSILON: f64 = 2.2204460492503131e-16_f64;
+    const INFINITY: f64 = 1.0_f64 / 0.0_f64;
+    const MANTISSA_DIGITS: u32 = 53;
+    const MAX: f64 = 1.7976931348623157e+308_f64;
+    const MAX_10_EXP: i32 = 308;
+    const MAX_EXP: i32 = 1024;
+    const MIN: f64 = -1.7976931348623157e+308_f64;
+    const MIN_10_EXP: i32 = -307;
+    const MIN_EXP: i32 = -1021;
+    const MIN_POSITIVE: f64 = 2.2250738585072014e-308_f64;
+    const NAN: f64 = 0.0_f64 / 0.0_f64;
+    const NEG_INFINITY: f64 = -1.0_f64 / 0.0_f64;
+    const RADIX: u32 = 2;
+}
+
+/// Backport of the integer `MIN`/`MAX` associated constants moved onto the
+/// types in Rust 1.43.0.
+pub trait int_v1_43: Integer {
+    const MIN: Self;
+    const MAX: Self;
+}
+
+// Implements the constants pairwise for each (signed, unsigned) type of the
+// same width, using bit patterns rather than the (not-yet-existing)
+// associated constants.
+macro_rules! impl_int_v1_43 {
+    ($($signed_type:ty, $unsigned_type:ty),*) => {$(
+        impl int_v1_43 for $signed_type {
+            // `!0` is all ones (-1); XOR with the unsigned max shifted right
+            // by one (0111...1) leaves only the sign bit set: 1000...0 = MIN.
+            const MIN: Self = !0 ^ ((!0 as $unsigned_type) >> 1) as Self;
+            const MAX: Self = !Self::MIN;
+        }
+
+        impl int_v1_43 for $unsigned_type {
+            const MIN: Self = 0;
+            const MAX: Self = !0;
+        }
+    )*}
+}
+
+impl_int_v1_43![i8, u8, i16, u16, i32, u32, i64, u64, i128, u128, isize, usize];
diff --git a/src/v1_44.rs b/src/v1_44.rs
new file mode 100644
index 0000000..27d31a2
--- /dev/null
+++ b/src/v1_44.rs
@@ -0,0 +1,159 @@
+use crate::traits::{Float, Sealed};
+use core::{
+    alloc::{Layout, LayoutErr},
+    cmp,
+    mem::{self, transmute},
+};
+#[cfg(feature = "std")]
+use std::ffi::OsString;
+#[cfg(feature = "std")]
+use std::path::PathBuf;
+
+/// Backport of the `PathBuf` capacity methods (stabilized in Rust 1.44.0).
+/// These reinterpret `&PathBuf` as `&OsString` via transmute and delegate —
+/// this relies on `PathBuf` being a newtype wrapper around `OsString`, which
+/// std does not guarantee as a stable layout. TODO(review): layout assumption
+/// holds for the compiler versions this crate targets; confirm if bumping.
+#[cfg(feature = "std")]
+pub trait PathBuf_v1_44: Sealed<PathBuf> {
+    fn with_capacity(capacity: usize) -> PathBuf;
+    fn capacity(&self) -> usize;
+    fn clear(&mut self);
+    fn reserve(&mut self, additional: usize);
+    fn reserve_exact(&mut self, additional: usize);
+    fn shrink_to_fit(&mut self);
+}
+
+#[cfg(feature = "std")]
+impl PathBuf_v1_44 for PathBuf {
+    fn with_capacity(capacity: usize) -> PathBuf {
+        // The only method here that needs no transmute: `From<OsString>`.
+        OsString::with_capacity(capacity).into()
+    }
+
+    fn capacity(&self) -> usize {
+        unsafe { transmute::<_, &OsString>(self) }.capacity()
+    }
+
+    fn clear(&mut self) {
+        unsafe { transmute::<_, &mut OsString>(self) }.clear()
+    }
+
+    fn reserve(&mut self, additional: usize) {
+        unsafe { transmute::<_, &mut OsString>(self) }.reserve(additional)
+    }
+
+    fn reserve_exact(&mut self, additional: usize) {
+        unsafe { transmute::<_, &mut OsString>(self) }.reserve_exact(additional)
+    }
+
+    fn shrink_to_fit(&mut self) {
+        unsafe { transmute::<_, &mut OsString>(self) }.shrink_to_fit()
+    }
+}
+
+/// Backport of the `Layout` methods stabilized in Rust 1.44.0
+/// (`align_to`, `pad_to_align`, `array`, `extend`).
+pub trait Layout_v1_44: Sealed<Layout> {
+    fn align_to(&self, align: usize) -> Result<Layout, LayoutErr>;
+    fn pad_to_align(&self) -> Layout;
+    fn array<T>(n: usize) -> Result<Layout, LayoutErr>;
+    fn extend(&self, next: Layout) -> Result<(Layout, usize), LayoutErr>;
+}
+
+impl Layout_v1_44 for Layout {
+    /// Same size, alignment raised to at least `align`; errors if the
+    /// combination is invalid.
+    #[inline]
+    fn align_to(&self, align: usize) -> Result<Self, LayoutErr> {
+        Layout::from_size_align(self.size(), cmp::max(self.align(), align))
+    }
+
+    /// Rounds the size up to a multiple of the alignment.
+    #[inline]
+    fn pad_to_align(&self) -> Layout {
+        let pad = padding_needed_for(self, self.align());
+        let new_size = self.size() + pad;
+        Layout::from_size_align(new_size, self.align()).unwrap()
+    }
+
+    /// Layout of `[T; n]`, computed by repeating `T`'s layout `n` times.
+    #[inline]
+    fn array<T>(n: usize) -> Result<Self, LayoutErr> {
+        repeat(&Layout::new::<T>(), n).map(|(k, offs)| {
+            // For a `Layout::new::<T>()` the padded stride equals size_of::<T>().
+            debug_assert!(offs == mem::size_of::<T>());
+            k
+        })
+    }
+
+    /// Layout of `self` followed by `next` (with padding between them);
+    /// also returns the byte offset at which `next` begins.
+    #[inline]
+    fn extend(&self, next: Self) -> Result<(Self, usize), LayoutErr> {
+        let new_align = cmp::max(self.align(), next.align());
+        let pad = padding_needed_for(self, next.align());
+
+        let offset = self.size().checked_add(pad).ok_or(layout_err())?;
+        let new_size = offset.checked_add(next.size()).ok_or(layout_err())?;
+
+        let layout = Layout::from_size_align(new_size, new_align)?;
+        Ok((layout, offset))
+    }
+}
+
+// Bytes of padding after `zelf.size()` to reach the next multiple of `align`.
+// The round-up-then-mask trick assumes `align` is a power of two, which holds
+// for alignments obtained from a `Layout`.
+fn padding_needed_for(zelf: &Layout, align: usize) -> usize {
+    let len = zelf.size();
+    let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
+    len_rounded_up.wrapping_sub(len)
+}
+
+// Layout for `n` padded repetitions of `zelf`; also returns the stride
+// (padded element size). Mirrors the then-unstable `Layout::repeat`.
+#[inline]
+fn repeat(zelf: &Layout, n: usize) -> Result<(Layout, usize), LayoutErr> {
+    let padded_size = zelf.size() + padding_needed_for(zelf, zelf.align());
+    let alloc_size = padded_size.checked_mul(n).ok_or(layout_err())?;
+
+    unsafe {
+        Ok((
+            // Safety: `zelf.align()` came from a valid `Layout`, and the size
+            // overflow was ruled out by `checked_mul` above.
+            Layout::from_size_align_unchecked(alloc_size, zelf.align()),
+            padded_size,
+        ))
+    }
+}
+
+// `LayoutErr` has no public constructor on old compilers, so build the
+// zero-sized value via transmute.
+#[inline(always)]
+fn layout_err() -> LayoutErr {
+    // We can safely transmute this, as zero-sized types have no actual memory
+    // representation. If `LayoutErr` ever has the addition of a field, this
+    // will stop compiling (rather than creating undefined behavior).
+    unsafe { transmute(()) }
+}
+
+// Private sealing module restricting `to_int_unchecked` to the exact
+// float-to-integer pairs supported by std.
+mod sealed {
+    pub trait FloatToInt<Int> {
+        /// # Safety
+        /// The value must be finite and representable in `Int` — the `as`
+        /// cast below does not check this.
+        unsafe fn to_int_unchecked(self) -> Int;
+    }
+
+    macro_rules! impl_float_to_int {
+        ($float:ident => $($int:ident)+) => {$(
+            impl FloatToInt<$int> for $float {
+                #[inline]
+                unsafe fn to_int_unchecked(self) -> $int {
+                    self as $int
+                }
+            }
+        )+}
+    }
+
+    impl_float_to_int!(f32 => u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize);
+    impl_float_to_int!(f64 => u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize);
+}
+
+/// Backport of `f32::to_int_unchecked`/`f64::to_int_unchecked`
+/// (stabilized in Rust 1.44.0).
+pub trait float_v1_44: Float {
+    unsafe fn to_int_unchecked<Int>(self) -> Int
+    where
+        Self: sealed::FloatToInt<Int>;
+}
+
+impl float_v1_44 for f32 {
+    unsafe fn to_int_unchecked<Int>(self) -> Int
+    where
+        f32: sealed::FloatToInt<Int>,
+    {
+        sealed::FloatToInt::to_int_unchecked(self)
+    }
+}
+
+impl float_v1_44 for f64 {
+    unsafe fn to_int_unchecked<Int>(self) -> Int
+    where
+        f64: sealed::FloatToInt<Int>,
+    {
+        sealed::FloatToInt::to_int_unchecked(self)
+    }
+}
diff --git a/src/v1_45.rs b/src/v1_45.rs
new file mode 100644
index 0000000..b18fb8d
--- /dev/null
+++ b/src/v1_45.rs
@@ -0,0 +1,32 @@
+use crate::traits::SignedInteger;
+#[cfg(__standback_before_1_43)]
+use crate::v1_43::int_v1_43;
+
+/// Backport of `saturating_neg`/`saturating_abs` for signed integers
+/// (stabilized in Rust 1.45.0).
+pub trait int_v1_45: SignedInteger {
+    fn saturating_neg(self) -> Self;
+    fn saturating_abs(self) -> Self;
+}
+
+macro_rules! impl_int_v1_45 {
+    ($($type:ty),*) => {$(
+        impl int_v1_45 for $type {
+            fn saturating_neg(self) -> Self {
+                // `-MIN` overflows in two's complement, so clamp it to MAX.
+                if self == Self::MIN {
+                    Self::MAX
+                } else {
+                    -self
+                }
+            }
+
+            fn saturating_abs(self) -> Self {
+                if self.is_negative() {
+                    self.saturating_neg()
+                } else {
+                    self
+                }
+            }
+        }
+    )*};
+}
+
+impl_int_v1_45![i8, i16, i32, i64, i128, isize];
diff --git a/src/v1_46.rs b/src/v1_46.rs
new file mode 100644
index 0000000..17b727a
--- /dev/null
+++ b/src/v1_46.rs
@@ -0,0 +1,49 @@
+use crate::traits::{Integer, Sealed};
+
+/// Backport of `leading_ones`/`trailing_ones` (stabilized in Rust 1.46.0).
+pub trait int_v1_46: Integer {
+    fn leading_ones(self) -> u32;
+    fn trailing_ones(self) -> u32;
+}
+
+macro_rules! impl_int_v1_46 {
+    ($($signed_type:ty, $unsigned_type:ty),*) => {$(
+        impl int_v1_46 for $signed_type {
+            // Signed types delegate through a bit-preserving cast to the
+            // unsigned type of the same width.
+            #[inline]
+            fn leading_ones(self) -> u32 {
+                (self as $unsigned_type).leading_ones()
+            }
+
+            #[inline]
+            fn trailing_ones(self) -> u32 {
+                (self as $unsigned_type).trailing_ones()
+            }
+        }
+
+        impl int_v1_46 for $unsigned_type {
+            // Counting ones is counting zeros of the complement.
+            #[inline]
+            fn leading_ones(self) -> u32 {
+                (!self).leading_zeros()
+            }
+
+            #[inline]
+            fn trailing_ones(self) -> u32 {
+                (!self).trailing_zeros()
+            }
+        }
+    )*};
+}
+
+impl_int_v1_46![i8, u8, i16, u16, i32, u32, i64, u64, i128, u128, isize, usize];
+
+/// Backport of `Option::zip` (stabilized in Rust 1.46.0): `Some((a, b))`
+/// only when both options are `Some`, otherwise `None`.
+pub trait Option_v1_46<T>: Sealed<Option<T>> {
+    fn zip<U>(self, other: Option<U>) -> Option<(T, U)>;
+}
+
+impl<T> Option_v1_46<T> for Option<T> {
+    fn zip<U>(self, other: Option<U>) -> Option<(T, U)> {
+        match (self, other) {
+            (Some(a), Some(b)) => Some((a, b)),
+            _ => None,
+        }
+    }
+}
diff --git a/src/v1_47.rs b/src/v1_47.rs
new file mode 100644
index 0000000..d628abd
--- /dev/null
+++ b/src/v1_47.rs
@@ -0,0 +1,53 @@
+use crate::traits::Sealed;
+use core::ops::{DerefMut, Range};
+
+/// Backport of `Range::is_empty` (stabilized in Rust 1.47.0).
+pub trait Range_v1_47<Idx>: Sealed<Range<Idx>> {
+    fn is_empty(&self) -> bool;
+}
+
+impl<Idx: PartialOrd<Idx>> Range_v1_47<Idx> for Range<Idx> {
+    fn is_empty(&self) -> bool {
+        // Written with `!(<)` rather than `>=` so that a range containing an
+        // incomparable bound (e.g. NaN) counts as empty.
+        !(self.start < self.end)
+    }
+}
+
+/// Backport of `Result::as_deref`/`as_deref_mut` (stabilized in Rust 1.47.0).
+/// Only the `Ok` side is dereferenced; the `Err` side is merely borrowed.
+pub trait Result_v1_47<T: DerefMut, E>: Sealed<Result<T, E>> {
+    fn as_deref(&self) -> Result<&T::Target, &E>;
+    fn as_deref_mut(&mut self) -> Result<&mut T::Target, &mut E>;
+}
+
+impl<T: DerefMut, E> Result_v1_47<T, E> for Result<T, E> {
+    fn as_deref(&self) -> Result<&T::Target, &E> {
+        self.as_ref().map(|t| t.deref())
+    }
+
+    fn as_deref_mut(&mut self) -> Result<&mut T::Target, &mut E> {
+        self.as_mut().map(|t| t.deref_mut())
+    }
+}
+
+/// Backport of `Vec::leak` (stabilized in Rust 1.47.0): consumes the vector
+/// and returns a mutable slice that is never freed.
+#[cfg(feature = "std")]
+pub trait Vec_v1_47<T>: Sealed<Vec<T>> {
+    fn leak<'a>(self) -> &'a mut [T]
+    where
+        T: 'a;
+}
+
+#[cfg(feature = "std")]
+impl<T> Vec_v1_47<T> for Vec<T> {
+    #[inline]
+    fn leak<'a>(self) -> &'a mut [T]
+    where
+        T: 'a,
+    {
+        // `into_boxed_slice` drops excess capacity; `Box::leak` then gives up
+        // ownership for the caller's chosen lifetime.
+        Box::leak(self.into_boxed_slice())
+    }
+}
+
+// TAU (= 2*pi) constants, stabilized in Rust 1.47.0.
+pub mod f32 {
+    pub const TAU: f32 = 6.28318530717958647692528676655900577_f32;
+}
+
+pub mod f64 {
+    pub const TAU: f64 = 6.28318530717958647692528676655900577_f64;
+}