Importing rustc-1.74.1

Bug: 310977762
Test: ./build.py --lto=thin
Change-Id: I4d0e591d75383a34a0f05a5695fe102cbe4e6f79
diff --git a/Cargo.lock b/Cargo.lock
index 266f34e..476f557 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4,21 +4,12 @@
 
 [[package]]
 name = "addr2line"
-version = "0.20.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3"
-dependencies = [
- "gimli 0.27.3",
-]
-
-[[package]]
-name = "addr2line"
 version = "0.21.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
 dependencies = [
  "compiler_builtins",
- "gimli 0.28.0",
+ "gimli",
  "rustc-std-workspace-alloc",
  "rustc-std-workspace-core",
 ]
@@ -117,17 +108,25 @@
 ]
 
 [[package]]
-name = "anstream"
-version = "0.3.2"
+name = "ansi_term"
+version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
+checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "anstream"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c"
 dependencies = [
  "anstyle",
  "anstyle-parse",
  "anstyle-query",
  "anstyle-wincon",
  "colorchoice",
- "is-terminal",
  "utf8parse",
 ]
 
@@ -152,17 +151,17 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
 dependencies = [
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
 name = "anstyle-wincon"
-version = "1.0.1"
+version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188"
+checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd"
 dependencies = [
  "anstyle",
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -180,7 +179,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9792d37ca5173d7e7f4fe453739a0671d0557915a030a383d6b866476bbc3e71"
 dependencies = [
- "object 0.32.0",
+ "object",
 ]
 
 [[package]]
@@ -218,7 +217,7 @@
  "proc-macro2",
  "quote",
  "serde",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -246,16 +245,16 @@
 
 [[package]]
 name = "backtrace"
-version = "0.3.68"
+version = "0.3.69"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12"
+checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
 dependencies = [
- "addr2line 0.20.0",
+ "addr2line",
  "cc",
  "cfg-if",
  "libc",
  "miniz_oxide",
- "object 0.31.1",
+ "object",
  "rustc-demangle",
 ]
 
@@ -291,9 +290,9 @@
 
 [[package]]
 name = "bitflags"
-version = "2.3.3"
+version = "2.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42"
+checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
 
 [[package]]
 name = "block-buffer"
@@ -459,25 +458,23 @@
 
 [[package]]
 name = "clap"
-version = "4.3.10"
+version = "4.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "384e169cc618c613d5e3ca6404dda77a8685a63e08660dcc64abaf7da7cb0c7a"
+checksum = "b1d7b8d5ec32af0fadc644bf1fd509a688c2103b185644bb1e29d164e0703136"
 dependencies = [
  "clap_builder",
  "clap_derive",
- "once_cell",
 ]
 
 [[package]]
 name = "clap_builder"
-version = "4.3.10"
+version = "4.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef137bbe35aab78bdb468ccfba75a5f4d8321ae011d34063770780545176af2d"
+checksum = "5179bb514e4d7c2051749d8fcefa2ed6d06a9f4e6d69faf3805f5d80b8cf8d56"
 dependencies = [
  "anstream",
  "anstyle",
  "clap_lex",
- "once_cell",
  "strsim",
  "terminal_size",
 ]
@@ -493,14 +490,14 @@
 
 [[package]]
 name = "clap_derive"
-version = "4.3.2"
+version = "4.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f"
+checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873"
 dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -511,11 +508,10 @@
 
 [[package]]
 name = "clippy"
-version = "0.1.73"
+version = "0.1.74"
 dependencies = [
  "clippy_lints",
  "clippy_utils",
- "derive-new",
  "filetime",
  "futures",
  "if_chain",
@@ -525,13 +521,13 @@
  "regex",
  "rustc_tools_util",
  "serde",
- "syn 2.0.27",
+ "syn 2.0.29",
  "tempfile",
  "termize",
  "tester",
  "tokio",
  "toml 0.7.5",
- "ui_test",
+ "ui_test 0.20.0",
  "walkdir",
 ]
 
@@ -550,7 +546,7 @@
 
 [[package]]
 name = "clippy_lints"
-version = "0.1.73"
+version = "0.1.74"
 dependencies = [
  "arrayvec",
  "cargo_metadata",
@@ -558,7 +554,6 @@
  "declare_clippy_lint",
  "if_chain",
  "itertools",
- "pulldown-cmark",
  "quine-mc_cluskey",
  "regex",
  "regex-syntax 0.7.2",
@@ -575,7 +570,7 @@
 
 [[package]]
 name = "clippy_utils"
-version = "0.1.73"
+version = "0.1.74"
 dependencies = [
  "arrayvec",
  "if_chain",
@@ -628,20 +623,26 @@
 
 [[package]]
 name = "colored"
-version = "2.0.0"
+version = "2.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd"
+checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6"
 dependencies = [
- "atty",
+ "is-terminal",
  "lazy_static",
- "winapi",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
-name = "compiler_builtins"
-version = "0.1.100"
+name = "comma"
+version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6c0f24437059853f0fa64afc51f338f93647a3de4cf3358ba1bb4171a199775"
+checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
+
+[[package]]
+name = "compiler_builtins"
+version = "0.1.101"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01a6d58e9c3408138099a396a98fd0d0e6cfb25d723594d2ae48b5004513fd5b"
 dependencies = [
  "cc",
  "rustc-std-workspace-core",
@@ -657,6 +658,7 @@
  "diff",
  "getopts",
  "glob",
+ "home",
  "lazycell",
  "libc",
  "miow",
@@ -674,6 +676,19 @@
 ]
 
 [[package]]
+name = "console"
+version = "0.15.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8"
+dependencies = [
+ "encode_unicode",
+ "lazy_static",
+ "libc",
+ "unicode-width",
+ "windows-sys 0.45.0",
+]
+
+[[package]]
 name = "convert_case"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -704,6 +719,18 @@
 checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
 
 [[package]]
+name = "coverage-dump"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "leb128",
+ "md-5",
+ "miniz_oxide",
+ "regex",
+ "rustc-demangle",
+]
+
+[[package]]
 name = "coverage_test_macros"
 version = "0.0.0"
 
@@ -795,7 +822,7 @@
 checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e"
 dependencies = [
  "nix",
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -830,12 +857,36 @@
 
 [[package]]
 name = "darling"
+version = "0.14.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
+dependencies = [
+ "darling_core 0.14.4",
+ "darling_macro 0.14.4",
+]
+
+[[package]]
+name = "darling"
 version = "0.20.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e"
 dependencies = [
- "darling_core",
- "darling_macro",
+ "darling_core 0.20.3",
+ "darling_macro 0.20.3",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.14.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
+dependencies = [
+ "fnv",
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim",
+ "syn 1.0.109",
 ]
 
 [[package]]
@@ -849,7 +900,18 @@
  "proc-macro2",
  "quote",
  "strsim",
- "syn 2.0.27",
+ "syn 2.0.29",
+]
+
+[[package]]
+name = "darling_macro"
+version = "0.14.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
+dependencies = [
+ "darling_core 0.14.4",
+ "quote",
+ "syn 1.0.109",
 ]
 
 [[package]]
@@ -858,9 +920,9 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5"
 dependencies = [
- "darling_core",
+ "darling_core 0.20.3",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -871,25 +933,45 @@
 
 [[package]]
 name = "declare_clippy_lint"
-version = "0.1.73"
+version = "0.1.74"
 dependencies = [
  "itertools",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
-name = "derive-new"
-version = "0.5.9"
+name = "derive_builder"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535"
+checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8"
 dependencies = [
+ "derive_builder_macro",
+]
+
+[[package]]
+name = "derive_builder_core"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f"
+dependencies = [
+ "darling 0.14.4",
  "proc-macro2",
  "quote",
  "syn 1.0.109",
 ]
 
 [[package]]
+name = "derive_builder_macro"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e"
+dependencies = [
+ "derive_builder_core",
+ "syn 1.0.109",
+]
+
+[[package]]
 name = "derive_more"
 version = "0.99.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -908,10 +990,10 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4e8ef033054e131169b8f0f9a7af8f5533a9436fadf3c500ed547f730f07090d"
 dependencies = [
- "darling",
+ "darling 0.20.3",
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -988,7 +1070,7 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -1045,6 +1127,12 @@
 ]
 
 [[package]]
+name = "encode_unicode"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
+
+[[package]]
 name = "encoding_rs"
 version = "0.8.32"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1068,19 +1156,6 @@
 
 [[package]]
 name = "env_logger"
-version = "0.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"
-dependencies = [
- "atty",
- "humantime 2.1.0",
- "log",
- "regex",
- "termcolor",
-]
-
-[[package]]
-name = "env_logger"
 version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0"
@@ -1106,7 +1181,7 @@
 dependencies = [
  "errno-dragonfly",
  "libc",
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -1163,12 +1238,9 @@
 
 [[package]]
 name = "fastrand"
-version = "1.9.0"
+version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be"
-dependencies = [
- "instant",
-]
+checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764"
 
 [[package]]
 name = "field-offset"
@@ -1189,7 +1261,7 @@
  "cfg-if",
  "libc",
  "redox_syscall 0.2.16",
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -1354,7 +1426,7 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -1437,12 +1509,6 @@
 
 [[package]]
 name = "gimli"
-version = "0.27.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e"
-
-[[package]]
-name = "gimli"
 version = "0.28.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
@@ -1568,6 +1634,15 @@
 checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
 
 [[package]]
+name = "home"
+version = "0.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb"
+dependencies = [
+ "windows-sys 0.48.0",
+]
+
+[[package]]
 name = "html-checker"
 version = "0.1.0"
 dependencies = [
@@ -1840,6 +1915,19 @@
 ]
 
 [[package]]
+name = "indicatif"
+version = "0.17.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730"
+dependencies = [
+ "console",
+ "instant",
+ "number_prefix",
+ "portable-atomic",
+ "unicode-width",
+]
+
+[[package]]
 name = "indoc"
 version = "1.0.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1888,17 +1976,6 @@
 ]
 
 [[package]]
-name = "io-lifetimes"
-version = "1.0.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
-dependencies = [
- "hermit-abi 0.3.2",
- "libc",
- "windows-sys",
-]
-
-[[package]]
 name = "ipnet"
 version = "2.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1911,8 +1988,8 @@
 checksum = "24fddda5af7e54bf7da53067d6e802dbcc381d0a8eef629df528e3ebf68755cb"
 dependencies = [
  "hermit-abi 0.3.2",
- "rustix 0.38.2",
- "windows-sys",
+ "rustix",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -2010,10 +2087,22 @@
 checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
 
 [[package]]
-name = "libc"
-version = "0.2.147"
+name = "leb128"
+version = "0.2.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
+checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67"
+
+[[package]]
+name = "levenshtein"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760"
+
+[[package]]
+name = "libc"
+version = "0.2.148"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b"
 dependencies = [
  "rustc-std-workspace-core",
 ]
@@ -2096,15 +2185,9 @@
 
 [[package]]
 name = "linux-raw-sys"
-version = "0.3.8"
+version = "0.4.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
-
-[[package]]
-name = "linux-raw-sys"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0"
+checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128"
 
 [[package]]
 name = "litemap"
@@ -2305,7 +2388,7 @@
 dependencies = [
  "libc",
  "wasi",
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -2314,7 +2397,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "359f76430b20a79f9e20e115b3428614e654f04fab314482fc0fda0ebd3c6044"
 dependencies = [
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -2323,7 +2406,7 @@
 dependencies = [
  "colored",
  "ctrlc",
- "env_logger 0.9.3",
+ "env_logger 0.10.0",
  "getrandom",
  "lazy_static",
  "libc",
@@ -2334,8 +2417,9 @@
  "rand",
  "regex",
  "rustc_version",
+ "serde",
  "smallvec",
- "ui_test",
+ "ui_test 0.21.2",
 ]
 
 [[package]]
@@ -2430,13 +2514,10 @@
 ]
 
 [[package]]
-name = "object"
-version = "0.31.1"
+name = "number_prefix"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1"
-dependencies = [
- "memchr",
-]
+checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
 
 [[package]]
 name = "object"
@@ -2503,7 +2584,7 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -2531,6 +2612,8 @@
  "anyhow",
  "build_helper",
  "camino",
+ "clap",
+ "derive_builder",
  "env_logger 0.10.0",
  "fs_extra",
  "glob",
@@ -2570,6 +2653,15 @@
 ]
 
 [[package]]
+name = "pad"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2ad9b889f1b12e0b9ee24db044b5129150d5eada288edc800f789928dc8c0e3"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
 name = "panic_abort"
 version = "0.0.0"
 dependencies = [
@@ -2637,7 +2729,7 @@
  "libc",
  "redox_syscall 0.3.5",
  "smallvec",
- "windows-targets",
+ "windows-targets 0.48.1",
 ]
 
 [[package]]
@@ -2691,7 +2783,7 @@
  "pest_meta",
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -2773,6 +2865,12 @@
 ]
 
 [[package]]
+name = "portable-atomic"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f32154ba0af3a075eefa1eda8bb414ee928f62303a54ea85b8d6638ff1a6ee9e"
+
+[[package]]
 name = "ppv-lite86"
 version = "0.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2785,6 +2883,16 @@
 checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
 
 [[package]]
+name = "prettydiff"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ff1fec61082821f8236cf6c0c14e8172b62ce8a72a0eedc30d3b247bb68dc11"
+dependencies = [
+ "ansi_term",
+ "pad",
+]
+
+[[package]]
 name = "proc-macro-hack"
 version = "0.5.20+deprecated"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2864,6 +2972,27 @@
 ]
 
 [[package]]
+name = "r-efi"
+version = "4.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "575fc2d9b3da54adbdfaddf6eca48fec256d977c8630a1750b8991347d1ac911"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "r-efi-alloc"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "31d6f09fe2b6ad044bc3d2c34ce4979796581afd2f1ebc185837e02421e02fd7"
+dependencies = [
+ "compiler_builtins",
+ "r-efi",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
 name = "rand"
 version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3116,6 +3245,7 @@
  "rustc_driver",
  "rustc_driver_impl",
  "rustc_smir",
+ "stable_mir",
 ]
 
 [[package]]
@@ -3356,7 +3486,7 @@
  "cstr",
  "libc",
  "measureme",
- "object 0.32.0",
+ "object",
  "rustc-demangle",
  "rustc_ast",
  "rustc_attr",
@@ -3392,7 +3522,7 @@
  "itertools",
  "jobserver",
  "libc",
- "object 0.32.0",
+ "object",
  "pathdiff",
  "regex",
  "rustc_arena",
@@ -3636,7 +3766,7 @@
  "fluent-syntax",
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
  "unic-langid",
 ]
 
@@ -3906,7 +4036,7 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
  "synstructure 0.13.0",
 ]
 
@@ -4096,7 +4226,7 @@
 name = "rustc_parse_format"
 version = "0.0.0"
 dependencies = [
- "rustc_data_structures",
+ "rustc_index",
  "rustc_lexer",
 ]
 
@@ -4167,7 +4297,6 @@
  "measureme",
  "memoffset",
  "rustc-rayon-core",
- "rustc_ast",
  "rustc_data_structures",
  "rustc_errors",
  "rustc_hir",
@@ -4274,11 +4403,14 @@
 name = "rustc_smir"
 version = "0.0.0"
 dependencies = [
+ "rustc_driver",
  "rustc_hir",
+ "rustc_interface",
  "rustc_middle",
+ "rustc_session",
  "rustc_span",
  "rustc_target",
- "scoped-tls",
+ "stable_mir",
  "tracing",
 ]
 
@@ -4326,7 +4458,7 @@
 version = "0.0.0"
 dependencies = [
  "bitflags 1.3.2",
- "object 0.32.0",
+ "object",
  "rustc_abi",
  "rustc_data_structures",
  "rustc_feature",
@@ -4373,15 +4505,12 @@
 name = "rustc_traits"
 version = "0.0.0"
 dependencies = [
- "rustc_ast",
  "rustc_data_structures",
  "rustc_hir",
  "rustc_infer",
  "rustc_middle",
  "rustc_span",
- "rustc_target",
  "rustc_trait_selection",
- "smallvec",
  "tracing",
 ]
 
@@ -4514,7 +4643,7 @@
  "proc-macro2",
  "quote",
  "serde",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -4548,29 +4677,15 @@
 
 [[package]]
 name = "rustix"
-version = "0.37.22"
+version = "0.38.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8818fa822adcc98b18fedbb3632a6a33213c070556b5aa7c4c8cc21cff565c4c"
+checksum = "747c788e9ce8e92b12cd485c49ddf90723550b654b32508f979b71a7b1ecda4f"
 dependencies = [
- "bitflags 1.3.2",
- "errno",
- "io-lifetimes",
- "libc",
- "linux-raw-sys 0.3.8",
- "windows-sys",
-]
-
-[[package]]
-name = "rustix"
-version = "0.38.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aabcb0461ebd01d6b79945797c27f8529082226cb630a9865a71870ff63532a4"
-dependencies = [
- "bitflags 2.3.3",
+ "bitflags 2.4.0",
  "errno",
  "libc",
- "linux-raw-sys 0.4.3",
- "windows-sys",
+ "linux-raw-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -4611,7 +4726,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88"
 dependencies = [
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -4666,22 +4781,22 @@
 
 [[package]]
 name = "serde"
-version = "1.0.164"
+version = "1.0.185"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
+checksum = "be9b6f69f1dfd54c3b568ffa45c310d6973a5e5148fd40cf515acaf38cf5bc31"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.164"
+version = "1.0.185"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68"
+checksum = "dc59dfdcbad1437773485e0367fea4b090a2e0a16d9ffc46af47764536a298ec"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -4833,6 +4948,14 @@
 checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
 
 [[package]]
+name = "stable_mir"
+version = "0.1.0-preview"
+dependencies = [
+ "scoped-tls",
+ "tracing",
+]
+
+[[package]]
 name = "stacker"
 version = "0.1.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -4855,7 +4978,7 @@
 name = "std"
 version = "0.0.0"
 dependencies = [
- "addr2line 0.21.0",
+ "addr2line",
  "alloc",
  "cfg-if",
  "compiler_builtins",
@@ -4866,10 +4989,12 @@
  "hermit-abi 0.3.2",
  "libc",
  "miniz_oxide",
- "object 0.32.0",
+ "object",
  "panic_abort",
  "panic_unwind",
  "profiler_builtins",
+ "r-efi",
+ "r-efi-alloc",
  "rand",
  "rand_xorshift",
  "rustc-demangle",
@@ -4962,9 +5087,9 @@
 
 [[package]]
 name = "syn"
-version = "2.0.27"
+version = "2.0.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b60f673f44a8255b9c8c657daf66a596d435f2da81a555b06dc644d080ba45e0"
+checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -4991,7 +5116,7 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
  "unicode-xid",
 ]
 
@@ -5031,16 +5156,15 @@
 
 [[package]]
 name = "tempfile"
-version = "3.6.0"
+version = "3.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6"
+checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef"
 dependencies = [
- "autocfg",
  "cfg-if",
  "fastrand",
  "redox_syscall 0.3.5",
- "rustix 0.37.22",
- "windows-sys",
+ "rustix",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -5076,12 +5200,12 @@
 
 [[package]]
 name = "terminal_size"
-version = "0.2.6"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237"
+checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7"
 dependencies = [
- "rustix 0.37.22",
- "windows-sys",
+ "rustix",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -5161,7 +5285,7 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -5170,9 +5294,9 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4db52ee8fec06e119b692ef3dd2c4cf621a99204c1b8c47407870ed050305b9b"
 dependencies = [
- "gimli 0.28.0",
+ "gimli",
  "hashbrown 0.14.0",
- "object 0.32.0",
+ "object",
  "tracing",
 ]
 
@@ -5280,7 +5404,7 @@
  "num_cpus",
  "pin-project-lite",
  "socket2",
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -5382,7 +5506,7 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
 ]
 
 [[package]]
@@ -5436,11 +5560,11 @@
 
 [[package]]
 name = "tracing-tree"
-version = "0.2.3"
+version = "0.2.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f9742d8df709837409dbb22aa25dd7769c260406f20ff48a2320b80a4a6aed0"
+checksum = "92d6b63348fad3ae0439b8bebf8d38fb5bda0b115d7a8a7e6f165f12790c58c3"
 dependencies = [
- "atty",
+ "is-terminal",
  "nu-ansi-term",
  "tracing-core",
  "tracing-log",
@@ -5497,18 +5621,50 @@
 
 [[package]]
 name = "ui_test"
-version = "0.11.7"
+version = "0.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c21899b59f53717dfad29e4f46e5b21a200a1b6888ab86532a07cfc8b48dd78c"
+checksum = "bfd8fb9b15c8332cf51bfc2dc4830063b2446a9c9d732421b56f2478024a3971"
 dependencies = [
+ "annotate-snippets",
+ "anyhow",
  "bstr",
  "cargo-platform",
  "cargo_metadata",
  "color-eyre",
  "colored",
+ "comma",
  "crossbeam-channel",
- "diff",
+ "indicatif",
  "lazy_static",
+ "levenshtein",
+ "prettydiff",
+ "regex",
+ "rustc_version",
+ "rustfix",
+ "serde",
+ "serde_json",
+ "tempfile",
+]
+
+[[package]]
+name = "ui_test"
+version = "0.21.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aaf4bf7c184b8dfc7a4d3b90df789b1eb992ee42811cd115f32a7a1eb781058d"
+dependencies = [
+ "annotate-snippets",
+ "anyhow",
+ "bstr",
+ "cargo-platform",
+ "cargo_metadata",
+ "color-eyre",
+ "colored",
+ "comma",
+ "crossbeam-channel",
+ "indicatif",
+ "lazy_static",
+ "levenshtein",
+ "prettydiff",
  "regex",
  "rustc_version",
  "rustfix",
@@ -5777,7 +5933,7 @@
  "once_cell",
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
  "wasm-bindgen-shared",
 ]
 
@@ -5811,7 +5967,7 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.27",
+ "syn 2.0.29",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
 ]
@@ -5869,24 +6025,35 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f"
 dependencies = [
- "windows-targets",
+ "windows-targets 0.48.1",
 ]
 
 [[package]]
 name = "windows-bindgen"
-version = "0.49.0"
+version = "0.51.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6935fb09b84ee57929ae92518b475f5dfdfbeb87c5334756acc28ee8e202b60"
+checksum = "bc1f16b778125675feee0d15d6dd9f6af0e3ac52b3233d63a10aa39230c1cd75"
 dependencies = [
+ "proc-macro2",
+ "rayon",
+ "syn 2.0.29",
  "windows-metadata",
- "windows-tokens",
 ]
 
 [[package]]
 name = "windows-metadata"
-version = "0.49.0"
+version = "0.51.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f5bca94a32bf1e6a376522b6601275a3b611ee885ec0f1b6a05f17e8cfd3385"
+checksum = "753135d996f9da437c0b31dbde3032489a61708361929bcc07d4fba0b161000e"
+
+[[package]]
+name = "windows-sys"
+version = "0.45.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
+dependencies = [
+ "windows-targets 0.42.2",
+]
 
 [[package]]
 name = "windows-sys"
@@ -5894,7 +6061,22 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
 dependencies = [
- "windows-targets",
+ "windows-targets 0.48.1",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
+dependencies = [
+ "windows_aarch64_gnullvm 0.42.2",
+ "windows_aarch64_msvc 0.42.2",
+ "windows_i686_gnu 0.42.2",
+ "windows_i686_msvc 0.42.2",
+ "windows_x86_64_gnu 0.42.2",
+ "windows_x86_64_gnullvm 0.42.2",
+ "windows_x86_64_msvc 0.42.2",
 ]
 
 [[package]]
@@ -5903,20 +6085,20 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f"
 dependencies = [
- "windows_aarch64_gnullvm",
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_gnullvm",
- "windows_x86_64_msvc",
+ "windows_aarch64_gnullvm 0.48.0",
+ "windows_aarch64_msvc 0.48.0",
+ "windows_i686_gnu 0.48.0",
+ "windows_i686_msvc 0.48.0",
+ "windows_x86_64_gnu 0.48.0",
+ "windows_x86_64_gnullvm 0.48.0",
+ "windows_x86_64_msvc 0.48.0",
 ]
 
 [[package]]
-name = "windows-tokens"
-version = "0.48.0"
+name = "windows_aarch64_gnullvm"
+version = "0.42.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b34c9a3b28cb41db7385546f7f9a8179348dffc89923dde66857b1ba5312f6b4"
+checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
 
 [[package]]
 name = "windows_aarch64_gnullvm"
@@ -5926,36 +6108,72 @@
 
 [[package]]
 name = "windows_aarch64_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
+
+[[package]]
+name = "windows_aarch64_msvc"
 version = "0.48.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
 
 [[package]]
 name = "windows_i686_gnu"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
+
+[[package]]
+name = "windows_i686_gnu"
 version = "0.48.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
 
 [[package]]
 name = "windows_i686_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
+
+[[package]]
+name = "windows_i686_msvc"
 version = "0.48.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
 
 [[package]]
 name = "windows_x86_64_gnu"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
+
+[[package]]
+name = "windows_x86_64_gnu"
 version = "0.48.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
 
 [[package]]
 name = "windows_x86_64_gnullvm"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
 version = "0.48.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
 
 [[package]]
 name = "windows_x86_64_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
+
+[[package]]
+name = "windows_x86_64_msvc"
 version = "0.48.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
diff --git a/Cargo.toml b/Cargo.toml
index d2e84d5..9b11ae8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -43,6 +43,7 @@
   "src/tools/generate-windows-sys",
   "src/tools/rustdoc-gui-test",
   "src/tools/opt-dist",
+  "src/tools/coverage-dump",
 ]
 
 exclude = [
diff --git a/RELEASES.md b/RELEASES.md
index 7674418..0c3a735 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -1,3 +1,125 @@
+Version 1.74.1 (2023-12-07)
+===========================
+
+- [Resolved spurious STATUS_ACCESS_VIOLATIONs in LLVM](https://github.com/rust-lang/rust/pull/118464)
+- [Clarify guarantees for std::mem::discriminant](https://github.com/rust-lang/rust/pull/118006)
+- [Fix some subtyping-related regressions](https://github.com/rust-lang/rust/pull/116415)
+
+Version 1.74.0 (2023-11-16)
+==========================
+
+<a id="1.74.0-Language"></a>
+
+Language
+--------
+
+- [Codify that `std::mem::Discriminant<T>` does not depend on any lifetimes in T](https://github.com/rust-lang/rust/pull/104299/)
+- [Replace `private_in_public` lint with `private_interfaces` and `private_bounds` per RFC 2145](https://github.com/rust-lang/rust/pull/113126/)
+  Read more in [RFC 2145](https://rust-lang.github.io/rfcs/2145-type-privacy.html).
+- [Allow explicit `#[repr(Rust)]`](https://github.com/rust-lang/rust/pull/114201/)
+- [closure field capturing: don't depend on alignment of packed fields](https://github.com/rust-lang/rust/pull/115315/)
+- [Enable MIR-based drop-tracking for `async` blocks](https://github.com/rust-lang/rust/pull/107421/)
+
+<a id="1.74.0-Compiler"></a>
+
+Compiler
+--------
+
+- [stabilize combining +bundle and +whole-archive link modifiers](https://github.com/rust-lang/rust/pull/113301/)
+- [Stabilize `PATH` option for `--print KIND=PATH`](https://github.com/rust-lang/rust/pull/114183/)
+- [Enable ASAN/LSAN/TSAN for `*-apple-ios-macabi`](https://github.com/rust-lang/rust/pull/115644/)
+- [Promote loongarch64-unknown-none* to Tier 2](https://github.com/rust-lang/rust/pull/115368/)
+- [Add `i686-pc-windows-gnullvm` as a tier 3 target](https://github.com/rust-lang/rust/pull/115687/)
+
+<a id="1.74.0-Libraries"></a>
+
+Libraries
+---------
+
+- [Implement `From<OwnedFd/Handle>` for ChildStdin/out/err](https://github.com/rust-lang/rust/pull/98704/)
+- [Implement `From<{&,&mut} [T; N]>` for `Vec<T>` where `T: Clone`](https://github.com/rust-lang/rust/pull/111278/)
+- [impl Step for IP addresses](https://github.com/rust-lang/rust/pull/113748/)
+- [Implement `From<[T; N]>` for `Rc<[T]>` and `Arc<[T]>`](https://github.com/rust-lang/rust/pull/114041/)
+- [`impl TryFrom<char> for u16`](https://github.com/rust-lang/rust/pull/114065/)
+- [Stabilize `io_error_other` feature](https://github.com/rust-lang/rust/pull/115453/)
+- [Stabilize the `Saturating` type](https://github.com/rust-lang/rust/pull/115477/)
+- [Stabilize const_transmute_copy](https://github.com/rust-lang/rust/pull/115520/)
+
+<a id="1.74.0-Stabilized-APIs"></a>
+
+Stabilized APIs
+---------------
+
+- [`core::num::Saturating`](https://doc.rust-lang.org/stable/std/num/struct.Saturating.html)
+- [`impl From<io::Stdout> for std::process::Stdio`](https://doc.rust-lang.org/stable/std/process/struct.Stdio.html#impl-From%3CStdout%3E-for-Stdio)
+- [`impl From<io::Stderr> for std::process::Stdio`](https://doc.rust-lang.org/stable/std/process/struct.Stdio.html#impl-From%3CStderr%3E-for-Stdio)
+- [`impl From<OwnedHandle> for std::process::Child{Stdin, Stdout, Stderr}`](https://doc.rust-lang.org/stable/std/process/struct.Stdio.html#impl-From%3CStderr%3E-for-Stdio)
+- [`impl From<OwnedFd> for std::process::Child{Stdin, Stdout, Stderr}`](https://doc.rust-lang.org/stable/std/process/struct.Stdio.html#impl-From%3CStderr%3E-for-Stdio)
+- [`std::ffi::OsString::from_encoded_bytes_unchecked`](https://doc.rust-lang.org/stable/std/ffi/struct.OsString.html#method.from_encoded_bytes_unchecked)
+- [`std::ffi::OsString::into_encoded_bytes`](https://doc.rust-lang.org/stable/std/ffi/struct.OsString.html#method.into_encoded_bytes)
+- [`std::ffi::OsStr::from_encoded_bytes_unchecked`](https://doc.rust-lang.org/stable/std/ffi/struct.OsStr.html#method.from_encoded_bytes_unchecked)
+- [`std::ffi::OsStr::as_encoded_bytes`](https://doc.rust-lang.org/stable/std/ffi/struct.OsStr.html#method.as_encoded_bytes)
+- [`std::io::Error::other`](https://doc.rust-lang.org/stable/std/io/struct.Error.html#method.other)
+- [`impl TryFrom<char> for u16`](https://doc.rust-lang.org/stable/std/primitive.u16.html#impl-TryFrom%3Cchar%3E-for-u16)
+- [`impl<T: Clone, const N: usize> From<&[T; N]> for Vec<T>`](https://doc.rust-lang.org/stable/std/vec/struct.Vec.html#impl-From%3C%26%5BT;+N%5D%3E-for-Vec%3CT,+Global%3E)
+- [`impl<T: Clone, const N: usize> From<&mut [T; N]> for Vec<T>`](https://doc.rust-lang.org/stable/std/vec/struct.Vec.html#impl-From%3C%26mut+%5BT;+N%5D%3E-for-Vec%3CT,+Global%3E)
+- [`impl<T, const N: usize> From<[T; N]> for Arc<[T]>`](https://doc.rust-lang.org/stable/std/sync/struct.Arc.html#impl-From%3C%5BT;+N%5D%3E-for-Arc%3C%5BT%5D,+Global%3E)
+- [`impl<T, const N: usize> From<[T; N]> for Rc<[T]>`](https://doc.rust-lang.org/stable/std/rc/struct.Rc.html#impl-From%3C%5BT;+N%5D%3E-for-Rc%3C%5BT%5D,+Global%3E)
+
+These APIs are now stable in const contexts:
+
+- [`core::mem::transmute_copy`](https://doc.rust-lang.org/beta/std/mem/fn.transmute_copy.html)
+- [`str::is_ascii`](https://doc.rust-lang.org/beta/std/primitive.str.html#method.is_ascii)
+- [`[u8]::is_ascii`](https://doc.rust-lang.org/beta/std/primitive.slice.html#method.is_ascii)
+
+<a id="1.74.0-Cargo"></a>
+
+Cargo
+-----
+
+- [fix: Set MSRV for internal packages](https://github.com/rust-lang/cargo/pull/12381/)
+- [config: merge lists in precedence order](https://github.com/rust-lang/cargo/pull/12515/)
+- [fix(update): Clarify meaning of --aggressive as --recursive](https://github.com/rust-lang/cargo/pull/12544/)
+- [fix(update): Make `-p` more convenient by being positional](https://github.com/rust-lang/cargo/pull/12545/)
+- [feat(help): Add styling to help output ](https://github.com/rust-lang/cargo/pull/12578/)
+- [feat(pkgid): Allow incomplete versions when unambigious](https://github.com/rust-lang/cargo/pull/12614/)
+- [feat: stabilize credential-process and registry-auth](https://github.com/rust-lang/cargo/pull/12649/)
+- [feat(cli): Add '-n' to dry-run](https://github.com/rust-lang/cargo/pull/12660/)
+- [Add support for `target.'cfg(..)'.linker`](https://github.com/rust-lang/cargo/pull/12535/)
+- [Stabilize `--keep-going`](https://github.com/rust-lang/cargo/pull/12568/)
+- [feat: Stabilize lints](https://github.com/rust-lang/cargo/pull/12648/)
+
+<a id="1.74.0-Rustdoc"></a>
+
+Rustdoc
+-------
+
+- [Add warning block support in rustdoc](https://github.com/rust-lang/rust/pull/106561/)
+- [Accept additional user-defined syntax classes in fenced code blocks](https://github.com/rust-lang/rust/pull/110800/)
+- [rustdoc-search: add support for type parameters](https://github.com/rust-lang/rust/pull/112725/)
+- [rustdoc: show inner enum and struct in type definition for concrete type](https://github.com/rust-lang/rust/pull/114855/)
+
+<a id="1.74.0-Compatibility-Notes"></a>
+
+Compatibility Notes
+-------------------
+
+- [Raise minimum supported Apple OS versions](https://github.com/rust-lang/rust/pull/104385/)
+- [make Cell::swap panic if the Cells partially overlap](https://github.com/rust-lang/rust/pull/114795/)
+- [Reject invalid crate names in `--extern`](https://github.com/rust-lang/rust/pull/116001/)
+- [Don't resolve generic impls that may be shadowed by dyn built-in impls](https://github.com/rust-lang/rust/pull/114941/)
+
+<a id="1.74.0-Internal-Changes"></a>
+
+Internal Changes
+----------------
+
+These changes do not affect any public interfaces of Rust, but they represent
+significant improvements to the performance or internals of rustc and related
+tools.
+
+None this cycle.
+
 Version 1.73.0 (2023-10-05)
 ==========================
 
@@ -11,7 +133,7 @@
 - [Support interpolated block for `try` and `async` in macros.](https://github.com/rust-lang/rust/pull/112953/)
 - [Make `unconditional_recursion` lint detect recursive drops.](https://github.com/rust-lang/rust/pull/113902/)
 - [Future compatibility warning for some impls being incorrectly considered not overlapping.](https://github.com/rust-lang/rust/pull/114023/)
- - [The `invalid_reference_casting` lint is now **deny-by-default** (instead of allow-by-default)](https://github.com/rust-lang/rust/pull/112431)
+- [The `invalid_reference_casting` lint is now **deny-by-default** (instead of allow-by-default)](https://github.com/rust-lang/rust/pull/112431)
 
 <a id="1.73.0-Compiler"></a>
 
@@ -61,7 +183,7 @@
 - [`std::ffi::FromBytesUntilNulError`](https://doc.rust-lang.org/stable/std/ffi/struct.FromBytesUntilNulError.html)
 - [`std::os::unix::fs::chown`](https://doc.rust-lang.org/stable/std/os/unix/fs/fn.chown.html)
 - [`std::os::unix::fs::fchown`](https://doc.rust-lang.org/stable/std/os/unix/fs/fn.fchown.html)
-- [`std::os::unix::fs::lfchown`](https://doc.rust-lang.org/stable/std/os/unix/fs/fn.lchown.html)
+- [`std::os::unix::fs::lchown`](https://doc.rust-lang.org/stable/std/os/unix/fs/fn.lchown.html)
 - [`LocalKey::<Cell<T>>::get`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.get)
 - [`LocalKey::<Cell<T>>::set`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.set)
 - [`LocalKey::<Cell<T>>::take`](https://doc.rust-lang.org/stable/std/thread/struct.LocalKey.html#method.take)
@@ -229,6 +351,7 @@
   this should only impact users of other registries, or people who don't publish
   to a registry.
   [#12291](https://github.com/rust-lang/cargo/pull/12291)
+- [Demoted `mips*-unknown-linux-gnu*` targets from host tier 2 to target tier 3 support.](https://github.com/rust-lang/rust/pull/113274)
 
 Version 1.71.1 (2023-08-03)
 ===========================
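
For readers skimming the 1.74.0 notes above, here is a brief usage sketch of three of the newly stabilized APIs listed under `Stabilized APIs` (`std::io::Error::other`, `impl From<[T; N]> for Arc<[T]>`, and `OsStr::as_encoded_bytes`). It needs a 1.74+ toolchain and is illustrative only; it is not part of the imported change.

```rust
use std::ffi::OsStr;
use std::io;
use std::sync::Arc;

fn main() {
    // `std::io::Error::other` wraps any boxable error type.
    let err = io::Error::other("backend unavailable");
    assert_eq!(err.kind(), io::ErrorKind::Other);

    // `impl From<[T; N]> for Arc<[T]>`.
    let shared: Arc<[u32]> = Arc::from([1u32, 2, 3]);
    assert_eq!(shared.len(), 3);

    // `OsStr::as_encoded_bytes` exposes the OS string's underlying bytes.
    assert_eq!(OsStr::new("rustc").as_encoded_bytes(), b"rustc");
}
```
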
diff --git a/compiler/rustc/Cargo.toml b/compiler/rustc/Cargo.toml
index 41003ad..dcb165f 100644
--- a/compiler/rustc/Cargo.toml
+++ b/compiler/rustc/Cargo.toml
@@ -13,6 +13,7 @@
 # Make sure rustc_smir ends up in the sysroot, because this
 # crate is intended to be used by stable MIR consumers, which are not in-tree
 rustc_smir = { path = "../rustc_smir" }
+stable_mir = { path = "../stable_mir" }
 
 [dependencies.jemalloc-sys]
 version = "0.5.0"
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index a8a1a90..0706dc1 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -157,8 +157,10 @@
         // for non-ZST uninhabited data (mostly partial initialization).
         let absent = |fields: &IndexSlice<FieldIdx, Layout<'_>>| {
             let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
-            let is_zst = fields.iter().all(|f| f.0.is_zst());
-            uninhabited && is_zst
+            // We cannot ignore alignment; that might lead us to entirely discard a variant and
+            // produce an enum that is less aligned than it should be!
+            let is_1zst = fields.iter().all(|f| f.0.is_1zst());
+            uninhabited && is_1zst
         };
         let (present_first, present_second) = {
             let mut present_variants = variants
@@ -357,10 +359,8 @@
                 // It'll fit, but we need to make some adjustments.
                 match layout.fields {
                     FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                        for (j, offset) in offsets.iter_enumerated_mut() {
-                            if !variants[i][j].0.is_zst() {
-                                *offset += this_offset;
-                            }
+                        for offset in offsets.iter_mut() {
+                            *offset += this_offset;
                         }
                     }
                     _ => {
@@ -504,7 +504,7 @@
                 // to make room for a larger discriminant.
                 for field_idx in st.fields.index_by_increasing_offset() {
                     let field = &field_layouts[FieldIdx::from_usize(field_idx)];
-                    if !field.0.is_zst() || field.align().abi.bytes() != 1 {
+                    if !field.0.is_1zst() {
                         start_align = start_align.min(field.align().abi);
                         break;
                     }
@@ -603,12 +603,15 @@
             abi = Abi::Scalar(tag);
         } else {
             // Try to use a ScalarPair for all tagged enums.
+            // That's possible only if we can find a common primitive type for all variants.
             let mut common_prim = None;
             let mut common_prim_initialized_in_all_variants = true;
             for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
                 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                     panic!();
                 };
+                // We skip *all* ZST here and later check if we are good in terms of alignment.
+                // This lets us handle some cases involving aligned ZST.
                 let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.0.is_zst());
                 let (field, offset) = match (fields.next(), fields.next()) {
                     (None, None) => {
@@ -954,9 +957,6 @@
                         };
 
                         (
-                            // Place ZSTs first to avoid "interesting offsets", especially with only one
-                            // or two non-ZST fields. This helps Scalar/ScalarPair layouts.
-                            !f.0.is_zst(),
                             // Then place largest alignments first.
                             cmp::Reverse(alignment_group_key(f)),
                             // Then prioritize niche placement within alignment group according to
@@ -1073,9 +1073,10 @@
     let size = min_size.align_to(align.abi);
     let mut layout_of_single_non_zst_field = None;
     let mut abi = Abi::Aggregate { sized };
-    // Unpack newtype ABIs and find scalar pairs.
+    // Try to make this a Scalar/ScalarPair.
     if sized && size.bytes() > 0 {
-        // All other fields must be ZSTs.
+        // We skip *all* ZST here and later check if we are good in terms of alignment.
+        // This lets us handle some cases involving aligned ZST.
         let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
 
         match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
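
The `is_zst` → `is_1zst` switch in the hunks above turns on the difference between "size 0" and "size 0 with alignment 1". A minimal standalone sketch of why alignment cannot simply be ignored; the type names are illustrative, not from rustc:

```rust
use std::mem::{align_of, size_of};

struct Unit;       // a 1-ZST: size 0, alignment 1

#[repr(align(16))]
struct AlignedZst; // a ZST, but *not* a 1-ZST: size 0, alignment 16

#[allow(dead_code)]
struct Wrapper(u8, AlignedZst);

fn main() {
    assert_eq!((size_of::<Unit>(), align_of::<Unit>()), (0, 1));
    assert_eq!((size_of::<AlignedZst>(), align_of::<AlignedZst>()), (0, 16));
    // The aligned ZST still raises the containing struct's alignment and size,
    // which is exactly why layout code must not discard such fields.
    assert_eq!(align_of::<Wrapper>(), 16);
    assert_eq!(size_of::<Wrapper>(), 16);
}
```
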
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index 12dd154..b30ff05 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1,5 +1,5 @@
 #![cfg_attr(feature = "nightly", feature(step_trait, rustc_attrs, min_specialization))]
-#![cfg_attr(all(not(bootstrap), feature = "nightly"), allow(internal_features))]
+#![cfg_attr(feature = "nightly", allow(internal_features))]
 
 use std::fmt;
 #[cfg(feature = "nightly")]
@@ -1300,12 +1300,18 @@
         matches!(*self, Abi::Uninhabited)
     }
 
-    /// Returns `true` is this is a scalar type
+    /// Returns `true` if this is a scalar type
     #[inline]
     pub fn is_scalar(&self) -> bool {
         matches!(*self, Abi::Scalar(_))
     }
 
+    /// Returns `true` if this is a bool
+    #[inline]
+    pub fn is_bool(&self) -> bool {
+        matches!(*self, Abi::Scalar(s) if s.is_bool())
+    }
+
     /// Returns the fixed alignment of this ABI, if any is mandated.
     pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> {
         Some(match *self {
@@ -1348,6 +1354,23 @@
             Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
         }
     }
+
+    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
+        match (self, other) {
+            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
+            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
+            (Abi::Scalar(l), Abi::Scalar(r)) => l.primitive() == r.primitive(),
+            (
+                Abi::Vector { element: element_l, count: count_l },
+                Abi::Vector { element: element_r, count: count_r },
+            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
+            (Abi::ScalarPair(l1, l2), Abi::ScalarPair(r1, r2)) => {
+                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
+            }
+            // Everything else must be strictly identical.
+            _ => self == other,
+        }
+    }
 }
 
 #[derive(PartialEq, Eq, Hash, Clone, Debug)]
@@ -1660,15 +1683,25 @@
 
 impl LayoutS {
     /// Returns `true` if the layout corresponds to an unsized type.
+    #[inline]
     pub fn is_unsized(&self) -> bool {
         self.abi.is_unsized()
     }
 
+    #[inline]
     pub fn is_sized(&self) -> bool {
         self.abi.is_sized()
     }
 
+    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
+    pub fn is_1zst(&self) -> bool {
+        self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1
+    }
+
     /// Returns `true` if the type is a ZST and not unsized.
+    ///
+    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
+    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
     pub fn is_zst(&self) -> bool {
         match self.abi {
             Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
@@ -1676,6 +1709,22 @@
             Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
         }
     }
+
+    /// Checks if these two `Layout` are equal enough to be considered "the same for all function
+    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in the
+    /// `Layout`; the `PassMode` need to be compared as well.
+    pub fn eq_abi(&self, other: &Self) -> bool {
+        // The one thing that we are not capturing here is that for unsized types, the metadata must
+        // also have the same ABI, and moreover that the same metadata leads to the same size. The
+        // 2nd point is quite hard to check though.
+        self.size == other.size
+            && self.is_sized() == other.is_sized()
+            && self.abi.eq_up_to_validity(&other.abi)
+            && self.abi.is_bool() == other.abi.is_bool()
+            && self.align.abi == other.align.abi
+            && self.max_repr_align == other.max_repr_align
+            && self.unadjusted_abi_align == other.unadjusted_abi_align
+    }
 }
 
 #[derive(Copy, Clone, Debug)]
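
As a rough mental model of the new `eq_up_to_validity` helper: two scalar layouts that share a primitive but differ only in validity range (think `u32` vs `NonZeroU32`) compare equal, while a different primitive does not. The types below are a simplified stand-in, not the `rustc_abi` API:

```rust
#[derive(PartialEq, Clone, Copy)]
enum Primitive {
    U32,
    F32,
}

#[allow(dead_code)] // the validity range is intentionally never read here
#[derive(Clone, Copy)]
struct Scalar {
    primitive: Primitive,
    valid_start: u128,
    valid_end: u128,
}

// Compare primitives only; the validity range is ignored.
fn eq_up_to_validity(l: &Scalar, r: &Scalar) -> bool {
    l.primitive == r.primitive
}

fn main() {
    let u32_scalar = Scalar { primitive: Primitive::U32, valid_start: 0, valid_end: u32::MAX as u128 };
    // Same primitive, narrower validity range (think `NonZeroU32`).
    let nonzero_u32 = Scalar { valid_start: 1, ..u32_scalar };
    // Different primitive entirely.
    let f32_scalar = Scalar { primitive: Primitive::F32, valid_start: 0, valid_end: 0 };

    assert!(eq_up_to_validity(&u32_scalar, &nonzero_u32));
    assert!(!eq_up_to_validity(&u32_scalar, &f32_scalar));
}
```
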
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index e45b7c1..23fdd27 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -24,7 +24,7 @@
 #![deny(unsafe_op_in_unsafe_fn)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
 #![allow(clippy::mut_from_ref)] // Arena allocators are one of the places where this pattern is fine.
 
 use smallvec::SmallVec;
@@ -37,9 +37,10 @@
 use std::slice;
 use std::{cmp, intrinsics};
 
+/// This calls the passed function while ensuring it won't be inlined into the caller.
 #[inline(never)]
 #[cold]
-fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
+fn outline<F: FnOnce() -> R, R>(f: F) -> R {
     f()
 }
 
@@ -600,7 +601,7 @@
                 unsafe { self.write_from_iter(iter, len, mem) }
             }
             (_, _) => {
-                cold_path(move || -> &mut [T] {
+                outline(move || -> &mut [T] {
                     let mut vec: SmallVec<[_; 8]> = iter.collect();
                     if vec.is_empty() {
                         return &mut [];
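
The `cold_path` → `outline` rename above documents the usual cold-path idiom: `#[inline(never)]` plus `#[cold]` keeps rarely-taken work out of the caller's inlined body. A minimal sketch of the same idiom outside rustc; the cache example is made up:

```rust
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

fn lookup(cache: &[u32], idx: usize) -> u32 {
    match cache.get(idx) {
        // Hot path: stays small and inlinable.
        Some(&v) => v,
        // Cold path: compiled out of line so it does not bloat callers.
        None => outline(|| {
            eprintln!("cache miss at index {idx}");
            0
        }),
    }
}

fn main() {
    let cache = [10, 20, 30];
    assert_eq!(lookup(&cache, 1), 20);
    assert_eq!(lookup(&cache, 9), 0);
}
```
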
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
index 58725a0..e8cbd7b 100644
--- a/compiler/rustc_ast/src/ast.rs
+++ b/compiler/rustc_ast/src/ast.rs
@@ -33,7 +33,7 @@
 use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
 use rustc_span::source_map::{respan, Spanned};
 use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::{Span, DUMMY_SP};
+use rustc_span::{ErrorGuaranteed, Span, DUMMY_SP};
 use std::fmt;
 use std::mem;
 use thin_vec::{thin_vec, ThinVec};
@@ -1426,7 +1426,7 @@
     /// of `if` / `while` expressions. (e.g., `if let 0 = x { .. }`).
     ///
     /// `Span` represents the whole `let pat = expr` statement.
-    Let(P<Pat>, P<Expr>, Span),
+    Let(P<Pat>, P<Expr>, Span, Option<ErrorGuaranteed>),
     /// An `if` block, with an optional `else` block.
     ///
     /// `if expr { block } else { expr }`
@@ -2092,6 +2092,10 @@
     Never,
     /// A tuple (`(A, B, C, D,...)`).
     Tup(ThinVec<P<Ty>>),
+    /// An anonymous struct type i.e. `struct { foo: Type }`
+    AnonStruct(ThinVec<FieldDef>),
+    /// An anonymous union type i.e. `union { bar: Type }`
+    AnonUnion(ThinVec<FieldDef>),
     /// A path (`module::module::...::Type`), optionally
     /// "qualified", e.g., `<Vec<T> as SomeTrait>::SomeType`.
     ///
diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs
index 19a2b30..db008ea 100644
--- a/compiler/rustc_ast/src/attr/mod.rs
+++ b/compiler/rustc_ast/src/attr/mod.rs
@@ -99,6 +99,22 @@
         }
     }
 
+    pub fn path_matches(&self, name: &[Symbol]) -> bool {
+        match &self.kind {
+            AttrKind::Normal(normal) => {
+                normal.item.path.segments.len() == name.len()
+                    && normal
+                        .item
+                        .path
+                        .segments
+                        .iter()
+                        .zip(name)
+                        .all(|(s, n)| s.args.is_none() && s.ident.name == *n)
+            }
+            AttrKind::DocComment(..) => false,
+        }
+    }
+
     pub fn is_word(&self) -> bool {
         if let AttrKind::Normal(normal) = &self.kind {
             matches!(normal.item.args, AttrArgs::Empty)
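
The new `path_matches` helper compares an attribute path segment by segment against a list of names and rejects any segment carrying generic arguments. A simplified, self-contained model of that check; the `Segment` type here is illustrative, not the rustc AST:

```rust
struct Segment {
    name: String,
    has_args: bool,
}

fn path_matches(segments: &[Segment], name: &[&str]) -> bool {
    segments.len() == name.len()
        && segments
            .iter()
            .zip(name)
            .all(|(s, n)| !s.has_args && s.name == *n)
}

fn main() {
    let path = vec![
        Segment { name: "rustc".into(), has_args: false },
        Segment { name: "internal".into(), has_args: false },
    ];
    assert!(path_matches(&path, &["rustc", "internal"]));
    assert!(!path_matches(&path, &["rustc"]));
}
```
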
diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs
index 48e9b18..ba28871 100644
--- a/compiler/rustc_ast/src/mut_visit.rs
+++ b/compiler/rustc_ast/src/mut_visit.rs
@@ -510,6 +510,9 @@
             visit_vec(bounds, |bound| vis.visit_param_bound(bound));
         }
         TyKind::MacCall(mac) => vis.visit_mac_call(mac),
+        TyKind::AnonStruct(fields) | TyKind::AnonUnion(fields) => {
+            fields.flat_map_in_place(|field| vis.flat_map_field_def(field));
+        }
     }
     vis.visit_span(span);
     visit_lazy_tts(tokens, vis);
@@ -1363,7 +1366,7 @@
             vis.visit_ty(ty);
         }
         ExprKind::AddrOf(_, _, ohs) => vis.visit_expr(ohs),
-        ExprKind::Let(pat, scrutinee, _) => {
+        ExprKind::Let(pat, scrutinee, _, _) => {
             vis.visit_pat(pat);
             vis.visit_expr(scrutinee);
         }
diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs
index f4ad0ef..300b148 100644
--- a/compiler/rustc_ast/src/token.rs
+++ b/compiler/rustc_ast/src/token.rs
@@ -486,6 +486,8 @@
             Lt | BinOp(Shl)             | // associated path
             ModSep                      => true, // global path
             Interpolated(ref nt) => matches!(**nt, NtTy(..) | NtPath(..)),
+            // For anonymous structs or unions, which only appear in specific positions
+            // (type of struct fields or union fields), we don't consider them as regular types
             _ => false,
         }
     }
diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
index e9591c7..1e18b12 100644
--- a/compiler/rustc_ast/src/tokenstream.rs
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -213,14 +213,10 @@
                         .into_iter()
                 }
                 AttrTokenTree::Attributes(data) => {
-                    let mut outer_attrs = Vec::new();
-                    let mut inner_attrs = Vec::new();
-                    for attr in &data.attrs {
-                        match attr.style {
-                            crate::AttrStyle::Outer => outer_attrs.push(attr),
-                            crate::AttrStyle::Inner => inner_attrs.push(attr),
-                        }
-                    }
+                    let idx = data
+                        .attrs
+                        .partition_point(|attr| matches!(attr.style, crate::AttrStyle::Outer));
+                    let (outer_attrs, inner_attrs) = data.attrs.split_at(idx);
 
                     let mut target_tokens: Vec<_> = data
                         .tokens
@@ -265,10 +261,10 @@
                             "Failed to find trailing delimited group in: {target_tokens:?}"
                         );
                     }
-                    let mut flat: SmallVec<[_; 1]> = SmallVec::new();
+                    let mut flat: SmallVec<[_; 1]> =
+                        SmallVec::with_capacity(target_tokens.len() + outer_attrs.len());
                     for attr in outer_attrs {
-                        // FIXME: Make this more efficient
-                        flat.extend(attr.tokens().0.clone().iter().cloned());
+                        flat.extend(attr.tokens().0.iter().cloned());
                     }
                     flat.extend(target_tokens);
                     flat.into_iter()
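
The rewritten hunk relies on `data.attrs` already being ordered with every outer attribute before every inner one, which is exactly the precondition `slice::partition_point` needs. A tiny self-contained illustration of that split:

    fn main() {
        // The slice must already be partitioned: all "outer" entries precede
        // all "inner" entries, mirroring the attribute ordering assumed above.
        let attrs = ["outer", "outer", "inner"];
        let idx = attrs.partition_point(|a| *a == "outer");
        let (outer, inner) = attrs.split_at(idx);
        assert_eq!(outer, ["outer", "outer"]);
        assert_eq!(inner, ["inner"]);
    }
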
diff --git a/compiler/rustc_ast/src/util/classify.rs b/compiler/rustc_ast/src/util/classify.rs
index 607b777..f9f1c0c 100644
--- a/compiler/rustc_ast/src/util/classify.rs
+++ b/compiler/rustc_ast/src/util/classify.rs
@@ -36,7 +36,7 @@
             | AssignOp(_, _, e)
             | Binary(_, _, e)
             | Break(_, Some(e))
-            | Let(_, e, _)
+            | Let(_, e, _, _)
             | Range(_, Some(e), _)
             | Ret(Some(e))
             | Unary(_, e)
diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs
index 6d474de..e66c4a9 100644
--- a/compiler/rustc_ast/src/visit.rs
+++ b/compiler/rustc_ast/src/visit.rs
@@ -438,6 +438,9 @@
         TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err => {}
         TyKind::MacCall(mac) => visitor.visit_mac_call(mac),
         TyKind::Never | TyKind::CVarArgs => {}
+        TyKind::AnonStruct(ref fields, ..) | TyKind::AnonUnion(ref fields, ..) => {
+            walk_list!(visitor, visit_field_def, fields)
+        }
     }
 }
 
@@ -824,7 +827,7 @@
             visitor.visit_expr(subexpression);
             visitor.visit_ty(typ)
         }
-        ExprKind::Let(pat, expr, _) => {
+        ExprKind::Let(pat, expr, _, _) => {
             visitor.visit_pat(pat);
             visitor.visit_expr(expr);
         }
diff --git a/compiler/rustc_ast_lowering/messages.ftl b/compiler/rustc_ast_lowering/messages.ftl
index f63a9bf..8115c4b 100644
--- a/compiler/rustc_ast_lowering/messages.ftl
+++ b/compiler/rustc_ast_lowering/messages.ftl
@@ -29,10 +29,6 @@
     argument types not allowed with return type notation
     .suggestion = remove the input types
 
-ast_lowering_bad_return_type_notation_needs_dots =
-    return type notation arguments must be elided with `..`
-    .suggestion = add `..`
-
 ast_lowering_bad_return_type_notation_output =
     return type not allowed with return type notation
     .suggestion = remove the return type
diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs
index 7408b4f..57c54f8 100644
--- a/compiler/rustc_ast_lowering/src/expr.rs
+++ b/compiler/rustc_ast_lowering/src/expr.rs
@@ -152,13 +152,14 @@
                     let ohs = self.lower_expr(ohs);
                     hir::ExprKind::AddrOf(*k, *m, ohs)
                 }
-                ExprKind::Let(pat, scrutinee, span) => {
+                ExprKind::Let(pat, scrutinee, span, is_recovered) => {
                     hir::ExprKind::Let(self.arena.alloc(hir::Let {
                         hir_id: self.next_id(),
                         span: self.lower_span(*span),
                         pat: self.lower_pat(pat),
                         ty: None,
                         init: self.lower_expr(scrutinee),
+                        is_recovered: *is_recovered,
                     }))
                 }
                 ExprKind::If(cond, then, else_opt) => {
@@ -558,13 +559,14 @@
     fn lower_arm(&mut self, arm: &Arm) -> hir::Arm<'hir> {
         let pat = self.lower_pat(&arm.pat);
         let guard = arm.guard.as_ref().map(|cond| {
-            if let ExprKind::Let(pat, scrutinee, span) = &cond.kind {
+            if let ExprKind::Let(pat, scrutinee, span, is_recovered) = &cond.kind {
                 hir::Guard::IfLet(self.arena.alloc(hir::Let {
                     hir_id: self.next_id(),
                     span: self.lower_span(*span),
                     pat: self.lower_pat(pat),
                     ty: None,
                     init: self.lower_expr(scrutinee),
+                    is_recovered: *is_recovered,
                 }))
             } else {
                 hir::Guard::If(self.lower_expr(cond))
diff --git a/compiler/rustc_ast_lowering/src/index.rs b/compiler/rustc_ast_lowering/src/index.rs
index ce84790..eff362f 100644
--- a/compiler/rustc_ast_lowering/src/index.rs
+++ b/compiler/rustc_ast_lowering/src/index.rs
@@ -2,19 +2,17 @@
 use rustc_data_structures::sorted_map::SortedMap;
 use rustc_hir as hir;
 use rustc_hir::def_id::LocalDefId;
-use rustc_hir::definitions;
 use rustc_hir::intravisit::{self, Visitor};
 use rustc_hir::*;
 use rustc_index::{Idx, IndexVec};
 use rustc_middle::span_bug;
-use rustc_session::Session;
-use rustc_span::source_map::SourceMap;
+use rustc_middle::ty::TyCtxt;
 use rustc_span::{Span, DUMMY_SP};
 
 /// A visitor that walks over the HIR and collects `Node`s into a HIR map.
 pub(super) struct NodeCollector<'a, 'hir> {
-    /// Source map
-    source_map: &'a SourceMap,
+    tcx: TyCtxt<'hir>,
+
     bodies: &'a SortedMap<ItemLocalId, &'hir Body<'hir>>,
 
     /// Outputs
@@ -25,14 +23,11 @@
     parent_node: hir::ItemLocalId,
 
     owner: OwnerId,
-
-    definitions: &'a definitions::Definitions,
 }
 
-#[instrument(level = "debug", skip(sess, definitions, bodies))]
+#[instrument(level = "debug", skip(tcx, bodies))]
 pub(super) fn index_hir<'hir>(
-    sess: &Session,
-    definitions: &definitions::Definitions,
+    tcx: TyCtxt<'hir>,
     item: hir::OwnerNode<'hir>,
     bodies: &SortedMap<ItemLocalId, &'hir Body<'hir>>,
 ) -> (IndexVec<ItemLocalId, Option<ParentedNode<'hir>>>, FxHashMap<LocalDefId, ItemLocalId>) {
@@ -42,8 +37,7 @@
     // used.
     nodes.push(Some(ParentedNode { parent: ItemLocalId::INVALID, node: item.into() }));
     let mut collector = NodeCollector {
-        source_map: sess.source_map(),
-        definitions,
+        tcx,
         owner: item.def_id(),
         parent_node: ItemLocalId::new(0),
         nodes,
@@ -79,11 +73,17 @@
                     span,
                     "inconsistent HirId at `{:?}` for `{:?}`: \
                      current_dep_node_owner={} ({:?}), hir_id.owner={} ({:?})",
-                    self.source_map.span_to_diagnostic_string(span),
+                    self.tcx.sess.source_map().span_to_diagnostic_string(span),
                     node,
-                    self.definitions.def_path(self.owner.def_id).to_string_no_crate_verbose(),
+                    self.tcx
+                        .definitions_untracked()
+                        .def_path(self.owner.def_id)
+                        .to_string_no_crate_verbose(),
                     self.owner,
-                    self.definitions.def_path(hir_id.owner.def_id).to_string_no_crate_verbose(),
+                    self.tcx
+                        .definitions_untracked()
+                        .def_path(hir_id.owner.def_id)
+                        .to_string_no_crate_verbose(),
                     hir_id.owner,
                 )
             }
diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs
index a59c83d..edc1e2f 100644
--- a/compiler/rustc_ast_lowering/src/item.rs
+++ b/compiler/rustc_ast_lowering/src/item.rs
@@ -1308,7 +1308,7 @@
 
     fn lower_asyncness(&mut self, a: Async) -> hir::IsAsync {
         match a {
-            Async::Yes { .. } => hir::IsAsync::Async,
+            Async::Yes { span, .. } => hir::IsAsync::Async(span),
             Async::No => hir::IsAsync::NotAsync,
         }
     }
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
index 4a47de1..85ab5e7 100644
--- a/compiler/rustc_ast_lowering/src/lib.rs
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -153,6 +153,7 @@
     fn get_label_res(&self, id: NodeId) -> Option<NodeId>;
     fn get_lifetime_res(&self, id: NodeId) -> Option<LifetimeRes>;
     fn take_extra_lifetime_params(&mut self, id: NodeId) -> Vec<(Ident, NodeId, LifetimeRes)>;
+    fn remap_extra_lifetime_params(&mut self, from: NodeId, to: NodeId);
     fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind;
 }
 
@@ -213,6 +214,11 @@
         self.extra_lifetime_params_map.remove(&id).unwrap_or_default()
     }
 
+    fn remap_extra_lifetime_params(&mut self, from: NodeId, to: NodeId) {
+        let lifetimes = self.extra_lifetime_params_map.remove(&from).unwrap_or_default();
+        self.extra_lifetime_params_map.insert(to, lifetimes);
+    }
+
     fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind {
         self.builtin_macro_kinds.get(&def_id).copied().unwrap_or(MacroKind::Bang)
     }
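
`remap_extra_lifetime_params` simply moves whatever was recorded under one `NodeId` key so it is found under another, defaulting to an empty list when nothing was recorded. The same remove-then-insert pattern on a plain `HashMap` (illustrative key and value types; the real map stores `NodeId` keys and `(Ident, NodeId, LifetimeRes)` tuples):

    use std::collections::HashMap;

    fn remap(map: &mut HashMap<u32, Vec<&'static str>>, from: u32, to: u32) {
        // A missing `from` key behaves like an empty entry, as in the resolver.
        let values = map.remove(&from).unwrap_or_default();
        map.insert(to, values);
    }

    fn main() {
        let mut map = HashMap::new();
        map.insert(1, vec!["'a"]);
        remap(&mut map, 1, 2);
        assert_eq!(map.get(&2), Some(&vec!["'a"]));
        assert!(map.get(&1).is_none());
    }
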
@@ -236,7 +242,7 @@
     ReturnPositionOpaqueTy {
         /// Origin: Either OpaqueTyOrigin::FnReturn or OpaqueTyOrigin::AsyncFn,
         origin: hir::OpaqueTyOrigin,
-        in_trait: bool,
+        fn_kind: FnDeclKind,
     },
     /// Impl trait in type aliases.
     TypeAliasesOpaqueTy { in_assoc_ty: bool },
@@ -312,7 +318,7 @@
     }
 }
 
-#[derive(Debug, PartialEq, Eq)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
 enum FnDeclKind {
     Fn,
     Inherent,
@@ -665,8 +671,7 @@
         } else {
             (None, None)
         };
-        let (nodes, parenting) =
-            index::index_hir(self.tcx.sess, &*self.tcx.definitions_untracked(), node, &bodies);
+        let (nodes, parenting) = index::index_hir(self.tcx, node, &bodies);
         let nodes = hir::OwnerNodes { opt_hash_including_bodies, nodes, bodies };
         let attrs = hir::AttributeMap { map: attrs, opt_hash: attrs_hash };
 
@@ -765,7 +770,7 @@
     /// Intercept all spans entering HIR.
     /// Mark a span as relative to the current owning item.
     fn lower_span(&self, span: Span) -> Span {
-        if self.tcx.sess.opts.incremental_relative_spans() {
+        if self.tcx.sess.opts.incremental.is_some() {
             span.with_parent(Some(self.current_hir_id_owner.def_id))
         } else {
             // Do not make spans relative when not using incremental compilation.
@@ -1089,6 +1094,11 @@
                         // constructing the HIR for `impl bounds...` and then lowering that.
 
                         let impl_trait_node_id = self.next_node_id();
+                        // Shift `impl Trait` lifetime captures from the associated type bound's
+                        // node id to the opaque node id, so that the opaque can actually use
+                        // these lifetime bounds.
+                        self.resolver
+                            .remap_extra_lifetime_params(constraint.id, impl_trait_node_id);
 
                         self.with_dyn_type_scope(false, |this| {
                             let node_id = this.next_node_id();
@@ -1293,6 +1303,18 @@
             TyKind::Err => {
                 hir::TyKind::Err(self.tcx.sess.delay_span_bug(t.span, "TyKind::Err lowered"))
             }
+            // FIXME(unnamed_fields): IMPLEMENTATION IN PROGRESS
+            #[allow(rustc::untranslatable_diagnostic)]
+            #[allow(rustc::diagnostic_outside_of_impl)]
+            TyKind::AnonStruct(ref _fields) => hir::TyKind::Err(
+                self.tcx.sess.span_err(t.span, "anonymous structs are unimplemented"),
+            ),
+            // FIXME(unnamed_fields): IMPLEMENTATION IN PROGRESS
+            #[allow(rustc::untranslatable_diagnostic)]
+            #[allow(rustc::diagnostic_outside_of_impl)]
+            TyKind::AnonUnion(ref _fields) => hir::TyKind::Err(
+                self.tcx.sess.span_err(t.span, "anonymous unions are unimplemented"),
+            ),
             TyKind::Slice(ty) => hir::TyKind::Slice(self.lower_ty(ty, itctx)),
             TyKind::Ptr(mt) => hir::TyKind::Ptr(self.lower_mt(mt, itctx)),
             TyKind::Ref(region, mt) => {
@@ -1389,13 +1411,13 @@
             TyKind::ImplTrait(def_node_id, bounds) => {
                 let span = t.span;
                 match itctx {
-                    ImplTraitContext::ReturnPositionOpaqueTy { origin, in_trait } => self
+                    ImplTraitContext::ReturnPositionOpaqueTy { origin, fn_kind } => self
                         .lower_opaque_impl_trait(
                             span,
                             *origin,
                             *def_node_id,
                             bounds,
-                            *in_trait,
+                            Some(*fn_kind),
                             itctx,
                         ),
                     &ImplTraitContext::TypeAliasesOpaqueTy { in_assoc_ty } => self
@@ -1404,17 +1426,11 @@
                             hir::OpaqueTyOrigin::TyAlias { in_assoc_ty },
                             *def_node_id,
                             bounds,
-                            false,
+                            None,
                             itctx,
                         ),
                     ImplTraitContext::Universal => {
                         let span = t.span;
-                        self.create_def(
-                            self.current_hir_id_owner.def_id,
-                            *def_node_id,
-                            DefPathData::ImplTrait,
-                            span,
-                        );
 
                         // HACK: pprust breaks strings with newlines when the type
                         // gets too long. We don't want these to show up in compiler
@@ -1425,6 +1441,12 @@
                             span,
                         );
 
+                        self.create_def(
+                            self.current_hir_id_owner.def_id,
+                            *def_node_id,
+                            DefPathData::TypeNs(ident.name),
+                            span,
+                        );
                         let (param, bounds, path) = self.lower_universal_param_and_bounds(
                             *def_node_id,
                             span,
@@ -1511,7 +1533,7 @@
         origin: hir::OpaqueTyOrigin,
         opaque_ty_node_id: NodeId,
         bounds: &GenericBounds,
-        in_trait: bool,
+        fn_kind: Option<FnDeclKind>,
         itctx: &ImplTraitContext,
     ) -> hir::TyKind<'hir> {
         // Make sure we know that some funky desugaring has been going on here.
@@ -1528,10 +1550,22 @@
                 Vec::new()
             }
             hir::OpaqueTyOrigin::FnReturn(..) => {
-                // in fn return position, like the `fn test<'a>() -> impl Debug + 'a`
-                // example, we only need to duplicate lifetimes that appear in the
-                // bounds, since those are the only ones that are captured by the opaque.
-                lifetime_collector::lifetimes_in_bounds(&self.resolver, bounds)
+                if let FnDeclKind::Impl | FnDeclKind::Trait =
+                    fn_kind.expect("expected RPITs to be lowered with a FnKind")
+                {
+                    // return-position impl trait in trait was decided to capture all
+                    // in-scope lifetimes, which we collect for all opaques during resolution.
+                    self.resolver
+                        .take_extra_lifetime_params(opaque_ty_node_id)
+                        .into_iter()
+                        .map(|(ident, id, _)| Lifetime { id, ident })
+                        .collect()
+                } else {
+                    // in fn return position, like the `fn test<'a>() -> impl Debug + 'a`
+                    // example, we only need to duplicate lifetimes that appear in the
+                    // bounds, since those are the only ones that are captured by the opaque.
+                    lifetime_collector::lifetimes_in_bounds(&self.resolver, bounds)
+                }
             }
             hir::OpaqueTyOrigin::AsyncFn(..) => {
                 unreachable!("should be using `lower_async_fn_ret_ty`")
@@ -1542,7 +1576,7 @@
         self.lower_opaque_inner(
             opaque_ty_node_id,
             origin,
-            in_trait,
+            matches!(fn_kind, Some(FnDeclKind::Trait)),
             captured_lifetimes_to_duplicate,
             span,
             opaque_ty_span,
@@ -1630,7 +1664,7 @@
                     lifetime.ident,
                 ));
 
-                // Now make an arg that we can use for the substs of the opaque tykind.
+                // Now make an arg that we can use for the generic params of the opaque tykind.
                 let id = self.next_node_id();
                 let lifetime_arg = self.new_named_lifetime_with_res(id, lifetime.ident, res);
                 let duplicated_lifetime_def_id = self.local_def_id(duplicated_lifetime_node_id);
@@ -1790,12 +1824,7 @@
             }
 
             let fn_def_id = self.local_def_id(fn_node_id);
-            self.lower_async_fn_ret_ty(
-                &decl.output,
-                fn_def_id,
-                ret_id,
-                matches!(kind, FnDeclKind::Trait),
-            )
+            self.lower_async_fn_ret_ty(&decl.output, fn_def_id, ret_id, kind)
         } else {
             match &decl.output {
                 FnRetTy::Ty(ty) => {
@@ -1803,7 +1832,7 @@
                         let fn_def_id = self.local_def_id(fn_node_id);
                         ImplTraitContext::ReturnPositionOpaqueTy {
                             origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id),
-                            in_trait: matches!(kind, FnDeclKind::Trait),
+                            fn_kind: kind,
                         }
                     } else {
                         let position = match kind {
@@ -1871,7 +1900,7 @@
         output: &FnRetTy,
         fn_def_id: LocalDefId,
         opaque_ty_node_id: NodeId,
-        in_trait: bool,
+        fn_kind: FnDeclKind,
     ) -> hir::FnRetTy<'hir> {
         let span = self.lower_span(output.span());
         let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::Async, span, None);
@@ -1886,7 +1915,7 @@
         let opaque_ty_ref = self.lower_opaque_inner(
             opaque_ty_node_id,
             hir::OpaqueTyOrigin::AsyncFn(fn_def_id),
-            in_trait,
+            matches!(fn_kind, FnDeclKind::Trait),
             captured_lifetimes,
             span,
             opaque_ty_span,
@@ -1894,7 +1923,9 @@
                 let future_bound = this.lower_async_fn_output_type_to_future_bound(
                     output,
                     span,
-                    if in_trait && !this.tcx.features().return_position_impl_trait_in_trait {
+                    if let FnDeclKind::Trait = fn_kind
+                        && !this.tcx.features().return_position_impl_trait_in_trait
+                    {
                         ImplTraitContext::FeatureGated(
                             ImplTraitPosition::TraitReturn,
                             sym::return_position_impl_trait_in_trait,
@@ -1902,7 +1933,7 @@
                     } else {
                         ImplTraitContext::ReturnPositionOpaqueTy {
                             origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id),
-                            in_trait,
+                            fn_kind,
                         }
                     },
                 );
diff --git a/compiler/rustc_ast_passes/messages.ftl b/compiler/rustc_ast_passes/messages.ftl
index f323bb4..43020a9 100644
--- a/compiler/rustc_ast_passes/messages.ftl
+++ b/compiler/rustc_ast_passes/messages.ftl
@@ -1,3 +1,7 @@
+ast_passes_anon_struct_or_union_not_allowed =
+    anonymous {$struct_or_union}s are not allowed outside of unnamed struct or union fields
+    .label = anonymous {$struct_or_union} declared here
+
 ast_passes_assoc_const_without_body =
     associated constant in `impl` without body
     .suggestion = provide a definition for the constant
@@ -113,16 +117,6 @@
     `default` is only allowed on items in trait impls
     .label = `default` because of this
 
-ast_passes_forbidden_let =
-    `let` expressions are not supported here
-    .note = only supported directly in conditions of `if` and `while` expressions
-    .not_supported_or = `||` operators are not supported in let chain expressions
-    .not_supported_parentheses = `let`s wrapped in parentheses are not supported in a context with let chains
-
-ast_passes_forbidden_let_stable =
-    expected expression, found statement (`let`)
-    .note = variable declaration using `let` is a statement
-
 ast_passes_forbidden_lifetime_bound =
     lifetime bounds cannot be used in this context
 
@@ -162,6 +156,14 @@
 ast_passes_invalid_label =
     invalid label name `{$name}`
 
+ast_passes_invalid_unnamed_field =
+    unnamed fields are not allowed outside of structs or unions
+    .label = unnamed field declared here
+
+ast_passes_invalid_unnamed_field_ty =
+    unnamed fields can only have struct or union types
+    .label = not a struct or union
+
 ast_passes_item_underscore = `{$kind}` items in this context need a name
     .label = `_` is not a valid name for this `{$kind}` item
 
diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs
index bd3e676..7bc685a 100644
--- a/compiler/rustc_ast_passes/src/ast_validation.rs
+++ b/compiler/rustc_ast_passes/src/ast_validation.rs
@@ -14,14 +14,12 @@
 use rustc_ast_pretty::pprust::{self, State};
 use rustc_data_structures::fx::FxIndexMap;
 use rustc_feature::Features;
-use rustc_macros::Subdiagnostic;
 use rustc_parse::validate_attr;
 use rustc_session::lint::builtin::{
     DEPRECATED_WHERE_CLAUSE_LOCATION, MISSING_ABI, PATTERNS_IN_FNS_WITHOUT_BODY,
 };
 use rustc_session::lint::{BuiltinLintDiagnostics, LintBuffer};
 use rustc_session::Session;
-use rustc_span::source_map::Spanned;
 use rustc_span::symbol::{kw, sym, Ident};
 use rustc_span::Span;
 use rustc_target::spec::abi;
@@ -69,9 +67,6 @@
     /// or `Foo::Bar<impl Trait>`
     is_impl_trait_banned: bool,
 
-    /// See [ForbiddenLetReason]
-    forbidden_let_reason: Option<ForbiddenLetReason>,
-
     lint_buffer: &'a mut LintBuffer,
 }
 
@@ -118,26 +113,6 @@
         self.with_tilde_const(Some(ctx), f)
     }
 
-    fn with_let_management(
-        &mut self,
-        forbidden_let_reason: Option<ForbiddenLetReason>,
-        f: impl FnOnce(&mut Self, Option<ForbiddenLetReason>),
-    ) {
-        let old = mem::replace(&mut self.forbidden_let_reason, forbidden_let_reason);
-        f(self, old);
-        self.forbidden_let_reason = old;
-    }
-
-    /// Emits an error banning the `let` expression provided in the given location.
-    fn ban_let_expr(&self, expr: &'a Expr, forbidden_let_reason: ForbiddenLetReason) {
-        let sess = &self.session;
-        if sess.opts.unstable_features.is_nightly_build() {
-            sess.emit_err(errors::ForbiddenLet { span: expr.span, reason: forbidden_let_reason });
-        } else {
-            sess.emit_err(errors::ForbiddenLetStable { span: expr.span });
-        }
-    }
-
     fn check_type_alias_where_clause_location(
         &mut self,
         ty_alias: &TyAlias,
@@ -223,10 +198,27 @@
                     }
                 }
             }
+            TyKind::AnonStruct(ref fields, ..) | TyKind::AnonUnion(ref fields, ..) => {
+                walk_list!(self, visit_field_def, fields)
+            }
             _ => visit::walk_ty(self, t),
         }
     }
 
+    fn visit_struct_field_def(&mut self, field: &'a FieldDef) {
+        if let Some(ident) = field.ident &&
+            ident.name == kw::Underscore {
+                self.check_unnamed_field_ty(&field.ty, ident.span);
+                self.visit_vis(&field.vis);
+                self.visit_ident(ident);
+                self.visit_ty_common(&field.ty);
+                self.walk_ty(&field.ty);
+                walk_list!(self, visit_attribute, &field.attrs);
+        } else {
+            self.visit_field_def(field);
+        }
+    }
+
     fn err_handler(&self) -> &rustc_errors::Handler {
         &self.session.diagnostic()
     }
@@ -264,6 +256,42 @@
         }
     }
 
+    fn check_unnamed_field_ty(&self, ty: &Ty, span: Span) {
+        if matches!(
+            &ty.kind,
+            // We already checked for `kw::Underscore` before calling this function,
+            // so skip the check
+            TyKind::AnonStruct(..) | TyKind::AnonUnion(..)
+            // If the anonymous field's type is a `Path`, we can't determine
+            // whether the path resolves to a valid struct or union, so skip the check
+            | TyKind::Path(..)
+        ) {
+            return;
+        }
+        self.err_handler().emit_err(errors::InvalidUnnamedFieldTy { span, ty_span: ty.span });
+    }
+
+    fn deny_anon_struct_or_union(&self, ty: &Ty) {
+        let struct_or_union = match &ty.kind {
+            TyKind::AnonStruct(..) => "struct",
+            TyKind::AnonUnion(..) => "union",
+            _ => return,
+        };
+        self.err_handler()
+            .emit_err(errors::AnonStructOrUnionNotAllowed { struct_or_union, span: ty.span });
+    }
+
+    fn deny_unnamed_field(&self, field: &FieldDef) {
+        if let Some(ident) = field.ident &&
+            ident.name == kw::Underscore {
+                self.err_handler()
+                    .emit_err(errors::InvalidUnnamedField {
+                        span: field.span,
+                        ident_span: ident.span
+                    });
+        }
+    }
+
     fn check_trait_fn_not_const(&self, constness: Const) {
         if let Const::Yes(span) = constness {
             self.session.emit_err(errors::TraitFnConst { span });
@@ -726,69 +754,9 @@
         validate_attr::check_attr(&self.session.parse_sess, attr);
     }
 
-    fn visit_expr(&mut self, expr: &'a Expr) {
-        self.with_let_management(Some(ForbiddenLetReason::GenericForbidden), |this, forbidden_let_reason| {
-            match &expr.kind {
-                ExprKind::Binary(Spanned { node: BinOpKind::Or, span }, lhs, rhs) => {
-                    let local_reason = Some(ForbiddenLetReason::NotSupportedOr(*span));
-                    this.with_let_management(local_reason, |this, _| this.visit_expr(lhs));
-                    this.with_let_management(local_reason, |this, _| this.visit_expr(rhs));
-                }
-                ExprKind::If(cond, then, opt_else) => {
-                    this.visit_block(then);
-                    walk_list!(this, visit_expr, opt_else);
-                    this.with_let_management(None, |this, _| this.visit_expr(cond));
-                    return;
-                }
-                ExprKind::Let(..) if let Some(elem) = forbidden_let_reason => {
-                    this.ban_let_expr(expr, elem);
-                },
-                ExprKind::Match(scrutinee, arms) => {
-                    this.visit_expr(scrutinee);
-                    for arm in arms {
-                        this.visit_expr(&arm.body);
-                        this.visit_pat(&arm.pat);
-                        walk_list!(this, visit_attribute, &arm.attrs);
-                        if let Some(guard) = &arm.guard {
-                            this.with_let_management(None, |this, _| {
-                                this.visit_expr(guard)
-                            });
-                        }
-                    }
-                }
-                ExprKind::Paren(local_expr) => {
-                    fn has_let_expr(expr: &Expr) -> bool {
-                        match &expr.kind {
-                            ExprKind::Binary(_, lhs, rhs) => has_let_expr(lhs) || has_let_expr(rhs),
-                            ExprKind::Let(..) => true,
-                            _ => false,
-                        }
-                    }
-                    let local_reason = if has_let_expr(local_expr) {
-                        Some(ForbiddenLetReason::NotSupportedParentheses(local_expr.span))
-                    }
-                    else {
-                        forbidden_let_reason
-                    };
-                    this.with_let_management(local_reason, |this, _| this.visit_expr(local_expr));
-                }
-                ExprKind::Binary(Spanned { node: BinOpKind::And, .. }, ..) => {
-                    this.with_let_management(forbidden_let_reason, |this, _| visit::walk_expr(this, expr));
-                    return;
-                }
-                ExprKind::While(cond, then, opt_label) => {
-                    walk_list!(this, visit_label, opt_label);
-                    this.visit_block(then);
-                    this.with_let_management(None, |this, _| this.visit_expr(cond));
-                    return;
-                }
-                _ => visit::walk_expr(this, expr),
-            }
-        });
-    }
-
     fn visit_ty(&mut self, ty: &'a Ty) {
         self.visit_ty_common(ty);
+        self.deny_anon_struct_or_union(ty);
         self.walk_ty(ty)
     }
 
@@ -803,6 +771,7 @@
     }
 
     fn visit_field_def(&mut self, field: &'a FieldDef) {
+        self.deny_unnamed_field(field);
         visit::walk_field_def(self, field)
     }
 
@@ -995,10 +964,38 @@
                     self.check_mod_file_item_asciionly(item.ident);
                 }
             }
-            ItemKind::Union(vdata, ..) => {
+            ItemKind::Struct(vdata, generics) => match vdata {
+                // Duplicating the `Visitor` logic allows catching all cases
+                // of `Anonymous(Struct, Union)` outside of struct or union fields.
+                //
+                // Inside `visit_ty` the validator catches every `Anonymous(Struct, Union)` it
+                // encounters, so only for `ItemKind::Struct` and `ItemKind::Union` fields does
+                // it use `visit_ty_common`, which doesn't contain that specific check.
+                VariantData::Struct(fields, ..) => {
+                    self.visit_vis(&item.vis);
+                    self.visit_ident(item.ident);
+                    self.visit_generics(generics);
+                    walk_list!(self, visit_struct_field_def, fields);
+                    walk_list!(self, visit_attribute, &item.attrs);
+                    return;
+                }
+                _ => {}
+            },
+            ItemKind::Union(vdata, generics) => {
                 if vdata.fields().is_empty() {
                     self.err_handler().emit_err(errors::FieldlessUnion { span: item.span });
                 }
+                match vdata {
+                    VariantData::Struct(fields, ..) => {
+                        self.visit_vis(&item.vis);
+                        self.visit_ident(item.ident);
+                        self.visit_generics(generics);
+                        walk_list!(self, visit_struct_field_def, fields);
+                        walk_list!(self, visit_attribute, &item.attrs);
+                        return;
+                    }
+                    _ => {}
+                }
             }
             ItemKind::Const(box ConstItem { defaultness, expr: None, .. }) => {
                 self.check_defaultness(item.span, *defaultness);
@@ -1518,26 +1515,9 @@
         outer_impl_trait: None,
         disallow_tilde_const: None,
         is_impl_trait_banned: false,
-        forbidden_let_reason: Some(ForbiddenLetReason::GenericForbidden),
         lint_buffer: lints,
     };
     visit::walk_crate(&mut validator, krate);
 
     validator.has_proc_macro_decls
 }
-
-/// Used to forbid `let` expressions in certain syntactic locations.
-#[derive(Clone, Copy, Subdiagnostic)]
-pub(crate) enum ForbiddenLetReason {
-    /// `let` is not valid and the source environment is not important
-    GenericForbidden,
-    /// A let chain with the `||` operator
-    #[note(ast_passes_not_supported_or)]
-    NotSupportedOr(#[primary_span] Span),
-    /// A let chain with invalid parentheses
-    ///
-    /// For example, `let 1 = 1 && (expr && expr)` is allowed
-    /// but `(let 1 = 1 && (let 1 = 1 && (let 1 = 1))) && let a = 1` is not
-    #[note(ast_passes_not_supported_parentheses)]
-    NotSupportedParentheses(#[primary_span] Span),
-}
diff --git a/compiler/rustc_ast_passes/src/errors.rs b/compiler/rustc_ast_passes/src/errors.rs
index a6f217d..e74d94e 100644
--- a/compiler/rustc_ast_passes/src/errors.rs
+++ b/compiler/rustc_ast_passes/src/errors.rs
@@ -5,28 +5,9 @@
 use rustc_macros::{Diagnostic, Subdiagnostic};
 use rustc_span::{symbol::Ident, Span, Symbol};
 
-use crate::ast_validation::ForbiddenLetReason;
 use crate::fluent_generated as fluent;
 
 #[derive(Diagnostic)]
-#[diag(ast_passes_forbidden_let)]
-#[note]
-pub struct ForbiddenLet {
-    #[primary_span]
-    pub span: Span,
-    #[subdiagnostic]
-    pub(crate) reason: ForbiddenLetReason,
-}
-
-#[derive(Diagnostic)]
-#[diag(ast_passes_forbidden_let_stable)]
-#[note]
-pub struct ForbiddenLetStable {
-    #[primary_span]
-    pub span: Span,
-}
-
-#[derive(Diagnostic)]
 #[diag(ast_passes_keyword_lifetime)]
 pub struct KeywordLifetime {
     #[primary_span]
@@ -727,3 +708,30 @@
     #[primary_span]
     pub span: Span,
 }
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_invalid_unnamed_field_ty)]
+pub struct InvalidUnnamedFieldTy {
+    #[primary_span]
+    pub span: Span,
+    #[label]
+    pub ty_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_invalid_unnamed_field)]
+pub struct InvalidUnnamedField {
+    #[primary_span]
+    pub span: Span,
+    #[label]
+    pub ident_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_anon_struct_or_union_not_allowed)]
+pub struct AnonStructOrUnionNotAllowed {
+    #[primary_span]
+    #[label]
+    pub span: Span,
+    pub struct_or_union: &'static str,
+}
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
index 10c9c3e..62dc7ae 100644
--- a/compiler/rustc_ast_passes/src/feature_gate.rs
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -570,6 +570,7 @@
     gate_all!(builtin_syntax, "`builtin #` syntax is unstable");
     gate_all!(explicit_tail_calls, "`become` expression is experimental");
     gate_all!(generic_const_items, "generic const items are experimental");
+    gate_all!(unnamed_fields, "unnamed fields are not yet fully implemented");
 
     if !visitor.features.negative_bounds {
         for &span in spans.get(&sym::negative_bounds).iter().copied().flatten() {
@@ -577,11 +578,11 @@
         }
     }
 
-    // All uses of `gate_all!` below this point were added in #65742,
+    // All uses of `gate_all_legacy_dont_use!` below this point were added in #65742,
     // and subsequently disabled (with the non-early gating readded).
     // We emit an early future-incompatible warning for these.
     // New syntax gates should go above here to get a hard error gate.
-    macro_rules! gate_all {
+    macro_rules! gate_all_legacy_dont_use {
         ($gate:ident, $msg:literal) => {
             for span in spans.get(&sym::$gate).unwrap_or(&vec![]) {
                 gate_feature_post!(future_incompatible; &visitor, $gate, *span, $msg);
@@ -589,13 +590,19 @@
         };
     }
 
-    gate_all!(trait_alias, "trait aliases are experimental");
-    gate_all!(associated_type_bounds, "associated type bounds are unstable");
-    gate_all!(return_type_notation, "return type notation is experimental");
-    gate_all!(decl_macro, "`macro` is experimental");
-    gate_all!(box_patterns, "box pattern syntax is experimental");
-    gate_all!(exclusive_range_pattern, "exclusive range pattern syntax is experimental");
-    gate_all!(try_blocks, "`try` blocks are unstable");
+    gate_all_legacy_dont_use!(trait_alias, "trait aliases are experimental");
+    gate_all_legacy_dont_use!(associated_type_bounds, "associated type bounds are unstable");
+    // Despite being a new feature, `where T: Trait<Assoc(): Sized>`, which is RTN syntax now,
+    // used to be gated under `associated_type_bounds`, which is handled right above, so RTN
+    // needs to be gated the same way.
+    gate_all_legacy_dont_use!(return_type_notation, "return type notation is experimental");
+    gate_all_legacy_dont_use!(decl_macro, "`macro` is experimental");
+    gate_all_legacy_dont_use!(box_patterns, "box pattern syntax is experimental");
+    gate_all_legacy_dont_use!(
+        exclusive_range_pattern,
+        "exclusive range pattern syntax is experimental"
+    );
+    gate_all_legacy_dont_use!(try_blocks, "`try` blocks are unstable");
 
     visit::walk_crate(&mut visitor, krate);
 }
diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs
index 58ce730..8b7e918 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state.rs
@@ -1064,6 +1064,14 @@
                 }
                 self.pclose();
             }
+            ast::TyKind::AnonStruct(fields) => {
+                self.head("struct");
+                self.print_record_struct_body(&fields, ty.span);
+            }
+            ast::TyKind::AnonUnion(fields) => {
+                self.head("union");
+                self.print_record_struct_body(&fields, ty.span);
+            }
             ast::TyKind::Paren(typ) => {
                 self.popen();
                 self.print_type(typ);
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
index 39741a0..1142d49 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
@@ -352,7 +352,7 @@
                 self.end();
                 self.word(")");
             }
-            ast::ExprKind::Let(pat, scrutinee, _) => {
+            ast::ExprKind::Let(pat, scrutinee, _, _) => {
                 self.print_let(pat, scrutinee);
             }
             ast::ExprKind::If(test, blk, elseopt) => self.print_if(test, blk, elseopt.as_deref()),
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/item.rs b/compiler/rustc_ast_pretty/src/pprust/state/item.rs
index d27a44f..3393f03 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state/item.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state/item.rs
@@ -443,7 +443,11 @@
         }
     }
 
-    fn print_record_struct_body(&mut self, fields: &[ast::FieldDef], span: rustc_span::Span) {
+    pub(crate) fn print_record_struct_body(
+        &mut self,
+        fields: &[ast::FieldDef],
+        span: rustc_span::Span,
+    ) {
         self.nbsp();
         self.bopen();
 
diff --git a/compiler/rustc_attr/src/builtin.rs b/compiler/rustc_attr/src/builtin.rs
index 3592287..ca4b366 100644
--- a/compiler/rustc_attr/src/builtin.rs
+++ b/compiler/rustc_attr/src/builtin.rs
@@ -937,6 +937,7 @@
 #[derive(PartialEq, Debug, Encodable, Decodable, Copy, Clone)]
 pub enum ReprAttr {
     ReprInt(IntType),
+    ReprRust,
     ReprC,
     ReprPacked(u32),
     ReprSimd,
@@ -985,6 +986,7 @@
             let mut recognised = false;
             if item.is_word() {
                 let hint = match item.name_or_empty() {
+                    sym::Rust => Some(ReprRust),
                     sym::C => Some(ReprC),
                     sym::packed => Some(ReprPacked(1)),
                     sym::simd => Some(ReprSimd),
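
With the new `ReprRust` arm, `#[repr(Rust)]` is accepted as an explicit spelling of the default, compiler-chosen layout. A minimal example, assuming a compiler that includes this change:

    // Same layout guarantees as writing no `repr` attribute at all.
    #[repr(Rust)]
    struct Point {
        x: i32,
        y: i32,
    }

    fn main() {
        let p = Point { x: 1, y: 2 };
        assert_eq!(p.x + p.y, 3);
    }
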
diff --git a/compiler/rustc_borrowck/messages.ftl b/compiler/rustc_borrowck/messages.ftl
index 67fdb67..2c7b97a 100644
--- a/compiler/rustc_borrowck/messages.ftl
+++ b/compiler/rustc_borrowck/messages.ftl
@@ -74,9 +74,6 @@
 borrowck_lifetime_constraints_error =
     lifetime may not live long enough
 
-borrowck_move_borrowed =
-    cannot move out of `{$desc}` because it is borrowed
-
 borrowck_move_out_place_here =
     {$place} is moved here
 
@@ -166,6 +163,8 @@
 borrowck_returned_ref_escaped =
     returns a reference to a captured variable which escapes the closure body
 
+borrowck_simd_shuffle_last_const = last argument of `simd_shuffle` is required to be a `const` item
+
 borrowck_suggest_create_freash_reborrow =
     consider reborrowing the `Pin` instead of moving it
 
@@ -248,12 +247,6 @@
 borrowck_var_move_by_use_in_generator =
     move occurs due to use in generator
 
-borrowck_var_move_by_use_place_in_closure =
-    move occurs due to use of {$place} in closure
-
-borrowck_var_move_by_use_place_in_generator =
-    move occurs due to use of {$place} in generator
-
 borrowck_var_mutable_borrow_by_use_place_in_closure =
     mutable borrow occurs due to use of {$place} in closure
 
diff --git a/compiler/rustc_borrowck/src/borrow_set.rs b/compiler/rustc_borrowck/src/borrow_set.rs
index 0b44bee..5248a64 100644
--- a/compiler/rustc_borrowck/src/borrow_set.rs
+++ b/compiler/rustc_borrowck/src/borrow_set.rs
@@ -71,7 +71,7 @@
     fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
         let kind = match self.kind {
             mir::BorrowKind::Shared => "",
-            mir::BorrowKind::Shallow => "shallow ",
+            mir::BorrowKind::Fake => "fake ",
             mir::BorrowKind::Mut { kind: mir::MutBorrowKind::ClosureCapture } => "uniq ",
             // FIXME: differentiate `TwoPhaseBorrow`
             mir::BorrowKind::Mut {
diff --git a/compiler/rustc_borrowck/src/def_use.rs b/compiler/rustc_borrowck/src/def_use.rs
index b719a61..95db937 100644
--- a/compiler/rustc_borrowck/src/def_use.rs
+++ b/compiler/rustc_borrowck/src/def_use.rs
@@ -49,7 +49,7 @@
         // cross suspension points so this behavior is unproblematic.
         PlaceContext::MutatingUse(MutatingUseContext::Borrow) |
         PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow) |
-        PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow) |
+        PlaceContext::NonMutatingUse(NonMutatingUseContext::FakeBorrow) |
 
         // `PlaceMention` and `AscribeUserType` both evaluate the place, which must not
         // contain dangling references.
diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
index fe4a45b..ee352e9 100644
--- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
@@ -1025,7 +1025,7 @@
                 self.cannot_uniquely_borrow_by_two_closures(span, &desc_place, issued_span, None)
             }
 
-            (BorrowKind::Mut { .. }, BorrowKind::Shallow) => {
+            (BorrowKind::Mut { .. }, BorrowKind::Fake) => {
                 if let Some(immutable_section_description) =
                     self.classify_immutable_section(issued_borrow.assigned_place)
                 {
@@ -1117,11 +1117,10 @@
                 )
             }
 
-            (BorrowKind::Shared, BorrowKind::Shared | BorrowKind::Shallow)
-            | (
-                BorrowKind::Shallow,
-                BorrowKind::Mut { .. } | BorrowKind::Shared | BorrowKind::Shallow,
-            ) => unreachable!(),
+            (BorrowKind::Shared, BorrowKind::Shared | BorrowKind::Fake)
+            | (BorrowKind::Fake, BorrowKind::Mut { .. } | BorrowKind::Shared | BorrowKind::Fake) => {
+                unreachable!()
+            }
         };
 
         if issued_spans == borrow_spans {
@@ -2130,21 +2129,27 @@
                 /// misleading users in cases like `tests/ui/nll/borrowed-temporary-error.rs`.
                 /// We could expand the analysis to suggest hoisting all of the relevant parts of
                 /// the users' code to make the code compile, but that could be too much.
-                struct NestedStatementVisitor {
+                /// We also record `prop_expr` along the way so we can check whether the expression
+                /// is a `FormatArguments`, which is a special case since it's generated by the compiler.
+                struct NestedStatementVisitor<'tcx> {
                     span: Span,
                     current: usize,
                     found: usize,
+                    prop_expr: Option<&'tcx hir::Expr<'tcx>>,
                 }
 
-                impl<'tcx> Visitor<'tcx> for NestedStatementVisitor {
-                    fn visit_block(&mut self, block: &hir::Block<'tcx>) {
+                impl<'tcx> Visitor<'tcx> for NestedStatementVisitor<'tcx> {
+                    fn visit_block(&mut self, block: &'tcx hir::Block<'tcx>) {
                         self.current += 1;
                         walk_block(self, block);
                         self.current -= 1;
                     }
-                    fn visit_expr(&mut self, expr: &hir::Expr<'tcx>) {
+                    fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
                         if self.span == expr.span.source_callsite() {
                             self.found = self.current;
+                            if self.prop_expr.is_none() {
+                                self.prop_expr = Some(expr);
+                            }
                         }
                         walk_expr(self, expr);
                     }
@@ -2162,22 +2167,40 @@
                             span: proper_span,
                             current: 0,
                             found: 0,
+                            prop_expr: None,
                         };
                         visitor.visit_stmt(stmt);
+
+                        let typeck_results = self.infcx.tcx.typeck(self.mir_def_id());
+                        let expr_ty: Option<Ty<'_>> = visitor.prop_expr.map(|expr| typeck_results.expr_ty(expr).peel_refs());
+
+                        let is_format_arguments_item =
+                            if let Some(expr_ty) = expr_ty
+                               && let ty::Adt(adt, _) = expr_ty.kind() {
+                                    self.infcx.tcx.lang_items().get(LangItem::FormatArguments) == Some(adt.did())
+                               } else {
+                                   false
+                               };
+
                         if visitor.found == 0
                             && stmt.span.contains(proper_span)
                             && let Some(p) = sm.span_to_margin(stmt.span)
                             && let Ok(s) = sm.span_to_snippet(proper_span)
                         {
-                            let addition = format!("let binding = {};\n{}", s, " ".repeat(p));
-                            err.multipart_suggestion_verbose(
-                                msg,
-                                vec![
-                                    (stmt.span.shrink_to_lo(), addition),
-                                    (proper_span, "binding".to_string()),
-                                ],
-                                Applicability::MaybeIncorrect,
-                            );
+                            if !is_format_arguments_item {
+                                let addition = format!("let binding = {};\n{}", s, " ".repeat(p));
+                                err.multipart_suggestion_verbose(
+                                    msg,
+                                    vec![
+                                        (stmt.span.shrink_to_lo(), addition),
+                                        (proper_span, "binding".to_string()),
+                                    ],
+                                    Applicability::MaybeIncorrect,
+                                );
+                            } else {
+                                err.note("the result of `format_args!` can only be assigned directly if no placeholders in its arguments are used");
+                                err.note("to learn more, visit <https://doc.rust-lang.org/std/macro.format_args.html>");
+                            }
                             suggested = true;
                             break;
                         }
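
The new notes describe the restriction users hit when binding the result of `format_args!`: with placeholders, the value borrows temporaries created at the call site, so it cannot be stored in a `let` binding and must be consumed directly. A small sketch of the working shape (my own example, not from the test suite):

    fn main() {
        // `let args = format_args!("{}-{}", 1, 2);` is rejected because the result
        // borrows temporaries created for the placeholders; pass it on directly.
        println!("{}", format_args!("{}-{}", 1, 2));
    }
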
@@ -2620,7 +2643,7 @@
         let loan_span = loan_spans.args_or_use();
 
         let descr_place = self.describe_any_place(place.as_ref());
-        if loan.kind == BorrowKind::Shallow {
+        if loan.kind == BorrowKind::Fake {
             if let Some(section) = self.classify_immutable_section(loan.assigned_place) {
                 let mut err = self.cannot_mutate_in_immutable_section(
                     span,
@@ -2804,6 +2827,7 @@
                         }
                         ProjectionElem::ConstantIndex { .. }
                         | ProjectionElem::Subslice { .. }
+                        | ProjectionElem::Subtype(_)
                         | ProjectionElem::Index(_) => kind,
                     },
                     place_ty.projection_ty(tcx, elem),
diff --git a/compiler/rustc_borrowck/src/diagnostics/mod.rs b/compiler/rustc_borrowck/src/diagnostics/mod.rs
index 099e07e..8d4028d 100644
--- a/compiler/rustc_borrowck/src/diagnostics/mod.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/mod.rs
@@ -13,7 +13,7 @@
 use rustc_infer::infer::LateBoundRegionConversionTime;
 use rustc_middle::mir::tcx::PlaceTy;
 use rustc_middle::mir::{
-    AggregateKind, CallSource, Constant, FakeReadCause, Local, LocalInfo, LocalKind, Location,
+    AggregateKind, CallSource, ConstOperand, FakeReadCause, Local, LocalInfo, LocalKind, Location,
     Operand, Place, PlaceRef, ProjectionElem, Rvalue, Statement, StatementKind, Terminator,
     TerminatorKind,
 };
@@ -101,12 +101,12 @@
         let terminator = self.body[location.block].terminator();
         debug!("add_moved_or_invoked_closure_note: terminator={:?}", terminator);
         if let TerminatorKind::Call {
-            func: Operand::Constant(box Constant { literal, .. }),
+            func: Operand::Constant(box ConstOperand { const_, .. }),
             args,
             ..
         } = &terminator.kind
         {
-            if let ty::FnDef(id, _) = *literal.ty().kind() {
+            if let ty::FnDef(id, _) = *const_.ty().kind() {
                 debug!("add_moved_or_invoked_closure_note: id={:?}", id);
                 if Some(self.infcx.tcx.parent(id)) == self.infcx.tcx.lang_items().fn_once_trait() {
                     let closure = match args.first() {
@@ -242,6 +242,7 @@
                 ProjectionElem::Downcast(..) if opt.including_downcast => return None,
                 ProjectionElem::Downcast(..) => (),
                 ProjectionElem::OpaqueCast(..) => (),
+                ProjectionElem::Subtype(..) => (),
                 ProjectionElem::Field(field, _ty) => {
                     // FIXME(project-rfc_2229#36): print capture precisely here.
                     if let Some(field) = self.is_upvar_field_projection(PlaceRef {
@@ -322,7 +323,9 @@
                     PlaceRef { local, projection: proj_base }.ty(self.body, self.infcx.tcx)
                 }
                 ProjectionElem::Downcast(..) => place.ty(self.body, self.infcx.tcx),
-                ProjectionElem::OpaqueCast(ty) => PlaceTy::from_ty(*ty),
+                ProjectionElem::Subtype(ty) | ProjectionElem::OpaqueCast(ty) => {
+                    PlaceTy::from_ty(*ty)
+                }
                 ProjectionElem::Field(_, field_type) => PlaceTy::from_ty(*field_type),
             },
         };
@@ -628,7 +631,7 @@
                 err.subdiagnostic(match kind {
                     Some(kd) => match kd {
                         rustc_middle::mir::BorrowKind::Shared
-                        | rustc_middle::mir::BorrowKind::Shallow => {
+                        | rustc_middle::mir::BorrowKind::Fake => {
                             CaptureVarKind::Immut { kind_span: capture_kind_span }
                         }
 
diff --git a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
index d62541d..8ca5738 100644
--- a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
@@ -1,9 +1,10 @@
+use hir::ExprKind;
 use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
 use rustc_hir as hir;
 use rustc_hir::intravisit::Visitor;
 use rustc_hir::Node;
 use rustc_middle::mir::{Mutability, Place, PlaceRef, ProjectionElem};
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, InstanceDef, Ty, TyCtxt};
 use rustc_middle::{
     hir::place::PlaceBase,
     mir::{self, BindingForm, Local, LocalDecl, LocalInfo, LocalKind, Location},
@@ -158,6 +159,7 @@
                     [
                         ..,
                         ProjectionElem::Index(_)
+                        | ProjectionElem::Subtype(_)
                         | ProjectionElem::ConstantIndex { .. }
                         | ProjectionElem::OpaqueCast { .. }
                         | ProjectionElem::Subslice { .. }
@@ -225,17 +227,17 @@
                 }
                 if suggest {
                     borrow_spans.var_subdiag(
-                    None,
-                    &mut err,
-                    Some(mir::BorrowKind::Mut { kind: mir::MutBorrowKind::Default }),
-                    |_kind, var_span| {
-                        let place = self.describe_any_place(access_place.as_ref());
-                        crate::session_diagnostics::CaptureVarCause::MutableBorrowUsePlaceClosure {
-                            place,
-                            var_span,
-                        }
-                    },
-                );
+                        None,
+                        &mut err,
+                        Some(mir::BorrowKind::Mut { kind: mir::MutBorrowKind::Default }),
+                        |_kind, var_span| {
+                            let place = self.describe_any_place(access_place.as_ref());
+                            crate::session_diagnostics::CaptureVarCause::MutableBorrowUsePlaceClosure {
+                                place,
+                                var_span,
+                            }
+                        },
+                    );
                 }
                 borrow_span
             }
@@ -262,11 +264,8 @@
             } => {
                 err.span_label(span, format!("cannot {act}"));
 
-                if let Some(span) = get_mut_span_in_struct_field(
-                    self.infcx.tcx,
-                    Place::ty_from(local, proj_base, self.body, self.infcx.tcx).ty,
-                    *field,
-                ) {
+                let place = Place::ty_from(local, proj_base, self.body, self.infcx.tcx);
+                if let Some(span) = get_mut_span_in_struct_field(self.infcx.tcx, place.ty, *field) {
                     err.span_suggestion_verbose(
                         span,
                         "consider changing this to be mutable",
@@ -373,12 +372,7 @@
                     err.span_label(span, format!("cannot {act}"));
                 }
                 if suggest {
-                    err.span_suggestion_verbose(
-                        local_decl.source_info.span.shrink_to_lo(),
-                        "consider changing this to be mutable",
-                        "mut ",
-                        Applicability::MachineApplicable,
-                    );
+                    self.construct_mut_suggestion_for_local_binding_patterns(&mut err, local);
                     let tcx = self.infcx.tcx;
                     if let ty::Closure(id, _) = *the_place_err.ty(self.body, tcx).ty.kind() {
                         self.show_mutating_upvar(tcx, id.expect_local(), the_place_err, &mut err);
@@ -494,6 +488,7 @@
                             ),
                         );
 
+                        self.suggest_using_iter_mut(&mut err);
                         self.suggest_make_local_mut(&mut err, local, name);
                     }
                     _ => {
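
`suggest_using_iter_mut`, wired in above, targets the common case of mutating elements inside a `for` loop. Roughly the shape of code the suggestion steers users toward (my own example):

    fn main() {
        let mut v = vec![1, 2, 3];
        // `iter_mut` yields `&mut i32`, so the elements can be modified in place;
        // plain `iter` would only hand out shared `&i32` references.
        for x in v.iter_mut() {
            *x += 1;
        }
        assert_eq!(v, [2, 3, 4]);
    }
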
@@ -713,6 +708,83 @@
         )
     }
 
+    fn construct_mut_suggestion_for_local_binding_patterns(
+        &self,
+        err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+        local: Local,
+    ) {
+        let local_decl = &self.body.local_decls[local];
+        debug!("local_decl: {:?}", local_decl);
+        let pat_span = match *local_decl.local_info() {
+            LocalInfo::User(BindingForm::Var(mir::VarBindingForm {
+                binding_mode: ty::BindingMode::BindByValue(Mutability::Not),
+                opt_ty_info: _,
+                opt_match_place: _,
+                pat_span,
+            })) => pat_span,
+            _ => local_decl.source_info.span,
+        };
+
+        struct BindingFinder {
+            span: Span,
+            hir_id: Option<hir::HirId>,
+        }
+
+        impl<'tcx> Visitor<'tcx> for BindingFinder {
+            fn visit_stmt(&mut self, s: &'tcx hir::Stmt<'tcx>) {
+                if let hir::StmtKind::Local(local) = s.kind {
+                    if local.pat.span == self.span {
+                        self.hir_id = Some(local.hir_id);
+                    }
+                }
+                hir::intravisit::walk_stmt(self, s);
+            }
+        }
+
+        let hir_map = self.infcx.tcx.hir();
+        let def_id = self.body.source.def_id();
+        let hir_id = if let Some(local_def_id) = def_id.as_local()
+            && let Some(body_id) = hir_map.maybe_body_owned_by(local_def_id)
+        {
+            let body = hir_map.body(body_id);
+            let mut v = BindingFinder {
+                span: pat_span,
+                hir_id: None,
+            };
+            v.visit_body(body);
+            v.hir_id
+        } else {
+            None
+        };
+
+        // With ref-binding patterns, the mutability suggestion has to apply to
+        // the binding, not the reference (which would be a type error):
+        //
+        // `let &b = a;` -> `let &(mut b) = a;`
+        if let Some(hir_id) = hir_id
+            && let Some(hir::Node::Local(hir::Local {
+                pat: hir::Pat { kind: hir::PatKind::Ref(_, _), .. },
+                ..
+            })) = hir_map.find(hir_id)
+            && let Ok(name) = self.infcx.tcx.sess.source_map().span_to_snippet(local_decl.source_info.span)
+        {
+            err.span_suggestion(
+                pat_span,
+                "consider changing this to be mutable",
+                format!("&(mut {name})"),
+                Applicability::MachineApplicable,
+            );
+            return;
+        }
+
+        err.span_suggestion_verbose(
+            local_decl.source_info.span.shrink_to_lo(),
+            "consider changing this to be mutable",
+            "mut ",
+            Applicability::MachineApplicable,
+        );
+    }
+
     // point to span of upvar making closure call require mutable borrow
     fn show_mutating_upvar(
         &self,
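
The `construct_mut_suggestion_for_local_binding_patterns` helper added in the hunk above aims the `mut` suggestion at the binding inside a by-reference pattern. A minimal sketch of the user code it targets (illustrative only, not part of this patch; the diagnostic wording comes from the compiler):

    fn main() {
        let a = &42;

        // `b` is an immutable by-value binding taken through a `&` pattern,
        // so the commented-out assignment is rejected by borrowck:
        let &b = a;
        // b += 1;

        // The suggested fix puts `mut` on the binding, not on the reference
        // (writing `&mut b` here would change the pattern's type instead):
        let &(mut c) = a;
        c += 1;

        let _ = (b, c);
    }
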
@@ -781,83 +853,88 @@
 
     // Attempt to search similar mutable associated items for suggestion.
     // In the future, attempt in all path but initially for RHS of for_loop
-    fn suggest_similar_mut_method_for_for_loop(&self, err: &mut Diagnostic) {
+    fn suggest_similar_mut_method_for_for_loop(&self, err: &mut Diagnostic, span: Span) {
         use hir::{
-            Expr,
-            ExprKind::{Block, Call, DropTemps, Match, MethodCall},
+            BorrowKind, Expr,
+            ExprKind::{AddrOf, Block, Call, MethodCall},
         };
 
         let hir_map = self.infcx.tcx.hir();
-        if let Some(body_id) = hir_map.maybe_body_owned_by(self.mir_def_id()) {
-            if let Block(
-                hir::Block {
-                    expr:
-                        Some(Expr {
-                            kind:
-                                DropTemps(Expr {
-                                    kind:
-                                        Match(
-                                            Expr {
-                                                kind:
-                                                    Call(
-                                                        _,
-                                                        [
-                                                            Expr {
-                                                                kind:
-                                                                    MethodCall(path_segment, _, _, span),
-                                                                hir_id,
-                                                                ..
-                                                            },
-                                                            ..,
-                                                        ],
-                                                    ),
-                                                ..
-                                            },
-                                            ..,
-                                        ),
-                                    ..
-                                }),
-                            ..
-                        }),
-                    ..
-                },
-                _,
-            ) = hir_map.body(body_id).value.kind
-            {
-                let opt_suggestions = self
-                    .infcx
-                    .tcx
-                    .typeck(path_segment.hir_id.owner.def_id)
-                    .type_dependent_def_id(*hir_id)
-                    .and_then(|def_id| self.infcx.tcx.impl_of_method(def_id))
-                    .map(|def_id| self.infcx.tcx.associated_items(def_id))
-                    .map(|assoc_items| {
-                        assoc_items
-                            .in_definition_order()
-                            .map(|assoc_item_def| assoc_item_def.ident(self.infcx.tcx))
-                            .filter(|&ident| {
-                                let original_method_ident = path_segment.ident;
-                                original_method_ident != ident
-                                    && ident
-                                        .as_str()
-                                        .starts_with(&original_method_ident.name.to_string())
-                            })
-                            .map(|ident| format!("{ident}()"))
-                            .peekable()
-                    });
+        struct Finder<'tcx> {
+            span: Span,
+            expr: Option<&'tcx Expr<'tcx>>,
+        }
 
-                if let Some(mut suggestions) = opt_suggestions
-                    && suggestions.peek().is_some()
-                {
-                    err.span_suggestions(
-                        *span,
-                        "use mutable method",
-                        suggestions,
-                        Applicability::MaybeIncorrect,
-                    );
+        impl<'tcx> Visitor<'tcx> for Finder<'tcx> {
+            fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
+                if e.span == self.span && self.expr.is_none() {
+                    self.expr = Some(e);
+                }
+                hir::intravisit::walk_expr(self, e);
+            }
+        }
+        if let Some(body_id) = hir_map.maybe_body_owned_by(self.mir_def_id())
+            && let Block(block, _) = hir_map.body(body_id).value.kind
+        {
+            // `span` corresponds to the expression being iterated. Find the `for`-loop desugared
+            // expression with that span in order to identify potential fixes when encountering a
+            // read-only iterator that should have been mutable.
+            let mut v = Finder {
+                span,
+                expr: None,
+            };
+            v.visit_block(block);
+            if let Some(expr) = v.expr && let Call(_, [expr]) = expr.kind {
+                match expr.kind {
+                    MethodCall(path_segment, _, _, span) => {
+                        // We have `for _ in iter.read_only_iter()`, try to
+                        // suggest `for _ in iter.mutable_iter()` instead.
+                        let opt_suggestions = self
+                            .infcx
+                            .tcx
+                            .typeck(path_segment.hir_id.owner.def_id)
+                            .type_dependent_def_id(expr.hir_id)
+                            .and_then(|def_id| self.infcx.tcx.impl_of_method(def_id))
+                            .map(|def_id| self.infcx.tcx.associated_items(def_id))
+                            .map(|assoc_items| {
+                                assoc_items
+                                    .in_definition_order()
+                                    .map(|assoc_item_def| assoc_item_def.ident(self.infcx.tcx))
+                                    .filter(|&ident| {
+                                        let original_method_ident = path_segment.ident;
+                                        original_method_ident != ident
+                                            && ident.as_str().starts_with(
+                                                &original_method_ident.name.to_string(),
+                                            )
+                                    })
+                                    .map(|ident| format!("{ident}()"))
+                                    .peekable()
+                            });
+
+                        if let Some(mut suggestions) = opt_suggestions
+                            && suggestions.peek().is_some()
+                        {
+                            err.span_suggestions(
+                                span,
+                                "use mutable method",
+                                suggestions,
+                                Applicability::MaybeIncorrect,
+                            );
+                        }
+                    }
+                    AddrOf(BorrowKind::Ref, Mutability::Not, expr) => {
+                        // We have `for _ in &i`, suggest `for _ in &mut i`.
+                        err.span_suggestion_verbose(
+                            expr.span.shrink_to_lo(),
+                            "use a mutable iterator instead",
+                            "mut ".to_string(),
+                            Applicability::MachineApplicable,
+                        );
+                    }
+                    _ => {}
                 }
             }
-        };
+        }
     }
 
     /// Targeted error when encountering an `FnMut` closure where an `Fn` closure was expected.
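
`suggest_similar_mut_method_for_for_loop` now walks the desugared `for` loop to find the iterated expression and covers two shapes of fix. Roughly the programs it is aimed at (an illustrative sketch, not part of the patch):

    fn main() {
        let mut v = vec![1, 2, 3];

        // Shape 1: `for x in &v` yields `&i32`, so `*x += 1` is rejected;
        // the suggestion is to iterate over `&mut v` instead.
        for x in &mut v {
            *x += 1;
        }

        // Shape 2: a read-only method was called where a similarly named
        // mutable method exists (`iter` vs `iter_mut`); the "use mutable
        // method" suggestion lists the mutable candidates.
        for x in v.iter_mut() {
            *x += 1;
        }

        assert_eq!(v, [3, 4, 5]);
    }
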
@@ -951,6 +1028,44 @@
         }
     }
 
+    fn suggest_using_iter_mut(&self, err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>) {
+        let source = self.body.source;
+        let hir = self.infcx.tcx.hir();
+        if let InstanceDef::Item(def_id) = source.instance
+            && let Some(Node::Expr(hir::Expr { hir_id, kind, ..})) = hir.get_if_local(def_id)
+            && let ExprKind::Closure(closure) = kind && closure.movability == None
+            && let Some(Node::Expr(expr)) = hir.find_parent(*hir_id) {
+                let mut cur_expr = expr;
+                while let ExprKind::MethodCall(path_segment, recv, _, _) = cur_expr.kind {
+                    if path_segment.ident.name == sym::iter {
+                        // check that the type providing `iter` also has an `iter_mut` method
+                        let res = self
+                            .infcx
+                            .tcx
+                            .typeck(path_segment.hir_id.owner.def_id)
+                            .type_dependent_def_id(cur_expr.hir_id)
+                            .and_then(|def_id| self.infcx.tcx.impl_of_method(def_id))
+                            .map(|def_id| self.infcx.tcx.associated_items(def_id))
+                            .map(|assoc_items| {
+                                assoc_items.filter_by_name_unhygienic(sym::iter_mut).peekable()
+                            });
+
+                        if let Some(mut res) = res && res.peek().is_some() {
+                            err.span_suggestion_verbose(
+                                path_segment.ident.span,
+                                "you may want to use `iter_mut` here",
+                                "iter_mut",
+                                Applicability::MaybeIncorrect,
+                            );
+                        }
+                        break;
+                    } else {
+                        cur_expr = recv;
+                    }
+                }
+            }
+    }
+
     fn suggest_make_local_mut(
         &self,
         err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
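
The new `suggest_using_iter_mut` helper fires when the mutability error originates inside a closure whose receiver chain starts with an `iter` call and the receiver type also offers `iter_mut`. A sketch of the situation it targets (illustrative user code only):

    fn main() {
        let mut v = vec![1, 2, 3];

        // The closure receives `&i32` items, so the assignment is rejected and
        // the diagnostic now points at `iter`, proposing `iter_mut`:
        // v.iter().for_each(|x| *x += 1);

        v.iter_mut().for_each(|x| *x += 1);
        assert_eq!(v, [2, 3, 4]);
    }
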
@@ -1003,9 +1118,10 @@
                 match opt_assignment_rhs_span.and_then(|s| s.desugaring_kind()) {
                     // on for loops, RHS points to the iterator part
                     Some(DesugaringKind::ForLoop) => {
-                        self.suggest_similar_mut_method_for_for_loop(err);
+                        let span = opt_assignment_rhs_span.unwrap();
+                        self.suggest_similar_mut_method_for_for_loop(err, span);
                         err.span_label(
-                            opt_assignment_rhs_span.unwrap(),
+                            span,
                             format!("this iterator yields `{pointer_sigil}` {pointer_desc}s",),
                         );
                         None
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
index 2ea3997..27072a6 100644
--- a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
@@ -245,7 +245,7 @@
             let Trait(PolyTraitRef { trait_ref, span: trait_span, .. }, _) = bound else { return; };
             diag.span_note(
                 *trait_span,
-                format!("due to current limitations in the borrow checker, this implies a `'static` lifetime")
+                "due to current limitations in the borrow checker, this implies a `'static` lifetime"
             );
             let Some(generics_fn) = hir.get_generics(self.body.source.def_id().expect_local()) else { return; };
             let Def(_, trait_res_defid) = trait_ref.path.res else { return; };
@@ -277,7 +277,7 @@
         if suggestions.len() > 0 {
             suggestions.dedup();
             diag.multipart_suggestion_verbose(
-                format!("consider restricting the type parameter to the `'static` lifetime"),
+                "consider restricting the type parameter to the `'static` lifetime",
                 suggestions,
                 Applicability::MaybeIncorrect,
             );
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_name.rs b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
index 337af89..55d581b 100644
--- a/compiler/rustc_borrowck/src/diagnostics/region_name.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
@@ -27,7 +27,7 @@
 /// This helps to print the right kinds of diagnostics.
 #[derive(Debug, Clone)]
 pub(crate) enum RegionNameSource {
-    /// A bound (not free) region that was substituted at the def site (not an HRTB).
+    /// A bound (not free) region that was instantiated at the def site (not an HRTB).
     NamedEarlyBoundRegion(Span),
     /// A free region that the user has a name (`'a`) for.
     NamedFreeRegion(Span),
@@ -302,7 +302,7 @@
                     if free_region.bound_region.is_named() {
                         // A named region that is actually named.
                         Some(RegionName { name, source: RegionNameSource::NamedFreeRegion(span) })
-                    } else if let hir::IsAsync::Async = tcx.asyncness(self.mir_hir_id().owner) {
+                    } else if tcx.asyncness(self.mir_hir_id().owner).is_async() {
                         // If we spuriously thought that the region is named, we should let the
                         // system generate a true name for error messages. Currently this can
                         // happen if we have an elided name in an async fn for example: the
@@ -354,7 +354,7 @@
                     })
                 }
 
-                ty::BoundRegionKind::BrAnon(..) => None,
+                ty::BoundRegionKind::BrAnon => None,
             },
 
             ty::ReLateBound(..)
@@ -442,8 +442,8 @@
         span: Span,
         counter: usize,
     ) -> RegionNameHighlight {
-        let mut highlight = RegionHighlightMode::new(self.infcx.tcx);
-        highlight.highlighting_region_vid(needle_fr, counter);
+        let mut highlight = RegionHighlightMode::default();
+        highlight.highlighting_region_vid(self.infcx.tcx, needle_fr, counter);
         let type_name =
             self.infcx.extract_inference_diagnostics_data(ty.into(), Some(highlight)).name;
 
@@ -516,7 +516,7 @@
                         // be the same as those of the ADT.
                         // FIXME: We should be able to do something similar to
                         // match_adt_and_segment in this case.
-                        Res::Def(DefKind::TyAlias { .. }, _) => (),
+                        Res::Def(DefKind::TyAlias, _) => (),
                         _ => {
                             if let Some(last_segment) = path.segments.last() {
                                 if let Some(highlight) = self.match_adt_and_segment(
@@ -619,7 +619,7 @@
                     // programs, so we need to use delay_span_bug here. See #82126.
                     self.infcx.tcx.sess.delay_span_bug(
                         hir_arg.span(),
-                        format!("unmatched subst and hir arg: found {kind:?} vs {hir_arg:?}"),
+                        format!("unmatched arg and hir arg: found {kind:?} vs {hir_arg:?}"),
                     );
                 }
             }
@@ -804,8 +804,8 @@
             return None;
         }
 
-        let mut highlight = RegionHighlightMode::new(tcx);
-        highlight.highlighting_region_vid(fr, *self.next_region_name.try_borrow().unwrap());
+        let mut highlight = RegionHighlightMode::default();
+        highlight.highlighting_region_vid(tcx, fr, *self.next_region_name.try_borrow().unwrap());
         let type_name =
             self.infcx.extract_inference_diagnostics_data(yield_ty.into(), Some(highlight)).name;
 
diff --git a/compiler/rustc_borrowck/src/invalidation.rs b/compiler/rustc_borrowck/src/invalidation.rs
index df5e383..2faf1a5 100644
--- a/compiler/rustc_borrowck/src/invalidation.rs
+++ b/compiler/rustc_borrowck/src/invalidation.rs
@@ -159,7 +159,9 @@
 
                 self.mutate_place(location, *resume_arg, Deep);
             }
-            TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
+            TerminatorKind::UnwindResume
+            | TerminatorKind::Return
+            | TerminatorKind::GeneratorDrop => {
                 // Invalidate all borrows of local places
                 let borrow_set = self.borrow_set;
                 let start = self.location_table.start_index(location);
@@ -200,7 +202,7 @@
                 }
             }
             TerminatorKind::Goto { target: _ }
-            | TerminatorKind::Terminate
+            | TerminatorKind::UnwindTerminate(_)
             | TerminatorKind::Unreachable
             | TerminatorKind::FalseEdge { real_target: _, imaginary_target: _ }
             | TerminatorKind::FalseUnwind { real_target: _, unwind: _ } => {
@@ -251,8 +253,8 @@
         match rvalue {
             &Rvalue::Ref(_ /*rgn*/, bk, place) => {
                 let access_kind = match bk {
-                    BorrowKind::Shallow => {
-                        (Shallow(Some(ArtificialField::ShallowBorrow)), Read(ReadKind::Borrow(bk)))
+                    BorrowKind::Fake => {
+                        (Shallow(Some(ArtificialField::FakeBorrow)), Read(ReadKind::Borrow(bk)))
                     }
                     BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
                     BorrowKind::Mut { .. } => {
@@ -374,8 +376,8 @@
                         // have already taken the reservation
                     }
 
-                    (Read(_), BorrowKind::Shallow | BorrowKind::Shared)
-                    | (Read(ReadKind::Borrow(BorrowKind::Shallow)), BorrowKind::Mut { .. }) => {
+                    (Read(_), BorrowKind::Fake | BorrowKind::Shared)
+                    | (Read(ReadKind::Borrow(BorrowKind::Fake)), BorrowKind::Mut { .. }) => {
                         // Reads don't invalidate shared or shallow borrows
                     }
 
@@ -420,7 +422,7 @@
 
             // only mutable borrows should be 2-phase
             assert!(match borrow.kind {
-                BorrowKind::Shared | BorrowKind::Shallow => false,
+                BorrowKind::Shared | BorrowKind::Fake => false,
                 BorrowKind::Mut { .. } => true,
             });
 
diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs
index efe525c..1d17df8 100644
--- a/compiler/rustc_borrowck/src/lib.rs
+++ b/compiler/rustc_borrowck/src/lib.rs
@@ -11,7 +11,7 @@
 #![feature(trusted_step)]
 #![feature(try_blocks)]
 #![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
 
 #[macro_use]
 extern crate rustc_middle;
@@ -603,7 +603,7 @@
 
     fn visit_statement_before_primary_effect(
         &mut self,
-        _results: &R,
+        _results: &mut R,
         flow_state: &Flows<'cx, 'tcx>,
         stmt: &'cx Statement<'tcx>,
         location: Location,
@@ -673,7 +673,7 @@
 
     fn visit_terminator_before_primary_effect(
         &mut self,
-        _results: &R,
+        _results: &mut R,
         flow_state: &Flows<'cx, 'tcx>,
         term: &'cx Terminator<'tcx>,
         loc: Location,
@@ -770,9 +770,9 @@
             }
 
             TerminatorKind::Goto { target: _ }
-            | TerminatorKind::Terminate
+            | TerminatorKind::UnwindTerminate(_)
             | TerminatorKind::Unreachable
-            | TerminatorKind::Resume
+            | TerminatorKind::UnwindResume
             | TerminatorKind::Return
             | TerminatorKind::GeneratorDrop
             | TerminatorKind::FalseEdge { real_target: _, imaginary_target: _ }
@@ -784,7 +784,7 @@
 
     fn visit_terminator_after_primary_effect(
         &mut self,
-        _results: &R,
+        _results: &mut R,
         flow_state: &Flows<'cx, 'tcx>,
         term: &'cx Terminator<'tcx>,
         loc: Location,
@@ -803,7 +803,9 @@
                 }
             }
 
-            TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
+            TerminatorKind::UnwindResume
+            | TerminatorKind::Return
+            | TerminatorKind::GeneratorDrop => {
                 // Returning from the function implicitly kills storage for all locals and statics.
                 // Often, the storage will already have been killed by an explicit
                 // StorageDead, but we don't always emit those (notably on unwind paths),
@@ -815,7 +817,7 @@
                 }
             }
 
-            TerminatorKind::Terminate
+            TerminatorKind::UnwindTerminate(_)
             | TerminatorKind::Assert { .. }
             | TerminatorKind::Call { .. }
             | TerminatorKind::Drop { .. }
@@ -835,7 +837,7 @@
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
 enum ArtificialField {
     ArrayLength,
-    ShallowBorrow,
+    FakeBorrow,
 }
 
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
@@ -1074,18 +1076,18 @@
                     Control::Continue
                 }
 
-                (Read(_), BorrowKind::Shared | BorrowKind::Shallow)
-                | (Read(ReadKind::Borrow(BorrowKind::Shallow)), BorrowKind::Mut { .. }) => {
+                (Read(_), BorrowKind::Shared | BorrowKind::Fake)
+                | (Read(ReadKind::Borrow(BorrowKind::Fake)), BorrowKind::Mut { .. }) => {
                     Control::Continue
                 }
 
-                (Reservation(_), BorrowKind::Shallow | BorrowKind::Shared) => {
+                (Reservation(_), BorrowKind::Fake | BorrowKind::Shared) => {
                     // This used to be a future compatibility warning (to be
                     // disallowed on NLL). See rust-lang/rust#56254
                     Control::Continue
                 }
 
-                (Write(WriteKind::Move), BorrowKind::Shallow) => {
+                (Write(WriteKind::Move), BorrowKind::Fake) => {
                     // Handled by initialization checks.
                     Control::Continue
                 }
@@ -1193,8 +1195,8 @@
         match rvalue {
             &Rvalue::Ref(_ /*rgn*/, bk, place) => {
                 let access_kind = match bk {
-                    BorrowKind::Shallow => {
-                        (Shallow(Some(ArtificialField::ShallowBorrow)), Read(ReadKind::Borrow(bk)))
+                    BorrowKind::Fake => {
+                        (Shallow(Some(ArtificialField::FakeBorrow)), Read(ReadKind::Borrow(bk)))
                     }
                     BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
                     BorrowKind::Mut { .. } => {
@@ -1215,7 +1217,7 @@
                     flow_state,
                 );
 
-                let action = if bk == BorrowKind::Shallow {
+                let action = if bk == BorrowKind::Fake {
                     InitializationRequiringAction::MatchOn
                 } else {
                     InitializationRequiringAction::Borrow
@@ -1567,7 +1569,7 @@
 
             // only mutable borrows should be 2-phase
             assert!(match borrow.kind {
-                BorrowKind::Shared | BorrowKind::Shallow => false,
+                BorrowKind::Shared | BorrowKind::Fake => false,
                 BorrowKind::Mut { .. } => true,
             });
 
@@ -1801,6 +1803,7 @@
         for (place_base, elem) in place.iter_projections().rev() {
             match elem {
                 ProjectionElem::Index(_/*operand*/) |
+                ProjectionElem::Subtype(_) |
                 ProjectionElem::OpaqueCast(_) |
                 ProjectionElem::ConstantIndex { .. } |
                 // assigning to P[i] requires P to be valid.
@@ -2000,14 +2003,14 @@
                 | WriteKind::Replace
                 | WriteKind::StorageDeadOrDrop
                 | WriteKind::MutableBorrow(BorrowKind::Shared)
-                | WriteKind::MutableBorrow(BorrowKind::Shallow),
+                | WriteKind::MutableBorrow(BorrowKind::Fake),
             )
             | Write(
                 WriteKind::Move
                 | WriteKind::Replace
                 | WriteKind::StorageDeadOrDrop
                 | WriteKind::MutableBorrow(BorrowKind::Shared)
-                | WriteKind::MutableBorrow(BorrowKind::Shallow),
+                | WriteKind::MutableBorrow(BorrowKind::Fake),
             ) => {
                 if self.is_mutable(place.as_ref(), is_local_mutation_allowed).is_err()
                     && !self.has_buffered_errors()
@@ -2031,7 +2034,7 @@
                 return false;
             }
             Read(
-                ReadKind::Borrow(BorrowKind::Mut { .. } | BorrowKind::Shared | BorrowKind::Shallow)
+                ReadKind::Borrow(BorrowKind::Mut { .. } | BorrowKind::Shared | BorrowKind::Fake)
                 | ReadKind::Copy,
             ) => {
                 // Access authorized
@@ -2189,6 +2192,7 @@
                     | ProjectionElem::Index(..)
                     | ProjectionElem::ConstantIndex { .. }
                     | ProjectionElem::Subslice { .. }
+                    | ProjectionElem::Subtype(..)
                     | ProjectionElem::OpaqueCast { .. }
                     | ProjectionElem::Downcast(..) => {
                         let upvar_field_projection = self.is_upvar_field_projection(place);
diff --git a/compiler/rustc_borrowck/src/nll.rs b/compiler/rustc_borrowck/src/nll.rs
index 679a197..3f60f5a 100644
--- a/compiler/rustc_borrowck/src/nll.rs
+++ b/compiler/rustc_borrowck/src/nll.rs
@@ -10,6 +10,7 @@
     Body, ClosureOutlivesSubject, ClosureRegionRequirements, LocalKind, Location, Promoted,
     START_BLOCK,
 };
+use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, OpaqueHiddenType, TyCtxt};
 use rustc_span::symbol::sym;
 use std::env;
@@ -441,7 +442,10 @@
         let subject = match req.subject {
             ClosureOutlivesSubject::Region(subject) => format!("{subject:?}"),
             ClosureOutlivesSubject::Ty(ty) => {
-                format!("{:?}", ty.instantiate(tcx, |vid| ty::Region::new_var(tcx, vid)))
+                with_no_trimmed_paths!(format!(
+                    "{}",
+                    ty.instantiate(tcx, |vid| ty::Region::new_var(tcx, vid))
+                ))
             }
         };
         with_msg(format!("where {}: {:?}", subject, req.outlived_free_region,))?;
diff --git a/compiler/rustc_borrowck/src/places_conflict.rs b/compiler/rustc_borrowck/src/places_conflict.rs
index c02f6f3..777ebf0 100644
--- a/compiler/rustc_borrowck/src/places_conflict.rs
+++ b/compiler/rustc_borrowck/src/places_conflict.rs
@@ -204,7 +204,7 @@
 
             match (elem, &base_ty.kind(), access) {
                 (_, _, Shallow(Some(ArtificialField::ArrayLength)))
-                | (_, _, Shallow(Some(ArtificialField::ShallowBorrow))) => {
+                | (_, _, Shallow(Some(ArtificialField::FakeBorrow))) => {
                     // The array length is like additional fields on the
                     // type; it does not overlap any existing data there.
                     // Furthermore, it cannot actually be a prefix of any
@@ -249,6 +249,7 @@
                 | (ProjectionElem::ConstantIndex { .. }, _, _)
                 | (ProjectionElem::Subslice { .. }, _, _)
                 | (ProjectionElem::OpaqueCast { .. }, _, _)
+                | (ProjectionElem::Subtype(_), _, _)
                 | (ProjectionElem::Downcast { .. }, _, _) => {
                     // Recursive case. This can still be disjoint on a
                     // further iteration if this is a shallow access and
@@ -272,10 +273,10 @@
     // In the second example, where we did, we still know
     // that the borrow can access a *part* of our place that
     // our access cares about, so we still have a conflict.
-    if borrow_kind == BorrowKind::Shallow
+    if borrow_kind == BorrowKind::Fake
         && borrow_place.projection.len() < access_place.projection.len()
     {
-        debug!("borrow_conflicts_with_place: shallow borrow");
+        debug!("borrow_conflicts_with_place: fake borrow");
         false
     } else {
         debug!("borrow_conflicts_with_place: full borrow, CONFLICT");
@@ -508,6 +509,7 @@
             | ProjectionElem::Field(..)
             | ProjectionElem::Index(..)
             | ProjectionElem::ConstantIndex { .. }
+            | ProjectionElem::Subtype(_)
             | ProjectionElem::OpaqueCast { .. }
             | ProjectionElem::Subslice { .. }
             | ProjectionElem::Downcast(..),
diff --git a/compiler/rustc_borrowck/src/prefixes.rs b/compiler/rustc_borrowck/src/prefixes.rs
index 6f28134..e9c9709 100644
--- a/compiler/rustc_borrowck/src/prefixes.rs
+++ b/compiler/rustc_borrowck/src/prefixes.rs
@@ -89,6 +89,9 @@
                             cursor = cursor_base;
                             continue 'cursor;
                         }
+                        ProjectionElem::Subtype(..) => {
+                            panic!("Subtype projection is not allowed before borrow check")
+                        }
                         ProjectionElem::Deref => {
                             // (handled below)
                         }
diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs
index b8cd94e..8529356 100644
--- a/compiler/rustc_borrowck/src/region_infer/mod.rs
+++ b/compiler/rustc_borrowck/src/region_infer/mod.rs
@@ -2249,7 +2249,14 @@
     }
 
     pub(crate) fn universe_info(&self, universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
-        self.universe_causes[&universe].clone()
+        // Query canonicalization can create local superuniverses (for example in
+        // `InferCtxt::query_response_instantiation_guess`), but they don't have an associated
+        // `UniverseInfo` explaining why they were created.
+        // This can cause ICEs if these causes are accessed in diagnostics, for example in issue
+        // #114907 where this happens via liveness and dropck outlives results.
+        // Therefore, we return a default value in case that happens, which should at worst emit a
+        // suboptimal error, instead of an ICE.
+        self.universe_causes.get(&universe).cloned().unwrap_or_else(|| UniverseInfo::other())
     }
 
     /// Tries to find the terminator of the loop in which the region 'r' resides.
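
The `universe_info` change above replaces a panicking index with a lookup that falls back to `UniverseInfo::other()`. The same defensive pattern, reduced to plain `std` types (a sketch, not rustc internals):

    use std::collections::HashMap;

    fn universe_cause(causes: &HashMap<u32, String>, universe: u32) -> String {
        // `causes[&universe]` would panic on a missing key; the fallback keeps
        // diagnostics flowing at the cost of a less precise message.
        causes
            .get(&universe)
            .cloned()
            .unwrap_or_else(|| "created for an unknown reason".to_string())
    }

    fn main() {
        let causes = HashMap::from([(0, "query canonicalization".to_string())]);
        println!("{}", universe_cause(&causes, 0)); // recorded cause
        println!("{}", universe_cause(&causes, 7)); // fallback, instead of a panic
    }
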
diff --git a/compiler/rustc_borrowck/src/renumber.rs b/compiler/rustc_borrowck/src/renumber.rs
index 4c69ea8..5d6f5cc 100644
--- a/compiler/rustc_borrowck/src/renumber.rs
+++ b/compiler/rustc_borrowck/src/renumber.rs
@@ -4,11 +4,10 @@
 use rustc_index::IndexSlice;
 use rustc_infer::infer::NllRegionVariableOrigin;
 use rustc_middle::mir::visit::{MutVisitor, TyContext};
-use rustc_middle::mir::Constant;
-use rustc_middle::mir::{Body, Location, Promoted};
+use rustc_middle::mir::{Body, ConstOperand, Location, Promoted};
 use rustc_middle::ty::GenericArgsRef;
 use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
-use rustc_span::{Span, Symbol};
+use rustc_span::Symbol;
 
 /// Replaces all free regions appearing in the MIR with fresh
 /// inference variables, returning the number of variables created.
@@ -30,20 +29,14 @@
 }
 
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
-pub(crate) enum BoundRegionInfo {
-    Name(Symbol),
-    Span(Span),
-}
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
 pub(crate) enum RegionCtxt {
     Location(Location),
     TyContext(TyContext),
     Free(Symbol),
-    Bound(BoundRegionInfo),
-    LateBound(BoundRegionInfo),
+    Bound(Symbol),
+    LateBound(Symbol),
     Existential(Option<Symbol>),
-    Placeholder(BoundRegionInfo),
+    Placeholder(Symbol),
     Unknown,
 }
 
@@ -117,9 +110,9 @@
     }
 
     #[instrument(skip(self), level = "debug")]
-    fn visit_constant(&mut self, constant: &mut Constant<'tcx>, location: Location) {
-        let literal = constant.literal;
-        constant.literal = self.renumber_regions(literal, || RegionCtxt::Location(location));
+    fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, location: Location) {
+        let const_ = constant.const_;
+        constant.const_ = self.renumber_regions(const_, || RegionCtxt::Location(location));
         debug!("constant: {:#?}", constant);
     }
 }
diff --git a/compiler/rustc_borrowck/src/session_diagnostics.rs b/compiler/rustc_borrowck/src/session_diagnostics.rs
index d1d8cfa..ca3ccf4 100644
--- a/compiler/rustc_borrowck/src/session_diagnostics.rs
+++ b/compiler/rustc_borrowck/src/session_diagnostics.rs
@@ -452,3 +452,10 @@
     #[note(borrowck_ty_no_impl_copy)]
     Note { is_partial_move: bool, ty: Ty<'tcx>, place: &'a str },
 }
+
+#[derive(Diagnostic)]
+#[diag(borrowck_simd_shuffle_last_const)]
+pub(crate) struct SimdShuffleLastConst {
+    #[primary_span]
+    pub span: Span,
+}
diff --git a/compiler/rustc_borrowck/src/type_check/canonical.rs b/compiler/rustc_borrowck/src/type_check/canonical.rs
index 16f5e68..b7adc31 100644
--- a/compiler/rustc_borrowck/src/type_check/canonical.rs
+++ b/compiler/rustc_borrowck/src/type_check/canonical.rs
@@ -9,7 +9,7 @@
 use rustc_trait_selection::traits::query::type_op::{self, TypeOpOutput};
 use rustc_trait_selection::traits::ObligationCause;
 
-use crate::diagnostics::{ToUniverseInfo, UniverseInfo};
+use crate::diagnostics::ToUniverseInfo;
 
 use super::{Locations, NormalizeLocation, TypeChecker};
 
@@ -46,13 +46,11 @@
             self.push_region_constraints(locations, category, data);
         }
 
+        // If the query has created new universes and errors are going to be emitted, register the
+        // cause of these new universes for improved diagnostics.
         let universe = self.infcx.universe();
-
-        if old_universe != universe {
-            let universe_info = match error_info {
-                Some(error_info) => error_info.to_universe_info(old_universe),
-                None => UniverseInfo::other(),
-            };
+        if old_universe != universe && let Some(error_info) = error_info {
+            let universe_info = error_info.to_universe_info(old_universe);
             for u in (old_universe + 1)..=universe {
                 self.borrowck_context.constraints.universe_causes.insert(u, universe_info.clone());
             }
@@ -69,15 +67,8 @@
     where
         T: TypeFoldable<TyCtxt<'tcx>>,
     {
-        let old_universe = self.infcx.universe();
-
         let (instantiated, _) =
             self.infcx.instantiate_canonical_with_fresh_inference_vars(span, canonical);
-
-        for u in (old_universe + 1)..=self.infcx.universe() {
-            self.borrowck_context.constraints.universe_causes.insert(u, UniverseInfo::other());
-        }
-
         instantiated
     }
 
diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs
index 50d875d..1f383e5 100644
--- a/compiler/rustc_borrowck/src/type_check/mod.rs
+++ b/compiler/rustc_borrowck/src/type_check/mod.rs
@@ -50,7 +50,7 @@
 use rustc_mir_dataflow::move_paths::MoveData;
 use rustc_mir_dataflow::ResultsCursor;
 
-use crate::session_diagnostics::MoveUnsized;
+use crate::session_diagnostics::{MoveUnsized, SimdShuffleLastConst};
 use crate::{
     borrow_set::BorrowSet,
     constraints::{OutlivesConstraint, OutlivesConstraintSet},
@@ -163,10 +163,6 @@
 
     debug!(?normalized_inputs_and_output);
 
-    for u in ty::UniverseIndex::ROOT..=infcx.universe() {
-        constraints.universe_causes.insert(u, UniverseInfo::other());
-    }
-
     let mut borrowck_context = BorrowCheckContext {
         universal_regions,
         location_table,
@@ -306,11 +302,11 @@
         self.sanitize_place(place, location, context);
     }
 
-    fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+    fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, location: Location) {
         debug!(?constant, ?location, "visit_constant");
 
         self.super_constant(constant, location);
-        let ty = self.sanitize_type(constant, constant.literal.ty());
+        let ty = self.sanitize_type(constant, constant.const_.ty());
 
         self.cx.infcx.tcx.for_each_free_region(&ty, |live_region| {
             let live_region_vid =
@@ -332,7 +328,7 @@
 
         if let Some(annotation_index) = constant.user_ty {
             if let Err(terr) = self.cx.relate_type_and_user_type(
-                constant.literal.ty(),
+                constant.const_.ty(),
                 ty::Variance::Invariant,
                 &UserTypeProjection { base: annotation_index, projs: vec![] },
                 locations,
@@ -344,20 +340,20 @@
                     constant,
                     "bad constant user type {:?} vs {:?}: {:?}",
                     annotation,
-                    constant.literal.ty(),
+                    constant.const_.ty(),
                     terr,
                 );
             }
         } else {
             let tcx = self.tcx();
-            let maybe_uneval = match constant.literal {
-                ConstantKind::Ty(ct) => match ct.kind() {
+            let maybe_uneval = match constant.const_ {
+                Const::Ty(ct) => match ct.kind() {
                     ty::ConstKind::Unevaluated(_) => {
-                        bug!("should not encounter unevaluated ConstantKind::Ty here, got {:?}", ct)
+                        bug!("should not encounter unevaluated Const::Ty here, got {:?}", ct)
                     }
                     _ => None,
                 },
-                ConstantKind::Unevaluated(uv, _) => Some(uv),
+                Const::Unevaluated(uv, _) => Some(uv),
                 _ => None,
             };
 
@@ -388,7 +384,7 @@
                     check_err(self, promoted_body, ty, promoted_ty);
                 } else {
                     self.cx.ascribe_user_type(
-                        constant.literal.ty(),
+                        constant.const_.ty(),
                         UserType::TypeOf(uv.def, UserArgs { args: uv.args, user_self_ty: None }),
                         locations.span(&self.cx.body),
                     );
@@ -396,7 +392,7 @@
             } else if let Some(static_def_id) = constant.check_static_ptr(tcx) {
                 let unnormalized_ty = tcx.type_of(static_def_id).instantiate_identity();
                 let normalized_ty = self.cx.normalize(unnormalized_ty, locations);
-                let literal_ty = constant.literal.ty().builtin_deref(true).unwrap().ty;
+                let literal_ty = constant.const_.ty().builtin_deref(true).unwrap().ty;
 
                 if let Err(terr) = self.cx.eq_types(
                     literal_ty,
@@ -408,7 +404,7 @@
                 }
             }
 
-            if let ty::FnDef(def_id, args) = *constant.literal.ty().kind() {
+            if let ty::FnDef(def_id, args) = *constant.const_.ty().kind() {
                 let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, args);
                 self.cx.normalize_and_prove_instantiated_predicates(
                     def_id,
@@ -720,6 +716,9 @@
                 }
                 PlaceTy::from_ty(fty)
             }
+            ProjectionElem::Subtype(_) => {
+                bug!("ProjectionElem::Subtype shouldn't exist in borrowck")
+            }
             ProjectionElem::OpaqueCast(ty) => {
                 let ty = self.sanitize_type(place, ty);
                 let ty = self.cx.normalize(ty, location);
@@ -749,7 +748,7 @@
             PlaceContext::MutatingUse(_) => ty::Invariant,
             PlaceContext::NonUse(StorageDead | StorageLive | VarDebugInfo) => ty::Invariant,
             PlaceContext::NonMutatingUse(
-                Inspect | Copy | Move | PlaceMention | SharedBorrow | ShallowBorrow | AddressOf
+                Inspect | Copy | Move | PlaceMention | SharedBorrow | FakeBorrow | AddressOf
                 | Projection,
             ) => ty::Covariant,
             PlaceContext::NonUse(AscribeUserTy(variance)) => variance,
@@ -1011,7 +1010,7 @@
     }
 
     pub(super) fn register_predefined_opaques_in_new_solver(&mut self) {
-        // OK to use the identity substitutions for each opaque type key, since
+        // OK to use the identity arguments for each opaque type key, since
         // we remap opaques from HIR typeck back to their definition params.
         let opaques: Vec<_> = self
             .infcx
@@ -1333,8 +1332,8 @@
         debug!("terminator kind: {:?}", term.kind);
         match &term.kind {
             TerminatorKind::Goto { .. }
-            | TerminatorKind::Resume
-            | TerminatorKind::Terminate
+            | TerminatorKind::UnwindResume
+            | TerminatorKind::UnwindTerminate(_)
             | TerminatorKind::Return
             | TerminatorKind::GeneratorDrop
             | TerminatorKind::Unreachable
@@ -1371,14 +1370,13 @@
                     }
                 };
                 let (sig, map) = tcx.replace_late_bound_regions(sig, |br| {
-                    use crate::renumber::{BoundRegionInfo, RegionCtxt};
+                    use crate::renumber::RegionCtxt;
 
                     let region_ctxt_fn = || {
                         let reg_info = match br.kind {
-                            ty::BoundRegionKind::BrAnon(Some(span)) => BoundRegionInfo::Span(span),
-                            ty::BoundRegionKind::BrAnon(..) => BoundRegionInfo::Name(sym::anon),
-                            ty::BoundRegionKind::BrNamed(_, name) => BoundRegionInfo::Name(name),
-                            ty::BoundRegionKind::BrEnv => BoundRegionInfo::Name(sym::env),
+                            ty::BoundRegionKind::BrAnon => sym::anon,
+                            ty::BoundRegionKind::BrNamed(_, name) => name,
+                            ty::BoundRegionKind::BrEnv => sym::env,
                         };
 
                         RegionCtxt::LateBound(reg_info)
@@ -1430,7 +1428,7 @@
                         .add_element(region_vid, term_location);
                 }
 
-                self.check_call_inputs(body, term, &sig, args, term_location, *call_source);
+                self.check_call_inputs(body, term, func, &sig, args, term_location, *call_source);
             }
             TerminatorKind::Assert { cond, msg, .. } => {
                 self.check_operand(cond, term_location);
@@ -1550,25 +1548,36 @@
         }
     }
 
+    #[instrument(level = "debug", skip(self, body, term, func, term_location, call_source))]
     fn check_call_inputs(
         &mut self,
         body: &Body<'tcx>,
         term: &Terminator<'tcx>,
+        func: &Operand<'tcx>,
         sig: &ty::FnSig<'tcx>,
         args: &[Operand<'tcx>],
         term_location: Location,
         call_source: CallSource,
     ) {
-        debug!("check_call_inputs({:?}, {:?})", sig, args);
         if args.len() < sig.inputs().len() || (args.len() > sig.inputs().len() && !sig.c_variadic) {
             span_mirbug!(self, term, "call to {:?} with wrong # of args", sig);
         }
 
-        let func_ty = if let TerminatorKind::Call { func, .. } = &term.kind {
-            Some(func.ty(body, self.infcx.tcx))
-        } else {
-            None
-        };
+        let func_ty = func.ty(body, self.infcx.tcx);
+        if let ty::FnDef(def_id, _) = *func_ty.kind() {
+            if self.tcx().is_intrinsic(def_id) {
+                match self.tcx().item_name(def_id) {
+                    sym::simd_shuffle => {
+                        if !matches!(args[2], Operand::Constant(_)) {
+                            self.tcx()
+                                .sess
+                                .emit_err(SimdShuffleLastConst { span: term.source_info.span });
+                        }
+                    }
+                    _ => {}
+                }
+            }
+        }
         debug!(?func_ty);
 
         for (n, (fn_arg, op_arg)) in iter::zip(sig.inputs(), args).enumerate() {
@@ -1576,7 +1585,7 @@
 
             let op_arg_ty = self.normalize(op_arg_ty, term_location);
             let category = if call_source.from_hir_call() {
-                ConstraintCategory::CallArgument(self.infcx.tcx.erase_regions(func_ty))
+                ConstraintCategory::CallArgument(Some(self.infcx.tcx.erase_regions(func_ty)))
             } else {
                 ConstraintCategory::Boring
             };
@@ -1608,12 +1617,12 @@
                     self.assert_iscleanup(body, block_data, *target, is_cleanup);
                 }
             }
-            TerminatorKind::Resume => {
+            TerminatorKind::UnwindResume => {
                 if !is_cleanup {
                     span_mirbug!(self, block_data, "resume on non-cleanup block!")
                 }
             }
-            TerminatorKind::Terminate => {
+            TerminatorKind::UnwindTerminate(_) => {
                 if !is_cleanup {
                     span_mirbug!(self, block_data, "abort on non-cleanup block!")
                 }
@@ -1697,7 +1706,7 @@
                     span_mirbug!(self, ctxt, "unwind on cleanup block")
                 }
             }
-            UnwindAction::Unreachable | UnwindAction::Terminate => (),
+            UnwindAction::Unreachable | UnwindAction::Terminate(_) => (),
         }
     }
 
@@ -1794,9 +1803,9 @@
         debug!(?op, ?location, "check_operand");
 
         if let Operand::Constant(constant) = op {
-            let maybe_uneval = match constant.literal {
-                ConstantKind::Val(..) | ConstantKind::Ty(_) => None,
-                ConstantKind::Unevaluated(uv, _) => Some(uv),
+            let maybe_uneval = match constant.const_ {
+                Const::Val(..) | Const::Ty(_) => None,
+                Const::Unevaluated(uv, _) => Some(uv),
             };
 
             if let Some(uv) = maybe_uneval {
@@ -2557,6 +2566,9 @@
                 | ProjectionElem::Subslice { .. } => {
                     // other field access
                 }
+                ProjectionElem::Subtype(_) => {
+                    bug!("ProjectionElem::Subtype shouldn't exist in borrowck")
+                }
             }
         }
     }
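
Further up in this file, `check_call_inputs` now rejects calls to the `simd_shuffle` intrinsic whose index operand is not a constant, emitting the `SimdShuffleLastConst` diagnostic added to `session_diagnostics.rs`. A nightly-only sketch of the call shape being checked (the intrinsic declaration follows the pattern used in rustc's own test suite; the feature gates and details here are assumptions, not part of this patch):

    #![feature(repr_simd, platform_intrinsics)]

    #[allow(non_camel_case_types)]
    #[repr(simd)]
    #[derive(Copy, Clone)]
    struct f32x4(f32, f32, f32, f32);

    extern "platform-intrinsic" {
        // `idx` selects lanes from the concatenation of `x` and `y`.
        fn simd_shuffle<T, I, U>(x: T, y: T, idx: I) -> U;
    }

    fn interleave_low(a: f32x4, b: f32x4) -> f32x4 {
        // The index must be a constant operand; passing a runtime value here
        // is exactly what the new check reports.
        const IDX: [u32; 4] = [0, 4, 1, 5];
        unsafe { simd_shuffle(a, b, IDX) }
    }

    fn main() {
        let a = f32x4(1.0, 2.0, 3.0, 4.0);
        let b = f32x4(5.0, 6.0, 7.0, 8.0);
        let _ = interleave_low(a, b);
    }
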
diff --git a/compiler/rustc_borrowck/src/type_check/relate_tys.rs b/compiler/rustc_borrowck/src/type_check/relate_tys.rs
index e0c6295..c1f82e1 100644
--- a/compiler/rustc_borrowck/src/type_check/relate_tys.rs
+++ b/compiler/rustc_borrowck/src/type_check/relate_tys.rs
@@ -11,7 +11,7 @@
 
 use crate::constraints::OutlivesConstraint;
 use crate::diagnostics::UniverseInfo;
-use crate::renumber::{BoundRegionInfo, RegionCtxt};
+use crate::renumber::RegionCtxt;
 use crate::type_check::{InstantiateOpaqueType, Locations, TypeChecker};
 
 impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
@@ -126,10 +126,9 @@
             .placeholder_region(self.type_checker.infcx, placeholder);
 
         let reg_info = match placeholder.bound.kind {
-            ty::BoundRegionKind::BrAnon(Some(span)) => BoundRegionInfo::Span(span),
-            ty::BoundRegionKind::BrAnon(..) => BoundRegionInfo::Name(sym::anon),
-            ty::BoundRegionKind::BrNamed(_, name) => BoundRegionInfo::Name(name),
-            ty::BoundRegionKind::BrEnv => BoundRegionInfo::Name(sym::env),
+            ty::BoundRegionKind::BrAnon => sym::anon,
+            ty::BoundRegionKind::BrNamed(_, name) => name,
+            ty::BoundRegionKind::BrEnv => sym::env,
         };
 
         if cfg!(debug_assertions) {
diff --git a/compiler/rustc_borrowck/src/universal_regions.rs b/compiler/rustc_borrowck/src/universal_regions.rs
index 56945f4..af437f3 100644
--- a/compiler/rustc_borrowck/src/universal_regions.rs
+++ b/compiler/rustc_borrowck/src/universal_regions.rs
@@ -21,13 +21,14 @@
 use rustc_index::IndexVec;
 use rustc_infer::infer::NllRegionVariableOrigin;
 use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, InlineConstArgs, InlineConstArgsParts, RegionVid, Ty, TyCtxt};
 use rustc_middle::ty::{GenericArgs, GenericArgsRef};
 use rustc_span::symbol::{kw, sym};
 use rustc_span::Symbol;
 use std::iter;
 
-use crate::renumber::{BoundRegionInfo, RegionCtxt};
+use crate::renumber::RegionCtxt;
 use crate::BorrowckInferCtxt;
 
 #[derive(Debug)]
@@ -332,10 +333,16 @@
     pub(crate) fn annotate(&self, tcx: TyCtxt<'tcx>, err: &mut Diagnostic) {
         match self.defining_ty {
             DefiningTy::Closure(def_id, args) => {
+                let v = with_no_trimmed_paths!(
+                    args[tcx.generics_of(def_id).parent_count..]
+                        .iter()
+                        .map(|arg| arg.to_string())
+                        .collect::<Vec<_>>()
+                );
                 err.note(format!(
-                    "defining type: {} with closure args {:#?}",
+                    "defining type: {} with closure args [\n    {},\n]",
                     tcx.def_path_str_with_args(def_id, args),
-                    &args[tcx.generics_of(def_id).parent_count..],
+                    v.join(",\n    "),
                 ));
 
                 // FIXME: It'd be nice to print the late-bound regions
@@ -348,10 +355,16 @@
                 });
             }
             DefiningTy::Generator(def_id, args, _) => {
+                let v = with_no_trimmed_paths!(
+                    args[tcx.generics_of(def_id).parent_count..]
+                        .iter()
+                        .map(|arg| arg.to_string())
+                        .collect::<Vec<_>>()
+                );
                 err.note(format!(
-                    "defining type: {} with generator args {:#?}",
+                    "defining type: {} with generator args [\n    {},\n]",
                     tcx.def_path_str_with_args(def_id, args),
-                    &args[tcx.generics_of(def_id).parent_count..],
+                    v.join(",\n    "),
                 ));
 
                 // FIXME: As above, we'd like to print out the region
@@ -433,9 +446,7 @@
                     if !indices.indices.contains_key(&r) {
                         let region_vid = {
                             let name = r.get_name_or_anon();
-                            self.infcx.next_nll_region_var(FR, || {
-                                RegionCtxt::LateBound(BoundRegionInfo::Name(name))
-                            })
+                            self.infcx.next_nll_region_var(FR, || RegionCtxt::LateBound(name))
                         };
 
                         debug!(?region_vid);
@@ -467,9 +478,7 @@
             if !indices.indices.contains_key(&r) {
                 let region_vid = {
                     let name = r.get_name_or_anon();
-                    self.infcx.next_nll_region_var(FR, || {
-                        RegionCtxt::LateBound(BoundRegionInfo::Name(name))
-                    })
+                    self.infcx.next_nll_region_var(FR, || RegionCtxt::LateBound(name))
                 };
 
                 debug!(?region_vid);
@@ -567,7 +576,7 @@
                 }
             }
 
-            BodyOwnerKind::Const | BodyOwnerKind::Static(..) => {
+            BodyOwnerKind::Const { .. } | BodyOwnerKind::Static(..) => {
                 let identity_args = GenericArgs::identity_for_item(tcx, typeck_root_def_id);
                 if self.mir_def.to_def_id() == typeck_root_def_id {
                     let args =
@@ -630,10 +639,9 @@
         };
 
         let global_mapping = iter::once((tcx.lifetimes.re_static, fr_static));
-        let subst_mapping =
-            iter::zip(identity_args.regions(), fr_args.regions().map(|r| r.as_var()));
+        let arg_mapping = iter::zip(identity_args.regions(), fr_args.regions().map(|r| r.as_var()));
 
-        UniversalRegionIndices { indices: global_mapping.chain(subst_mapping).collect(), fr_static }
+        UniversalRegionIndices { indices: global_mapping.chain(arg_mapping).collect(), fr_static }
     }
 
     fn compute_inputs_and_output(
@@ -783,7 +791,7 @@
                     _ => sym::anon,
                 };
 
-                self.next_nll_region_var(origin, || RegionCtxt::Bound(BoundRegionInfo::Name(name)))
+                self.next_nll_region_var(origin, || RegionCtxt::Bound(name))
             };
 
             indices.insert_late_bound_region(liberated_region, region_vid.as_var());
@@ -813,9 +821,7 @@
             if !indices.indices.contains_key(&r) {
                 let region_vid = {
                     let name = r.get_name_or_anon();
-                    self.next_nll_region_var(FR, || {
-                        RegionCtxt::LateBound(BoundRegionInfo::Name(name))
-                    })
+                    self.next_nll_region_var(FR, || RegionCtxt::LateBound(name))
                 };
 
                 debug!(?region_vid);
@@ -835,9 +841,7 @@
             if !indices.indices.contains_key(&r) {
                 let region_vid = {
                     let name = r.get_name_or_anon();
-                    self.next_nll_region_var(FR, || {
-                        RegionCtxt::LateBound(BoundRegionInfo::Name(name))
-                    })
+                    self.next_nll_region_var(FR, || RegionCtxt::LateBound(name))
                 };
 
                 indices.insert_late_bound_region(r, region_vid.as_var());
diff --git a/compiler/rustc_builtin_macros/messages.ftl b/compiler/rustc_builtin_macros/messages.ftl
index 8d8db4c..207ae8a 100644
--- a/compiler/rustc_builtin_macros/messages.ftl
+++ b/compiler/rustc_builtin_macros/messages.ftl
@@ -137,6 +137,8 @@
     .label = positional arguments must be before named arguments
     .named_args = named argument
 
+builtin_macros_format_remove_raw_ident = remove the `r#`
+
 builtin_macros_format_requires_string = requires at least a format string argument
 
 builtin_macros_format_string_invalid = invalid format string: {$desc}
@@ -165,6 +167,8 @@
 builtin_macros_format_unused_args = multiple unused formatting arguments
     .label = multiple missing formatting specifiers
 
+builtin_macros_format_use_positional = consider using a positional formatting argument instead
+
 builtin_macros_global_asm_clobber_abi = `clobber_abi` cannot be used with `global_asm!`
 
 builtin_macros_invalid_crate_attribute = invalid crate attribute
@@ -205,8 +209,6 @@
 
 builtin_macros_should_panic = functions using `#[should_panic]` must return `()`
 
-builtin_macros_sugg = consider using a positional formatting argument instead
-
 builtin_macros_test_arg_non_lifetime = functions used as tests can not have any non-lifetime generic parameters
 
 builtin_macros_test_args = functions used as tests can not have any arguments
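
The renamed `builtin_macros_format_use_positional` message and the new `builtin_macros_format_remove_raw_ident` message back the format-string handling of raw identifiers. The situation they describe, roughly (illustrative; the exact error and help wording come from the compiler):

    fn main() {
        let r#type = "f32";
        let r#width = 4;

        // Raw identifiers are not accepted inside a format string:
        // println!("{r#type} has width {r#width}");
        //
        // For `width` (not a keyword) the help is to remove the `r#`; for
        // `type` (a keyword) the help is a positional argument instead:
        println!("{} has width {width}", r#type, width = r#width);
    }
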
diff --git a/compiler/rustc_builtin_macros/src/assert/context.rs b/compiler/rustc_builtin_macros/src/assert/context.rs
index bda4731..0682d48 100644
--- a/compiler/rustc_builtin_macros/src/assert/context.rs
+++ b/compiler/rustc_builtin_macros/src/assert/context.rs
@@ -241,7 +241,7 @@
                 self.manage_cond_expr(prefix);
                 self.manage_cond_expr(suffix);
             }
-            ExprKind::Let(_, local_expr, _) => {
+            ExprKind::Let(_, local_expr, _, _) => {
                 self.manage_cond_expr(local_expr);
             }
             ExprKind::Match(local_expr, _) => {
diff --git a/compiler/rustc_builtin_macros/src/deriving/bounds.rs b/compiler/rustc_builtin_macros/src/deriving/bounds.rs
index 2c8e6f9..8027ca2 100644
--- a/compiler/rustc_builtin_macros/src/deriving/bounds.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/bounds.rs
@@ -41,7 +41,7 @@
         path: path_std!(marker::ConstParamTy),
         skip_path_as_bound: false,
         needs_copy_as_bound_if_packed: false,
-        additional_bounds: Vec::new(),
+        additional_bounds: vec![ty::Ty::Path(path_std!(cmp::Eq))],
         supports_unions: false,
         methods: Vec::new(),
         associated_types: Vec::new(),
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs
index c78a0eb..a000e48 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs
@@ -18,6 +18,20 @@
     is_const: bool,
 ) {
     let span = cx.with_def_site_ctxt(span);
+
+    let structural_trait_def = TraitDef {
+        span,
+        path: path_std!(marker::StructuralEq),
+        skip_path_as_bound: true, // crucial!
+        needs_copy_as_bound_if_packed: false,
+        additional_bounds: Vec::new(),
+        supports_unions: true,
+        methods: Vec::new(),
+        associated_types: Vec::new(),
+        is_const: false,
+    };
+    structural_trait_def.expand(cx, mitem, item, push);
+
     let trait_def = TraitDef {
         span,
         path: path_std!(cmp::Eq),
@@ -34,7 +48,7 @@
             attributes: thin_vec![
                 cx.attr_word(sym::inline, span),
                 cx.attr_nested_word(sym::doc, sym::hidden, span),
-                cx.attr_word(sym::no_coverage, span)
+                cx.attr_nested_word(sym::coverage, sym::off, span)
             ],
             fieldless_variants_strategy: FieldlessVariantsStrategy::Unify,
             combine_substructure: combine_substructure(Box::new(|a, b, c| {
@@ -44,9 +58,6 @@
         associated_types: Vec::new(),
         is_const,
     };
-
-    super::inject_impl_of_structural_trait(cx, span, item, path_std!(marker::StructuralEq), push);
-
     trait_def.expand_ext(cx, mitem, item, push, true)
 }
 
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs
index a71ecc5..a170468 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs
@@ -72,13 +72,20 @@
         BlockOrExpr::new_expr(expr)
     }
 
-    super::inject_impl_of_structural_trait(
-        cx,
+    let structural_trait_def = TraitDef {
         span,
-        item,
-        path_std!(marker::StructuralPartialEq),
-        push,
-    );
+        path: path_std!(marker::StructuralPartialEq),
+        skip_path_as_bound: true, // crucial!
+        needs_copy_as_bound_if_packed: false,
+        additional_bounds: Vec::new(),
+        // We really don't support unions, but that's already checked by the impl generated below;
+        // a second check here would lead to redundant error messages.
+        supports_unions: true,
+        methods: Vec::new(),
+        associated_types: Vec::new(),
+        is_const: false,
+    };
+    structural_trait_def.expand(cx, mitem, item, push);
 
     // No need to generate `ne`, the default suffices, and not generating it is
     // faster.
diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
index 6597ee3..7252658 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -88,7 +88,7 @@
 //!
 //! When generating the `expr` for the `A` impl, the `SubstructureFields` is
 //!
-//! ```{.text}
+//! ```text
 //! Struct(vec![FieldInfo {
 //!            span: <span of x>
 //!            name: Some(<ident of x>),
@@ -99,7 +99,7 @@
 //!
 //! For the `B` impl, called with `B(a)` and `B(b)`,
 //!
-//! ```{.text}
+//! ```text
 //! Struct(vec![FieldInfo {
 //!           span: <span of `i32`>,
 //!           name: None,
@@ -113,7 +113,7 @@
 //! When generating the `expr` for a call with `self == C0(a)` and `other
 //! == C0(b)`, the SubstructureFields is
 //!
-//! ```{.text}
+//! ```text
 //! EnumMatching(0, <ast::Variant for C0>,
 //!              vec![FieldInfo {
 //!                 span: <span of i32>
@@ -125,7 +125,7 @@
 //!
 //! For `C1 {x}` and `C1 {x}`,
 //!
-//! ```{.text}
+//! ```text
 //! EnumMatching(1, <ast::Variant for C1>,
 //!              vec![FieldInfo {
 //!                 span: <span of x>
@@ -137,7 +137,7 @@
 //!
 //! For the tags,
 //!
-//! ```{.text}
+//! ```text
 //! EnumTag(
 //!     &[<ident of self tag>, <ident of other tag>], <expr to combine with>)
 //! ```
@@ -149,7 +149,7 @@
 //!
 //! A static method on the types above would result in,
 //!
-//! ```{.text}
+//! ```text
 //! StaticStruct(<ast::VariantData of A>, Named(vec![(<ident of x>, <span of x>)]))
 //!
 //! StaticStruct(<ast::VariantData of B>, Unnamed(vec![<span of x>]))
@@ -711,7 +711,9 @@
                             .collect();
 
                         // Require the current trait.
-                        bounds.push(cx.trait_bound(trait_path.clone(), self.is_const));
+                        if !self.skip_path_as_bound {
+                            bounds.push(cx.trait_bound(trait_path.clone(), self.is_const));
+                        }
 
                         // Add a `Copy` bound if required.
                         if is_packed && self.needs_copy_as_bound_if_packed {
@@ -722,15 +724,17 @@
                             ));
                         }
 
-                        let predicate = ast::WhereBoundPredicate {
-                            span: self.span,
-                            bound_generic_params: field_ty_param.bound_generic_params,
-                            bounded_ty: field_ty_param.ty,
-                            bounds,
-                        };
+                        if !bounds.is_empty() {
+                            let predicate = ast::WhereBoundPredicate {
+                                span: self.span,
+                                bound_generic_params: field_ty_param.bound_generic_params,
+                                bounded_ty: field_ty_param.ty,
+                                bounds,
+                            };
 
-                        let predicate = ast::WherePredicate::BoundPredicate(predicate);
-                        where_clause.predicates.push(predicate);
+                            let predicate = ast::WherePredicate::BoundPredicate(predicate);
+                            where_clause.predicates.push(predicate);
+                        }
                     }
                 }
             }
diff --git a/compiler/rustc_builtin_macros/src/deriving/mod.rs b/compiler/rustc_builtin_macros/src/deriving/mod.rs
index d34336e..a6f3252 100644
--- a/compiler/rustc_builtin_macros/src/deriving/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/mod.rs
@@ -2,9 +2,9 @@
 
 use rustc_ast as ast;
 use rustc_ast::ptr::P;
-use rustc_ast::{GenericArg, Impl, ItemKind, MetaItem};
+use rustc_ast::{GenericArg, MetaItem};
 use rustc_expand::base::{Annotatable, ExpandResult, ExtCtxt, MultiItemModifier};
-use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::symbol::{sym, Symbol};
 use rustc_span::Span;
 use thin_vec::{thin_vec, ThinVec};
 
@@ -116,100 +116,6 @@
     }))
 }
 
-// Injects `impl<...> Structural for ItemType<...> { }`. In particular,
-// does *not* add `where T: Structural` for parameters `T` in `...`.
-// (That's the main reason we cannot use TraitDef here.)
-fn inject_impl_of_structural_trait(
-    cx: &mut ExtCtxt<'_>,
-    span: Span,
-    item: &Annotatable,
-    structural_path: generic::ty::Path,
-    push: &mut dyn FnMut(Annotatable),
-) {
-    let Annotatable::Item(item) = item else {
-        unreachable!();
-    };
-
-    let generics = match &item.kind {
-        ItemKind::Struct(_, generics) | ItemKind::Enum(_, generics) => generics,
-        // Do not inject `impl Structural for Union`. (`PartialEq` does not
-        // support unions, so we will see error downstream.)
-        ItemKind::Union(..) => return,
-        _ => unreachable!(),
-    };
-
-    // Create generics param list for where clauses and impl headers
-    let mut generics = generics.clone();
-
-    let ctxt = span.ctxt();
-
-    // Create the type of `self`.
-    //
-    // in addition, remove defaults from generic params (impls cannot have them).
-    let self_params: Vec<_> = generics
-        .params
-        .iter_mut()
-        .map(|param| match &mut param.kind {
-            ast::GenericParamKind::Lifetime => ast::GenericArg::Lifetime(
-                cx.lifetime(param.ident.span.with_ctxt(ctxt), param.ident),
-            ),
-            ast::GenericParamKind::Type { default } => {
-                *default = None;
-                ast::GenericArg::Type(cx.ty_ident(param.ident.span.with_ctxt(ctxt), param.ident))
-            }
-            ast::GenericParamKind::Const { ty: _, kw_span: _, default } => {
-                *default = None;
-                ast::GenericArg::Const(
-                    cx.const_ident(param.ident.span.with_ctxt(ctxt), param.ident),
-                )
-            }
-        })
-        .collect();
-
-    let type_ident = item.ident;
-
-    let trait_ref = cx.trait_ref(structural_path.to_path(cx, span, type_ident, &generics));
-    let self_type = cx.ty_path(cx.path_all(span, false, vec![type_ident], self_params));
-
-    // It would be nice to also encode constraint `where Self: Eq` (by adding it
-    // onto `generics` cloned above). Unfortunately, that strategy runs afoul of
-    // rust-lang/rust#48214. So we perform that additional check in the compiler
-    // itself, instead of encoding it here.
-
-    // Keep the lint and stability attributes of the original item, to control
-    // how the generated implementation is linted.
-    let mut attrs = ast::AttrVec::new();
-    attrs.extend(
-        item.attrs
-            .iter()
-            .filter(|a| {
-                [sym::allow, sym::warn, sym::deny, sym::forbid, sym::stable, sym::unstable]
-                    .contains(&a.name_or_empty())
-            })
-            .cloned(),
-    );
-    // Mark as `automatically_derived` to avoid some silly lints.
-    attrs.push(cx.attr_word(sym::automatically_derived, span));
-
-    let newitem = cx.item(
-        span,
-        Ident::empty(),
-        attrs,
-        ItemKind::Impl(Box::new(Impl {
-            unsafety: ast::Unsafe::No,
-            polarity: ast::ImplPolarity::Positive,
-            defaultness: ast::Defaultness::Final,
-            constness: ast::Const::No,
-            generics,
-            of_trait: Some(trait_ref),
-            self_ty: self_type,
-            items: ThinVec::new(),
-        })),
-    );
-
-    push(Annotatable::Item(newitem));
-}
-
 fn assert_ty_bounds(
     cx: &mut ExtCtxt<'_>,
     stmts: &mut ThinVec<ast::Stmt>,
diff --git a/compiler/rustc_builtin_macros/src/errors.rs b/compiler/rustc_builtin_macros/src/errors.rs
index fbf0395..1238773 100644
--- a/compiler/rustc_builtin_macros/src/errors.rs
+++ b/compiler/rustc_builtin_macros/src/errors.rs
@@ -539,18 +539,29 @@
 }
 
 #[derive(Subdiagnostic)]
-#[multipart_suggestion(
-    builtin_macros_sugg,
-    style = "verbose",
-    applicability = "machine-applicable"
-)]
-pub(crate) struct InvalidFormatStringSuggestion {
-    #[suggestion_part(code = "{len}")]
-    pub(crate) captured: Span,
-    pub(crate) len: String,
-    #[suggestion_part(code = ", {arg}")]
-    pub(crate) span: Span,
-    pub(crate) arg: String,
+pub(crate) enum InvalidFormatStringSuggestion {
+    #[multipart_suggestion(
+        builtin_macros_format_use_positional,
+        style = "verbose",
+        applicability = "machine-applicable"
+    )]
+    UsePositional {
+        #[suggestion_part(code = "{len}")]
+        captured: Span,
+        len: String,
+        #[suggestion_part(code = ", {arg}")]
+        span: Span,
+        arg: String,
+    },
+    #[suggestion(
+        builtin_macros_format_remove_raw_ident,
+        code = "",
+        applicability = "machine-applicable"
+    )]
+    RemoveRawIdent {
+        #[primary_span]
+        span: Span,
+    },
 }
 
 #[derive(Diagnostic)]
diff --git a/compiler/rustc_builtin_macros/src/format.rs b/compiler/rustc_builtin_macros/src/format.rs
index ede95db..8397b5e 100644
--- a/compiler/rustc_builtin_macros/src/format.rs
+++ b/compiler/rustc_builtin_macros/src/format.rs
@@ -260,20 +260,29 @@
         if let Some((label, span)) = err.secondary_label && is_source_literal {
             e.label_ = Some(errors::InvalidFormatStringLabel { span: fmt_span.from_inner(InnerSpan::new(span.start, span.end)), label } );
         }
-        if err.should_be_replaced_with_positional_argument {
-            let captured_arg_span =
-                fmt_span.from_inner(InnerSpan::new(err.span.start, err.span.end));
-            if let Ok(arg) = ecx.source_map().span_to_snippet(captured_arg_span) {
-                let span = match args.unnamed_args().last() {
-                    Some(arg) => arg.expr.span,
-                    None => fmt_span,
-                };
-                e.sugg_ = Some(errors::InvalidFormatStringSuggestion {
-                    captured: captured_arg_span,
-                    len: args.unnamed_args().len().to_string(),
-                    span: span.shrink_to_hi(),
-                    arg,
-                });
+        match err.suggestion {
+            parse::Suggestion::None => {}
+            parse::Suggestion::UsePositional => {
+                let captured_arg_span =
+                    fmt_span.from_inner(InnerSpan::new(err.span.start, err.span.end));
+                if let Ok(arg) = ecx.source_map().span_to_snippet(captured_arg_span) {
+                    let span = match args.unnamed_args().last() {
+                        Some(arg) => arg.expr.span,
+                        None => fmt_span,
+                    };
+                    e.sugg_ = Some(errors::InvalidFormatStringSuggestion::UsePositional {
+                        captured: captured_arg_span,
+                        len: args.unnamed_args().len().to_string(),
+                        span: span.shrink_to_hi(),
+                        arg,
+                    });
+                }
+            }
+            parse::Suggestion::RemoveRawIdent(span) => {
+                if is_source_literal {
+                    let span = fmt_span.from_inner(InnerSpan::new(span.start, span.end));
+                    e.sugg_ = Some(errors::InvalidFormatStringSuggestion::RemoveRawIdent { span })
+                }
             }
         }
         ecx.emit_err(e);
diff --git a/compiler/rustc_builtin_macros/src/source_util.rs b/compiler/rustc_builtin_macros/src/source_util.rs
index 433da74..953d957 100644
--- a/compiler/rustc_builtin_macros/src/source_util.rs
+++ b/compiler/rustc_builtin_macros/src/source_util.rs
@@ -217,7 +217,7 @@
     };
     match cx.source_map().load_binary_file(&file) {
         Ok(bytes) => {
-            let expr = cx.expr(sp, ast::ExprKind::IncludedBytes(bytes.into()));
+            let expr = cx.expr(sp, ast::ExprKind::IncludedBytes(bytes));
             base::MacEager::expr(expr)
         }
         Err(e) => {
diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs
index d8846a9..53ff089 100644
--- a/compiler/rustc_builtin_macros/src/test_harness.rs
+++ b/compiler/rustc_builtin_macros/src/test_harness.rs
@@ -254,7 +254,7 @@
     let expn_id = ext_cx.resolver.expansion_for_ast_pass(
         DUMMY_SP,
         AstPass::TestHarness,
-        &[sym::test, sym::rustc_attrs, sym::no_coverage],
+        &[sym::test, sym::rustc_attrs, sym::coverage_attribute],
         None,
     );
     let def_site = DUMMY_SP.with_def_site_ctxt(expn_id.to_expn_id());
@@ -335,8 +335,8 @@
 
     // #[rustc_main]
     let main_attr = ecx.attr_word(sym::rustc_main, sp);
-    // #[no_coverage]
-    let no_coverage_attr = ecx.attr_word(sym::no_coverage, sp);
+    // #[coverage(off)]
+    let coverage_attr = ecx.attr_nested_word(sym::coverage, sym::off, sp);
 
     // pub fn main() { ... }
     let main_ret_ty = ecx.ty(sp, ast::TyKind::Tup(ThinVec::new()));
@@ -366,7 +366,7 @@
 
     let main = P(ast::Item {
         ident: main_id,
-        attrs: thin_vec![main_attr, no_coverage_attr],
+        attrs: thin_vec![main_attr, coverage_attr],
         id: ast::DUMMY_NODE_ID,
         kind: main,
         vis: ast::Visibility { span: sp, kind: ast::VisibilityKind::Public, tokens: None },
diff --git a/compiler/rustc_codegen_cranelift/docs/usage.md b/compiler/rustc_codegen_cranelift/docs/usage.md
index c6210f9..135a51c 100644
--- a/compiler/rustc_codegen_cranelift/docs/usage.md
+++ b/compiler/rustc_codegen_cranelift/docs/usage.md
@@ -54,7 +54,7 @@
 
 ```bash
 function jit_naked() {
-    echo "$@" | $cg_clif_dir/dist/rustc-clif - -Zunstable-features -Cllvm-args=mode=jit -Cprefer-dynamic
+    echo "$@" | $cg_clif_dir/dist/rustc-clif - -Zunstable-options -Cllvm-args=mode=jit-lazy -Cprefer-dynamic
 }
 
 function jit() {
diff --git a/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml b/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml
index fa175ed..5b79d65 100644
--- a/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml
+++ b/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml
@@ -4,9 +4,9 @@
 
 [[package]]
 name = "addr2line"
-version = "0.20.0"
+version = "0.21.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3"
+checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
 dependencies = [
  "compiler_builtins",
  "gimli",
@@ -140,9 +140,9 @@
 
 [[package]]
 name = "gimli"
-version = "0.27.2"
+version = "0.28.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
+checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
 dependencies = [
  "compiler_builtins",
  "rustc-std-workspace-alloc",
@@ -205,9 +205,9 @@
 
 [[package]]
 name = "object"
-version = "0.31.1"
+version = "0.32.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1"
+checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe"
 dependencies = [
  "compiler_builtins",
  "memchr",
diff --git a/compiler/rustc_codegen_cranelift/rust-toolchain b/compiler/rustc_codegen_cranelift/rust-toolchain
index 5689bde..2cc5d77 100644
--- a/compiler/rustc_codegen_cranelift/rust-toolchain
+++ b/compiler/rustc_codegen_cranelift/rust-toolchain
@@ -1,3 +1,3 @@
 [toolchain]
-channel = "nightly-2023-08-08"
+channel = "nightly-2023-09-06"
 components = ["rust-src", "rustc-dev", "llvm-tools"]
diff --git a/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs b/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
index f782671..03912b1 100755
--- a/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
+++ b/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
@@ -100,9 +100,9 @@
             stack = &stack[..index + ENCODE_METADATA.len()];
         }
 
-        const SUBST_AND_NORMALIZE_ERASING_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::subst_and_normalize_erasing_regions";
-        if let Some(index) = stack.find(SUBST_AND_NORMALIZE_ERASING_REGIONS) {
-            stack = &stack[..index + SUBST_AND_NORMALIZE_ERASING_REGIONS.len()];
+        const INSTANTIATE_AND_NORMALIZE_ERASING_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::instantiate_and_normalize_erasing_regions";
+        if let Some(index) = stack.find(INSTANTIATE_AND_NORMALIZE_ERASING_REGIONS) {
+            stack = &stack[..index + INSTANTIATE_AND_NORMALIZE_ERASING_REGIONS.len()];
         }
 
         const NORMALIZE_ERASING_LATE_BOUND_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::normalize_erasing_late_bound_regions";
diff --git a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
index c163b85..3fc462a 100755
--- a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
@@ -45,6 +45,7 @@
 rm tests/ui/proc-macro/no-missing-docs.rs
 rm tests/ui/rust-2018/proc-macro-crate-in-paths.rs
 rm tests/ui/proc-macro/allowed-signatures.rs
+rm tests/ui/proc-macro/no-mangle-in-proc-macro-issue-111888.rs
 
 # vendor intrinsics
 rm tests/ui/sse2.rs # cpuid not supported, so sse2 not detected
@@ -114,6 +115,7 @@
 rm tests/ui/mir/mir_raw_fat_ptr.rs # same
 rm tests/ui/consts/issue-33537.rs # same
 rm tests/ui/layout/valid_range_oob.rs # different ICE message
+rm tests/ui/const-generics/generic_const_exprs/issue-80742.rs # gives error instead of ICE with cg_clif
 
 rm tests/ui/consts/issue-miri-1910.rs # different error message
 rm tests/ui/consts/offset_ub.rs # same
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index b7f56a2..5d775b9 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -39,7 +39,7 @@
 pub(crate) fn conv_to_call_conv(sess: &Session, c: Conv, default_call_conv: CallConv) -> CallConv {
     match c {
         Conv::Rust | Conv::C => default_call_conv,
-        Conv::RustCold => CallConv::Cold,
+        Conv::Cold | Conv::PreserveMost | Conv::PreserveAll => CallConv::Cold,
         Conv::X86_64SysV => CallConv::SystemV,
         Conv::X86_64Win64 => CallConv::WindowsFastcall,
 
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
index d847e52..0d16da4 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -100,11 +100,11 @@
                 }
                 _ => unreachable!("{:?}", self.layout.abi),
             },
-            PassMode::Cast(ref cast, pad_i32) => {
+            PassMode::Cast { ref cast, pad_i32 } => {
                 assert!(!pad_i32, "padding support not yet implemented");
                 cast_target_to_abi_params(cast)
             }
-            PassMode::Indirect { attrs, extra_attrs: None, on_stack } => {
+            PassMode::Indirect { attrs, meta_attrs: None, on_stack } => {
                 if on_stack {
                     // Abi requires aligning struct size to pointer size
                     let size = self.layout.size.align_to(tcx.data_layout.pointer_align.abi);
@@ -117,11 +117,11 @@
                     smallvec![apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs)]
                 }
             }
-            PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+            PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
                 assert!(!on_stack);
                 smallvec![
                     apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs),
-                    apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), extra_attrs),
+                    apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), meta_attrs),
                 ]
             }
         }
@@ -148,14 +148,14 @@
                 }
                 _ => unreachable!("{:?}", self.layout.abi),
             },
-            PassMode::Cast(ref cast, _) => {
+            PassMode::Cast { ref cast, .. } => {
                 (None, cast_target_to_abi_params(cast).into_iter().collect())
             }
-            PassMode::Indirect { attrs: _, extra_attrs: None, on_stack } => {
+            PassMode::Indirect { attrs: _, meta_attrs: None, on_stack } => {
                 assert!(!on_stack);
                 (Some(AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructReturn)), vec![])
             }
-            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                 unreachable!("unsized return value")
             }
         }
@@ -229,7 +229,7 @@
             let (a, b) = arg.load_scalar_pair(fx);
             smallvec![a, b]
         }
-        PassMode::Cast(ref cast, _) => to_casted_value(fx, arg, cast),
+        PassMode::Cast { ref cast, .. } => to_casted_value(fx, arg, cast),
         PassMode::Indirect { .. } => {
             if is_owned {
                 match arg.force_stack(fx) {
@@ -287,14 +287,14 @@
             assert_eq!(block_params.len(), 2, "{:?}", block_params);
             Some(CValue::by_val_pair(block_params[0], block_params[1], arg_abi.layout))
         }
-        PassMode::Cast(ref cast, _) => {
+        PassMode::Cast { ref cast, .. } => {
             Some(from_casted_value(fx, &block_params, arg_abi.layout, cast))
         }
-        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+        PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
             assert_eq!(block_params.len(), 1, "{:?}", block_params);
             Some(CValue::by_ref(Pointer::new(block_params[0]), arg_abi.layout))
         }
-        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+        PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
             assert_eq!(block_params.len(), 2, "{:?}", block_params);
             Some(CValue::by_ref_unsized(
                 Pointer::new(block_params[0]),
diff --git a/compiler/rustc_codegen_cranelift/src/abi/returning.rs b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
index 14e54d5..646fb4a 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/returning.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
@@ -13,7 +13,7 @@
     block_params_iter: &mut impl Iterator<Item = Value>,
 ) -> CPlace<'tcx> {
     let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
-        PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(..) => {
+        PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast { .. } => {
             let is_ssa =
                 ssa_analyzed[RETURN_PLACE].is_ssa(fx, fx.fn_abi.as_ref().unwrap().ret.layout.ty);
             (
@@ -26,7 +26,7 @@
                 smallvec![],
             )
         }
-        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+        PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
             let ret_param = block_params_iter.next().unwrap();
             assert_eq!(fx.bcx.func.dfg.value_type(ret_param), fx.pointer_type);
             (
@@ -34,7 +34,7 @@
                 smallvec![ret_param],
             )
         }
-        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+        PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
             unreachable!("unsized return value")
         }
     };
@@ -62,7 +62,7 @@
 ) {
     let (ret_temp_place, return_ptr) = match ret_arg_abi.mode {
         PassMode::Ignore => (None, None),
-        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+        PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
             if let Some(ret_ptr) = ret_place.try_to_ptr() {
                 // This is an optimization to prevent unnecessary copies of the return value when
                 // the return place is already a memory place as opposed to a register.
@@ -73,10 +73,10 @@
                 (Some(place), Some(place.to_ptr().get_addr(fx)))
             }
         }
-        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+        PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
             unreachable!("unsized return value")
         }
-        PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(..) => (None, None),
+        PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast { .. } => (None, None),
     };
 
     let call_inst = f(fx, return_ptr);
@@ -93,21 +93,21 @@
             ret_place
                 .write_cvalue(fx, CValue::by_val_pair(ret_val_a, ret_val_b, ret_arg_abi.layout));
         }
-        PassMode::Cast(ref cast, _) => {
+        PassMode::Cast { ref cast, .. } => {
             let results =
                 fx.bcx.inst_results(call_inst).iter().copied().collect::<SmallVec<[Value; 2]>>();
             let result =
                 super::pass_mode::from_casted_value(fx, &results, ret_place.layout(), cast);
             ret_place.write_cvalue(fx, result);
         }
-        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+        PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
             if let Some(ret_temp_place) = ret_temp_place {
                 // If ret_temp_place is None, it is not necessary to copy the return value.
                 let ret_temp_value = ret_temp_place.to_cvalue(fx);
                 ret_place.write_cvalue(fx, ret_temp_value);
             }
         }
-        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+        PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
             unreachable!("unsized return value")
         }
     }
@@ -116,10 +116,10 @@
 /// Codegen a return instruction with the right return value(s) if any.
 pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, '_>) {
     match fx.fn_abi.as_ref().unwrap().ret.mode {
-        PassMode::Ignore | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+        PassMode::Ignore | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
             fx.bcx.ins().return_(&[]);
         }
-        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+        PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
             unreachable!("unsized return value")
         }
         PassMode::Direct(_) => {
@@ -132,7 +132,7 @@
             let (ret_val_a, ret_val_b) = place.to_cvalue(fx).load_scalar_pair(fx);
             fx.bcx.ins().return_(&[ret_val_a, ret_val_b]);
         }
-        PassMode::Cast(ref cast, _) => {
+        PassMode::Cast { ref cast, .. } => {
             let place = fx.get_local_place(RETURN_PLACE);
             let ret_val = place.to_cvalue(fx);
             let ret_vals = super::pass_mode::to_casted_value(fx, ret_val, cast);
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 522dd71..0a451da 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -250,7 +250,10 @@
 }
 
 fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
-    if !crate::constant::check_constants(fx) {
+    if let Err(err) =
+        fx.mir.post_mono_checks(fx.tcx, ty::ParamEnv::reveal_all(), |c| Ok(fx.monomorphize(c)))
+    {
+        err.emit_err(fx.tcx);
         fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
         fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
         // compilation should have been aborted
@@ -474,10 +477,10 @@
                     *destination,
                 );
             }
-            TerminatorKind::Terminate => {
-                codegen_panic_cannot_unwind(fx, source_info);
+            TerminatorKind::UnwindTerminate(reason) => {
+                codegen_unwind_terminate(fx, source_info, *reason);
             }
-            TerminatorKind::Resume => {
+            TerminatorKind::UnwindResume => {
                 // FIXME implement unwinding
                 fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
             }
@@ -723,11 +726,8 @@
                 }
                 Rvalue::Repeat(ref operand, times) => {
                     let operand = codegen_operand(fx, operand);
-                    let times = fx
-                        .monomorphize(times)
-                        .eval(fx.tcx, ParamEnv::reveal_all())
-                        .try_to_bits(fx.tcx.data_layout.pointer_size)
-                        .unwrap();
+                    let times =
+                        fx.monomorphize(times).eval_target_usize(fx.tcx, ParamEnv::reveal_all());
                     if operand.layout().size.bytes() == 0 {
                         // Do nothing for ZST's
                     } else if fx.clif_type(operand.layout().ty) == Some(types::I8) {
@@ -875,7 +875,8 @@
             PlaceElem::Deref => {
                 cplace = cplace.place_deref(fx);
             }
-            PlaceElem::OpaqueCast(ty) => cplace = cplace.place_opaque_cast(fx, ty),
+            PlaceElem::OpaqueCast(ty) => bug!("encountered OpaqueCast({ty}) in codegen"),
+            PlaceElem::Subtype(ty) => cplace = cplace.place_transmute_type(fx, fx.monomorphize(ty)),
             PlaceElem::Field(field, _ty) => {
                 cplace = cplace.place_field(fx, field);
             }
@@ -971,13 +972,14 @@
     codegen_panic_inner(fx, rustc_hir::LangItem::PanicNounwind, &args, source_info.span);
 }
 
-pub(crate) fn codegen_panic_cannot_unwind<'tcx>(
+pub(crate) fn codegen_unwind_terminate<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     source_info: mir::SourceInfo,
+    reason: UnwindTerminateReason,
 ) {
     let args = [];
 
-    codegen_panic_inner(fx, rustc_hir::LangItem::PanicCannotUnwind, &args, source_info.span);
+    codegen_panic_inner(fx, reason.lang_item(), &args, source_info.span);
 }
 
 fn codegen_panic_inner<'tcx>(
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
index 3081dcf..359b430 100644
--- a/compiler/rustc_codegen_cranelift/src/common.rs
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -359,7 +359,7 @@
     where
         T: TypeFoldable<TyCtxt<'tcx>> + Copy,
     {
-        self.instance.subst_mir_and_normalize_erasing_regions(
+        self.instance.instantiate_mir_and_normalize_erasing_regions(
             self.tcx,
             ty::ParamEnv::reveal_all(),
             ty::EarlyBinder::bind(value),
@@ -480,7 +480,7 @@
         if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
             self.0.sess.span_fatal(span, err.to_string())
         } else {
-            span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+            self.0.sess.span_fatal(span, format!("failed to get layout for `{}`: {}", ty, err))
         }
     }
 }
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index c315357..14b10ed 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -2,9 +2,8 @@
 
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_middle::mir::interpret::{
-    read_target_uint, AllocId, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
-};
+use rustc_middle::mir::interpret::{read_target_uint, AllocId, GlobalAlloc, Scalar};
+use rustc_middle::mir::ConstValue;
 
 use cranelift_module::*;
 
@@ -33,16 +32,6 @@
     }
 }
 
-pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
-    let mut all_constants_ok = true;
-    for constant in &fx.mir.required_consts {
-        if eval_mir_constant(fx, constant).is_none() {
-            all_constants_ok = false;
-        }
-    }
-    all_constants_ok
-}
-
 pub(crate) fn codegen_static(tcx: TyCtxt<'_>, module: &mut dyn Module, def_id: DefId) {
     let mut constants_cx = ConstantCx::new();
     constants_cx.todo.push(TodoItem::Static(def_id));
@@ -75,53 +64,21 @@
 
 pub(crate) fn eval_mir_constant<'tcx>(
     fx: &FunctionCx<'_, '_, 'tcx>,
-    constant: &Constant<'tcx>,
-) -> Option<(ConstValue<'tcx>, Ty<'tcx>)> {
-    let constant_kind = fx.monomorphize(constant.literal);
-    let uv = match constant_kind {
-        ConstantKind::Ty(const_) => match const_.kind() {
-            ty::ConstKind::Unevaluated(uv) => uv.expand(),
-            ty::ConstKind::Value(val) => {
-                return Some((fx.tcx.valtree_to_const_val((const_.ty(), val)), const_.ty()));
-            }
-            err => span_bug!(
-                constant.span,
-                "encountered bad ConstKind after monomorphizing: {:?}",
-                err
-            ),
-        },
-        ConstantKind::Unevaluated(mir::UnevaluatedConst { def, .. }, _)
-            if fx.tcx.is_static(def) =>
-        {
-            span_bug!(constant.span, "MIR constant refers to static");
-        }
-        ConstantKind::Unevaluated(uv, _) => uv,
-        ConstantKind::Val(val, _) => return Some((val, constant_kind.ty())),
-    };
-
-    let val = fx
-        .tcx
-        .const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None)
-        .map_err(|err| match err {
-            ErrorHandled::Reported(_) => {
-                fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
-            }
-            ErrorHandled::TooGeneric => {
-                span_bug!(constant.span, "codegen encountered polymorphic constant: {:?}", err);
-            }
-        })
-        .ok();
-    val.map(|val| (val, constant_kind.ty()))
+    constant: &ConstOperand<'tcx>,
+) -> (ConstValue<'tcx>, Ty<'tcx>) {
+    let cv = fx.monomorphize(constant.const_);
+    // This cannot fail because we checked all required_consts in advance.
+    let val = cv
+        .eval(fx.tcx, ty::ParamEnv::reveal_all(), Some(constant.span))
+        .expect("erroneous constant not captured by required_consts");
+    (val, cv.ty())
 }
 
 pub(crate) fn codegen_constant_operand<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
-    constant: &Constant<'tcx>,
+    constant: &ConstOperand<'tcx>,
 ) -> CValue<'tcx> {
-    let (const_val, ty) = eval_mir_constant(fx, constant).unwrap_or_else(|| {
-        span_bug!(constant.span, "erroneous constant not captured by required_consts")
-    });
-
+    let (const_val, ty) = eval_mir_constant(fx, constant);
     codegen_const_value(fx, const_val, ty)
 }
 
@@ -138,7 +95,7 @@
     }
 
     match const_val {
-        ConstValue::ZeroSized => unreachable!(), // we already handles ZST above
+        ConstValue::ZeroSized => unreachable!(), // we already handled ZST above
         ConstValue::Scalar(x) => match x {
             Scalar::Int(int) => {
                 if fx.clif_type(layout.ty).is_some() {
@@ -222,19 +179,16 @@
                 CValue::by_val(val, layout)
             }
         },
-        ConstValue::ByRef { alloc, offset } => CValue::by_ref(
-            pointer_for_allocation(fx, alloc)
+        ConstValue::Indirect { alloc_id, offset } => CValue::by_ref(
+            pointer_for_allocation(fx, alloc_id)
                 .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
             layout,
         ),
-        ConstValue::Slice { data, start, end } => {
-            let ptr = pointer_for_allocation(fx, data)
-                .offset_i64(fx, i64::try_from(start).unwrap())
-                .get_addr(fx);
-            let len = fx
-                .bcx
-                .ins()
-                .iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
+        ConstValue::Slice { data, meta } => {
+            let alloc_id = fx.tcx.reserve_and_set_memory_alloc(data);
+            let ptr = pointer_for_allocation(fx, alloc_id).get_addr(fx);
+            // FIXME: the `try_from` here can actually fail, e.g. for very long ZST slices.
+            let len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(meta).unwrap());
             CValue::by_val_pair(ptr, len, layout)
         }
     }
@@ -242,9 +196,9 @@
 
 fn pointer_for_allocation<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
-    alloc: ConstAllocation<'tcx>,
+    alloc_id: AllocId,
 ) -> crate::pointer::Pointer {
-    let alloc_id = fx.tcx.create_memory_alloc(alloc);
+    let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
     let data_id = data_id_for_alloc_id(
         &mut fx.constants_cx,
         &mut *fx.module,
@@ -375,6 +329,7 @@
                         unreachable!()
                     }
                 };
+                // FIXME: should we have a cache so we don't do this multiple times for the same `ConstAllocation`?
                 let data_id = *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
                     module.declare_anonymous_data(alloc.inner().mutability.is_mut(), false).unwrap()
                 });
@@ -479,7 +434,7 @@
     operand: &Operand<'tcx>,
 ) -> Option<ConstValue<'tcx>> {
     match operand {
-        Operand::Constant(const_) => Some(eval_mir_constant(fx, const_).unwrap().0),
+        Operand::Constant(const_) => Some(eval_mir_constant(fx, const_).0),
         // FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
         // inside a temporary before being passed to the intrinsic requiring the const argument.
         // This code tries to find a single constant defining definition of the referenced local.
@@ -550,8 +505,8 @@
                 match &bb_data.terminator().kind {
                     TerminatorKind::Goto { .. }
                     | TerminatorKind::SwitchInt { .. }
-                    | TerminatorKind::Resume
-                    | TerminatorKind::Terminate
+                    | TerminatorKind::UnwindResume
+                    | TerminatorKind::UnwindTerminate(_)
                     | TerminatorKind::Return
                     | TerminatorKind::Unreachable
                     | TerminatorKind::Drop { .. }
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
index 50bc7a1..b19b935 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
@@ -81,13 +81,10 @@
 
         match tcx.sess.source_map().lookup_line(span.lo()) {
             Ok(SourceFileAndLine { sf: file, line }) => {
-                let line_pos = file.lines(|lines| lines[line]);
+                let line_pos = file.lines()[line];
+                let col = file.relative_position(span.lo()) - line_pos;
 
-                (
-                    file,
-                    u64::try_from(line).unwrap() + 1,
-                    u64::from((span.lo() - line_pos).to_u32()) + 1,
-                )
+                (file, u64::try_from(line).unwrap() + 1, u64::from(col.to_u32()) + 1)
             }
             Err(file) => (file, 0, 0),
         }
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
index d143bcc..3e93830 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/aot.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -269,7 +269,7 @@
     ),
 ) -> OngoingModuleCodegen {
     let (cgu_name, mut cx, mut module, codegened_functions) =
-        tcx.prof.verbose_generic_activity_with_arg("codegen cgu", cgu_name.as_str()).run(|| {
+        tcx.prof.generic_activity_with_arg("codegen cgu", cgu_name.as_str()).run(|| {
             let cgu = tcx.codegen_unit(cgu_name);
             let mono_items = cgu.items_in_deterministic_order(tcx);
 
@@ -322,35 +322,24 @@
         });
 
     OngoingModuleCodegen::Async(std::thread::spawn(move || {
-        cx.profiler.clone().verbose_generic_activity_with_arg("compile functions", &*cgu_name).run(
-            || {
-                cranelift_codegen::timing::set_thread_profiler(Box::new(super::MeasuremeProfiler(
-                    cx.profiler.clone(),
-                )));
+        cx.profiler.clone().generic_activity_with_arg("compile functions", &*cgu_name).run(|| {
+            cranelift_codegen::timing::set_thread_profiler(Box::new(super::MeasuremeProfiler(
+                cx.profiler.clone(),
+            )));
 
-                let mut cached_context = Context::new();
-                for codegened_func in codegened_functions {
-                    crate::base::compile_fn(
-                        &mut cx,
-                        &mut cached_context,
-                        &mut module,
-                        codegened_func,
-                    );
-                }
-            },
-        );
+            let mut cached_context = Context::new();
+            for codegened_func in codegened_functions {
+                crate::base::compile_fn(&mut cx, &mut cached_context, &mut module, codegened_func);
+            }
+        });
 
-        let global_asm_object_file = cx
-            .profiler
-            .verbose_generic_activity_with_arg("compile assembly", &*cgu_name)
-            .run(|| {
+        let global_asm_object_file =
+            cx.profiler.generic_activity_with_arg("compile assembly", &*cgu_name).run(|| {
                 crate::global_asm::compile_global_asm(&global_asm_config, &cgu_name, &cx.global_asm)
             })?;
 
-        let codegen_result = cx
-            .profiler
-            .verbose_generic_activity_with_arg("write object file", &*cgu_name)
-            .run(|| {
+        let codegen_result =
+            cx.profiler.generic_activity_with_arg("write object file", &*cgu_name).run(|| {
                 emit_cgu(
                     &global_asm_config.output_filenames,
                     &cx.profiler,
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
index 518e3da..50bbf81 100644
--- a/compiler/rustc_codegen_cranelift/src/inline_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -242,8 +242,7 @@
                 }
             }
             InlineAsmOperand::Const { ref value } => {
-                let (const_value, ty) = crate::constant::eval_mir_constant(fx, value)
-                    .unwrap_or_else(|| span_bug!(span, "asm const cannot be resolved"));
+                let (const_value, ty) = crate::constant::eval_mir_constant(fx, value);
                 let value = rustc_codegen_ssa::common::asm_const_to_str(
                     fx.tcx,
                     span,
@@ -253,8 +252,8 @@
                 CInlineAsmOperand::Const { value }
             }
             InlineAsmOperand::SymFn { ref value } => {
-                let literal = fx.monomorphize(value.literal);
-                if let ty::FnDef(def_id, args) = *literal.ty().kind() {
+                let const_ = fx.monomorphize(value.const_);
+                if let ty::FnDef(def_id, args) = *const_.ty().kind() {
                     let instance = ty::Instance::resolve_for_fn_ptr(
                         fx.tcx,
                         ty::ParamEnv::reveal_all(),
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
index fdd27a4..e62de6b 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -177,244 +177,6 @@
                 bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
             });
         }
-        "llvm.x86.sse2.psrli.d" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.sse2.psrli.d imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.sse2.psrai.d" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.sse2.psrai.d imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.sse2.pslli.d" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.sse2.pslli.d imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.sse2.psrli.w" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.sse2.psrli.d imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 16 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.sse2.psrai.w" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.sse2.psrai.d imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.sse2.pslli.w" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.sse2.pslli.d imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 16 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.avx.psrli.d" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.avx.psrli.d imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.avx.psrai.d" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.avx.psrai.d imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.sse2.psrli.q" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.avx.psrli.q imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 64 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.sse2.pslli.q" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.avx.pslli.q imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 64 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.avx.pslli.d" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.avx.pslli.d imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.avx2.psrli.w" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.avx.psrli.w imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 16 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.avx2.psrai.w" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.avx.psrai.w imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.avx2.pslli.w" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.avx.pslli.w imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 16 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
         "llvm.x86.ssse3.pshuf.b.128" | "llvm.x86.avx2.pshuf.b" => {
             let (a, b) = match args {
                 [a, b] => (a, b),
@@ -506,14 +268,6 @@
             ret.place_lane(fx, 2).to_ptr().store(fx, res_2, MemFlags::trusted());
             ret.place_lane(fx, 3).to_ptr().store(fx, res_3, MemFlags::trusted());
         }
-        "llvm.x86.sse2.storeu.dq" | "llvm.x86.sse2.storeu.pd" => {
-            intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
-            let mem_addr = mem_addr.load_scalar(fx);
-
-            // FIXME correctly handle the unalignment
-            let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
-            dest.write_cvalue(fx, a);
-        }
         "llvm.x86.ssse3.pabs.b.128" | "llvm.x86.ssse3.pabs.w.128" | "llvm.x86.ssse3.pabs.d.128" => {
             let a = match args {
                 [a] => a,
@@ -571,8 +325,6 @@
 // llvm.x86.avx2.vperm2i128
 // llvm.x86.ssse3.pshuf.b.128
 // llvm.x86.avx2.pshuf.b
-// llvm.x86.avx2.psrli.w
-// llvm.x86.sse2.psrli.w
 
 fn llvm_add_sub<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 9863e40..6efbe14 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -21,7 +21,7 @@
 pub(super) fn codegen_simd_intrinsic_call<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     intrinsic: Symbol,
-    _args: GenericArgsRef<'tcx>,
+    generic_args: GenericArgsRef<'tcx>,
     args: &[mir::Operand<'tcx>],
     ret: CPlace<'tcx>,
     target: BasicBlock,
@@ -117,6 +117,54 @@
             });
         }
 
+        // simd_shuffle_generic<T, U, const I: &[u32]>(x: T, y: T) -> U
+        sym::simd_shuffle_generic => {
+            let [x, y] = args else {
+                bug!("wrong number of args for intrinsic {intrinsic}");
+            };
+            let x = codegen_operand(fx, x);
+            let y = codegen_operand(fx, y);
+
+            if !x.layout().ty.is_simd() {
+                report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+                return;
+            }
+
+            let idx = generic_args[2]
+                .expect_const()
+                .eval(fx.tcx, ty::ParamEnv::reveal_all(), Some(span))
+                .unwrap()
+                .unwrap_branch();
+
+            assert_eq!(x.layout(), y.layout());
+            let layout = x.layout();
+
+            let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+            let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+
+            assert_eq!(lane_ty, ret_lane_ty);
+            assert_eq!(idx.len() as u64, ret_lane_count);
+
+            let total_len = lane_count * 2;
+
+            let indexes =
+                idx.iter().map(|idx| idx.unwrap_leaf().try_to_u16().unwrap()).collect::<Vec<u16>>();
+
+            for &idx in &indexes {
+                assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
+            }
+
+            for (out_idx, in_idx) in indexes.into_iter().enumerate() {
+                let in_lane = if u64::from(in_idx) < lane_count {
+                    x.value_lane(fx, in_idx.into())
+                } else {
+                    y.value_lane(fx, u64::from(in_idx) - lane_count)
+                };
+                let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
+                out_lane.write_cvalue(fx, in_lane);
+            }
+        }
+
         // simd_shuffle<T, I, U>(x: T, y: T, idx: I) -> U
         sym::simd_shuffle => {
             let (x, y, idx) = match args {
@@ -172,7 +220,8 @@
                     .expect("simd_shuffle idx not const");
 
                 let idx_bytes = match idx_const {
-                    ConstValue::ByRef { alloc, offset } => {
+                    ConstValue::Indirect { alloc_id, offset } => {
+                        let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
                         let size = Size::from_bytes(
                             4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
                         );
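// A minimal standalone sketch (names like `shuffle_lanes` are illustrative, not
// from the patch) of the lane-remapping rule that the `simd_shuffle_generic`
// lowering above implements: output lane `i` copies input lane `idx[i]`, taken
// from `x` when `idx[i] < lane_count` and from `y` otherwise, with every index
// required to be below `2 * lane_count`.
fn shuffle_lanes<T: Copy>(x: &[T], y: &[T], idx: &[u16]) -> Vec<T> {
    assert_eq!(x.len(), y.len());
    let lane_count = x.len();
    let total_len = 2 * lane_count;
    idx.iter()
        .map(|&i| {
            let i = usize::from(i);
            assert!(i < total_len, "idx {} out of range 0..{}", i, total_len);
            if i < lane_count { x[i] } else { y[i - lane_count] }
        })
        .collect()
}
// Example: shuffle_lanes(&[1, 2], &[3, 4], &[0, 3]) yields vec![1, 4].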
diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs
index 6aeba13..c6133f2 100644
--- a/compiler/rustc_codegen_cranelift/src/unsize.rs
+++ b/compiler/rustc_codegen_cranelift/src/unsize.rs
@@ -88,7 +88,8 @@
                 let src_f = src_layout.field(fx, i);
                 assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                 assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
-                if src_f.is_zst() {
+                if src_f.is_1zst() {
+                    // We are looking for the one non-1-ZST field; this is not it.
                     continue;
                 }
                 assert_eq!(src_layout.size, src_f.size);
@@ -151,6 +152,7 @@
                 let dst_f = dst.place_field(fx, FieldIdx::new(i));
 
                 if dst_f.layout().is_zst() {
+                    // No data here, nothing to copy/coerce.
                     continue;
                 }
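// A minimal sketch, under the usual layout definitions, of the "exactly one
// non-1-ZST field" rule that both this unsizing code and the receiver descent
// in vtable.rs rely on: a 1-ZST is a field with size 0 and alignment 1, and the
// coercion expects precisely one field that is not. The function name and the
// plain (size, align) representation are illustrative only.
fn non_1zst_field(fields: &[(u64, u64)]) -> Option<usize> {
    let mut found = None;
    for (i, &(size, align)) in fields.iter().enumerate() {
        let is_1zst = size == 0 && align == 1;
        if !is_1zst {
            if found.is_some() {
                return None; // more than one non-1-ZST field: not a valid wrapper
            }
            found = Some(i);
        }
    }
    found
}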
 
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index ff95141..45893a4 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -674,7 +674,9 @@
         }
     }
 
-    pub(crate) fn place_opaque_cast(
+    /// Used for `ProjectionElem::Subtype`; `ty` has to be monomorphized before
+    /// being passed on.
+    pub(crate) fn place_transmute_type(
         self,
         fx: &mut FunctionCx<'_, '_, 'tcx>,
         ty: Ty<'tcx>,
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
index b309695..41ea0b1 100644
--- a/compiler/rustc_codegen_cranelift/src/vtable.rs
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -48,19 +48,12 @@
 ) -> (Pointer, Value) {
     let (ptr, vtable) = 'block: {
         if let Abi::Scalar(_) = arg.layout().abi {
-            'descend_newtypes: while !arg.layout().ty.is_unsafe_ptr() && !arg.layout().ty.is_ref() {
-                for i in 0..arg.layout().fields.count() {
-                    let field = arg.value_field(fx, FieldIdx::new(i));
-                    if !field.layout().is_zst() {
-                        // we found the one non-zero-sized field that is allowed
-                        // now find *its* non-zero-sized field, or stop if it's a
-                        // pointer
-                        arg = field;
-                        continue 'descend_newtypes;
-                    }
-                }
-
-                bug!("receiver has no non-zero-sized fields {:?}", arg);
+            while !arg.layout().ty.is_unsafe_ptr() && !arg.layout().ty.is_ref() {
+                let (idx, _) = arg
+                    .layout()
+                    .non_1zst_field(fx)
+                    .expect("not exactly one non-1-ZST field in a `DispatchFromDyn` type");
+                arg = arg.value_field(fx, FieldIdx::new(idx));
             }
         }
 
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs
index 6fb1cbf..a49530e 100644
--- a/compiler/rustc_codegen_gcc/src/abi.rs
+++ b/compiler/rustc_codegen_gcc/src/abi.rs
@@ -113,7 +113,7 @@
             match self.ret.mode {
                 PassMode::Ignore => cx.type_void(),
                 PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
-                PassMode::Cast(ref cast, _) => cast.gcc_type(cx),
+                PassMode::Cast { ref cast, .. } => cast.gcc_type(cx),
                 PassMode::Indirect { .. } => {
                     argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                     cx.type_void()
@@ -125,25 +125,25 @@
                 PassMode::Ignore => continue,
                 PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx),
                 PassMode::Pair(..) => {
-                    argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0, true));
-                    argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1, true));
+                    argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0));
+                    argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1));
                     continue;
                 }
-                PassMode::Indirect { extra_attrs: Some(_), .. } => {
+                PassMode::Indirect { meta_attrs: Some(_), .. } => {
                     unimplemented!();
                 }
-                PassMode::Cast(ref cast, pad_i32) => {
+                PassMode::Cast { ref cast, pad_i32 } => {
                     // add padding
                     if pad_i32 {
                         argument_tys.push(Reg::i32().gcc_type(cx));
                     }
                     cast.gcc_type(cx)
                 }
-                PassMode::Indirect { extra_attrs: None, on_stack: true, .. } => {
+                PassMode::Indirect { meta_attrs: None, on_stack: true, .. } => {
                     on_stack_param_indices.insert(argument_tys.len());
                     arg.memory_ty(cx)
                 },
-                PassMode::Indirect { extra_attrs: None, on_stack: false, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
+                PassMode::Indirect { meta_attrs: None, on_stack: false, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
             };
             argument_tys.push(arg_ty);
         }
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 0b1f2fe..308cb04 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -821,7 +821,7 @@
 
                 let mut load = |i, scalar: &abi::Scalar, align| {
                     let llptr = self.struct_gep(pair_type, place.llval, i as u64);
-                    let llty = place.layout.scalar_pair_element_gcc_type(self, i, false);
+                    let llty = place.layout.scalar_pair_element_gcc_type(self, i);
                     let load = self.load(llty, llptr, align);
                     scalar_load_metadata(self, load, scalar);
                     if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs
index a96bd66..9fc7762 100644
--- a/compiler/rustc_codegen_gcc/src/callee.rs
+++ b/compiler/rustc_codegen_gcc/src/callee.rs
@@ -100,7 +100,7 @@
             // whether we are sharing generics or not. The important thing here is
             // that the visibility we apply to the declaration is the same one that
             // has been applied to the definition (wherever that definition may be).
-            let is_generic = instance.args.non_erasable_generics().next().is_some();
+            let is_generic = instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
 
             if is_generic {
                 // This is a monomorphization. Its expected visibility depends
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
index 88dcafa..dcebd92 100644
--- a/compiler/rustc_codegen_gcc/src/context.rs
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -7,6 +7,7 @@
     BaseTypeMethods,
     MiscMethods,
 };
+use rustc_codegen_ssa::errors as ssa_errors;
 use rustc_data_structures::base_n;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_middle::span_bug;
@@ -479,7 +480,7 @@
         if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
             self.sess().emit_fatal(respan(span, err.into_diagnostic()))
         } else {
-            span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+            self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
         }
     }
 }
diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs
index a81585d..d1bfd83 100644
--- a/compiler/rustc_codegen_gcc/src/debuginfo.rs
+++ b/compiler/rustc_codegen_gcc/src/debuginfo.rs
@@ -55,7 +55,7 @@
         _fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         _llfn: RValue<'gcc>,
         _mir: &mir::Body<'tcx>,
-    ) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
+    ) -> Option<FunctionDebugContext<'tcx, Self::DIScope, Self::DILocation>> {
         // TODO(antoyo)
         None
     }
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index f8c32c6..68a087a 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -144,7 +144,7 @@
                 sym::volatile_load | sym::unaligned_volatile_load => {
                     let tp_ty = fn_args.type_at(0);
                     let mut ptr = args[0].immediate();
-                    if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+                    if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
                         ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
                     }
                     let load = self.volatile_load(ptr.get_type(), ptr);
@@ -353,7 +353,7 @@
             };
 
         if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+            if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
                 let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
                 let ptr = self.pointercast(result.llval, ptr_llty);
                 self.store(llval, ptr, result.align);
@@ -449,7 +449,7 @@
         else if self.is_unsized_indirect() {
             bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
         }
-        else if let PassMode::Cast(ref cast, _) = self.mode {
+        else if let PassMode::Cast { ref cast, .. } = self.mode {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
@@ -511,10 +511,10 @@
             PassMode::Pair(..) => {
                 OperandValue::Pair(next(), next()).store(bx, dst);
             },
-            PassMode::Indirect { extra_attrs: Some(_), .. } => {
+            PassMode::Indirect { meta_attrs: Some(_), .. } => {
                 OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
             },
-            PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(..) => {
+            PassMode::Direct(_) | PassMode::Indirect { meta_attrs: None, .. } | PassMode::Cast { .. } => {
                 let next_arg = next();
                 self.store(bx, next_arg, dst);
             },
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index 697ae01..ce7e316 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -80,7 +80,7 @@
 use rustc_fluent_macro::fluent_messages;
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::query::Providers;
+use rustc_middle::util::Providers;
 use rustc_middle::ty::TyCtxt;
 use rustc_session::config::{Lto, OptLevel, OutputFilenames};
 use rustc_session::Session;
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
index 84d5783..cc46780 100644
--- a/compiler/rustc_codegen_gcc/src/type_of.rs
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -4,7 +4,7 @@
 use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
 use rustc_middle::bug;
 use rustc_middle::ty::{self, Ty, TypeVisitableExt};
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_target::abi::{self, Abi, Align, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
 use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
@@ -74,8 +74,8 @@
         Abi::ScalarPair(..) => {
             return cx.type_struct(
                 &[
-                    layout.scalar_pair_element_gcc_type(cx, 0, false),
-                    layout.scalar_pair_element_gcc_type(cx, 1, false),
+                    layout.scalar_pair_element_gcc_type(cx, 0),
+                    layout.scalar_pair_element_gcc_type(cx, 1),
                 ],
                 false,
             );
@@ -150,7 +150,7 @@
     fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
     fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
     fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
-    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>;
+    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc>;
     fn gcc_field_index(&self, index: usize) -> u64;
     fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
 }
@@ -182,23 +182,16 @@
     /// of that field's type - this is useful for taking the address of
     /// that field and ensuring the struct has the right alignment.
     fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
         if let Abi::Scalar(ref scalar) = self.abi {
             // Use a different cache for scalars because pointers to DSTs
             // can be either fat or thin (data pointers of fat pointers).
             if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
                 return ty;
             }
-            let ty =
-                match *self.ty.kind() {
-                    ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
-                        cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx))
-                    }
-                    ty::Adt(def, _) if def.is_box() => {
-                        cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx))
-                    }
-                    ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())),
-                    _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
-                };
+            let ty = self.scalar_gcc_type_at(cx, scalar, Size::ZERO);
             cx.scalar_types.borrow_mut().insert(self.ty, ty);
             return ty;
         }
@@ -272,23 +265,10 @@
         }
     }
 
-    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
-        // TODO(antoyo): remove llvm hack:
-        // HACK(eddyb) special-case fat pointers until LLVM removes
-        // pointee types, to avoid bitcasting every `OperandRef::deref`.
-        match self.ty.kind() {
-            ty::Ref(..) | ty::RawPtr(_) => {
-                return self.field(cx, index).gcc_type(cx);
-            }
-            // only wide pointer boxes are handled as pointers
-            // thin pointer boxes with scalar allocators are handled by the general logic below
-            ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
-                let ptr_ty = Ty::new_mut_ptr(cx.tcx,self.ty.boxed_ty());
-                return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
-            }
-            _ => {}
-        }
-
+    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc> {
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
         let (a, b) = match self.abi {
             Abi::ScalarPair(ref a, ref b) => (a, b),
             _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
@@ -367,8 +347,8 @@
         layout.gcc_field_index(index)
     }
 
-    fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
-        layout.scalar_pair_element_gcc_type(self, index, immediate)
+    fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, _immediate: bool) -> Type<'gcc> {
+        layout.scalar_pair_element_gcc_type(self, index)
     }
 
     fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> {
diff --git a/compiler/rustc_codegen_llvm/messages.ftl b/compiler/rustc_codegen_llvm/messages.ftl
index aed4a8f..c0cfe39 100644
--- a/compiler/rustc_codegen_llvm/messages.ftl
+++ b/compiler/rustc_codegen_llvm/messages.ftl
@@ -37,6 +37,8 @@
 
 codegen_llvm_lto_dylib = lto cannot be used for `dylib` crate type without `-Zdylib-lto`
 
+codegen_llvm_lto_proc_macro = lto cannot be used for `proc-macro` crate type without `-Zdylib-lto`
+
 codegen_llvm_missing_features =
     add the missing features in a `target_feature` attribute
 
@@ -83,6 +85,8 @@
     unknown feature specified for `-Ctarget-feature`: `{$feature}`
     .note = features must begin with a `+` to enable or `-` to disable it
 
+codegen_llvm_unknown_debuginfo_compression = unknown debuginfo compression algorithm {$algorithm} - will fall back to uncompressed debuginfo
+
 codegen_llvm_write_bytecode = failed to write bytecode to {$path}: {$err}
 
 codegen_llvm_write_ir = failed to write LLVM IR to {$path}
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index c6a7dc9..9e834b8 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -211,7 +211,7 @@
             OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
         } else if self.is_unsized_indirect() {
             bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
-        } else if let PassMode::Cast(cast, _) = &self.mode {
+        } else if let PassMode::Cast { cast, pad_i32: _ } = &self.mode {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
@@ -274,12 +274,12 @@
             PassMode::Pair(..) => {
                 OperandValue::Pair(next(), next()).store(bx, dst);
             }
-            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                 OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
             }
             PassMode::Direct(_)
-            | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
-            | PassMode::Cast(..) => {
+            | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
+            | PassMode::Cast { .. } => {
                 let next_arg = next();
                 self.store(bx, next_arg, dst);
             }
@@ -332,7 +332,7 @@
         let llreturn_ty = match &self.ret.mode {
             PassMode::Ignore => cx.type_void(),
             PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
-            PassMode::Cast(cast, _) => cast.llvm_type(cx),
+            PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
             PassMode::Indirect { .. } => {
                 llargument_tys.push(cx.type_ptr());
                 cx.type_void()
@@ -340,29 +340,78 @@
         };
 
         for arg in args {
+            // Note that the exact number of arguments pushed here is carefully synchronized with
+            // code all over the place, both in the codegen_llvm and codegen_ssa crates. That's how
+            // other code then knows which LLVM argument(s) correspond to the n-th Rust argument.
             let llarg_ty = match &arg.mode {
                 PassMode::Ignore => continue,
-                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
+                PassMode::Direct(_) => {
+                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+                    // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
+                    // guaranteeing that we generate ABI-compatible LLVM IR. Things get tricky for
+                    // aggregates...
+                    if matches!(arg.layout.abi, abi::Abi::Aggregate { .. }) {
+                        assert!(
+                            arg.layout.is_sized(),
+                            "`PassMode::Direct` for unsized type: {}",
+                            arg.layout.ty
+                        );
+                        // This really shouldn't happen, since `immediate_llvm_type` will use
+                        // `layout.fields` to turn this Rust type into an LLVM type. This means all
+                        // sorts of Rust type details leak into the ABI. However wasm sadly *does*
+                        // currently use this mode so we have to allow it -- but we absolutely
+                        // shouldn't let any more targets do that.
+                        // (Also see <https://github.com/rust-lang/rust/issues/115666>.)
+                        assert!(
+                            matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64"),
+                            "`PassMode::Direct` for aggregates only allowed on wasm targets\nProblematic type: {:#?}",
+                            arg.layout,
+                        );
+                    }
+                    arg.layout.immediate_llvm_type(cx)
+                }
                 PassMode::Pair(..) => {
+                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+                    // so for ScalarPair we can easily be sure that we are generating ABI-compatible
+                    // LLVM IR.
+                    assert!(
+                        matches!(arg.layout.abi, abi::Abi::ScalarPair(..)),
+                        "PassMode::Pair for type {}",
+                        arg.layout.ty
+                    );
                     llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                     llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                     continue;
                 }
-                PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+                PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack } => {
+                    // `Indirect` with metadata is only for unsized types, and doesn't work with
+                    // on-stack passing.
+                    assert!(arg.layout.is_unsized() && !on_stack);
+                    // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
+                    // Any two ABI-compatible unsized types have the same metadata type and
+                    // moreover the same metadata value leads to the same dynamic size and
+                    // alignment, so this respects ABI compatibility.
                     let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
                     let ptr_layout = cx.layout_of(ptr_ty);
                     llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                     llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                     continue;
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
+                    assert!(arg.layout.is_sized());
+                    cx.type_ptr()
+                }
+                PassMode::Cast { cast, pad_i32 } => {
+                    // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
+                    assert!(arg.layout.is_sized());
                     // add padding
                     if *pad_i32 {
                         llargument_tys.push(Reg::i32().llvm_type(cx));
                     }
+                    // Compute the LLVM type we use for this function from the cast type.
+                    // We assume here that ABI-compatible Rust types have the same cast type.
                     cast.llvm_type(cx)
                 }
-                PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => cx.type_ptr(),
             };
             llargument_tys.push(llarg_ty);
         }
@@ -405,13 +454,13 @@
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
             }
-            PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(attrs);
                 let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
                 attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
             }
-            PassMode::Cast(cast, _) => {
+            PassMode::Cast { cast, pad_i32: _ } => {
                 cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
             }
             _ => {}
@@ -419,25 +468,25 @@
         for arg in self.args.iter() {
             match &arg.mode {
                 PassMode::Ignore => {}
-                PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                     let i = apply(attrs);
                     let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
                     attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                 }
                 PassMode::Direct(attrs)
-                | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                     apply(attrs);
                 }
-                PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
                     assert!(!on_stack);
                     apply(attrs);
-                    apply(extra_attrs);
+                    apply(meta_attrs);
                 }
                 PassMode::Pair(a, b) => {
                     apply(a);
                     apply(b);
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Cast { cast, pad_i32 } => {
                     if *pad_i32 {
                         apply(&ArgAttributes::new());
                     }
@@ -467,13 +516,13 @@
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
             }
-            PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                 assert!(!on_stack);
                 let i = apply(bx.cx, attrs);
                 let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
                 attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
             }
-            PassMode::Cast(cast, _) => {
+            PassMode::Cast { cast, pad_i32: _ } => {
                 cast.attrs.apply_attrs_to_callsite(
                     llvm::AttributePlace::ReturnValue,
                     &bx.cx,
@@ -495,7 +544,7 @@
         for arg in self.args.iter() {
             match &arg.mode {
                 PassMode::Ignore => {}
-                PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                     let i = apply(bx.cx, attrs);
                     let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
                     attributes::apply_to_callsite(
@@ -505,18 +554,18 @@
                     );
                 }
                 PassMode::Direct(attrs)
-                | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                     apply(bx.cx, attrs);
                 }
-                PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack: _ } => {
+                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
                     apply(bx.cx, attrs);
-                    apply(bx.cx, extra_attrs);
+                    apply(bx.cx, meta_attrs);
                 }
                 PassMode::Pair(a, b) => {
                     apply(bx.cx, a);
                     apply(bx.cx, b);
                 }
-                PassMode::Cast(cast, pad_i32) => {
+                PassMode::Cast { cast, pad_i32 } => {
                     if *pad_i32 {
                         apply(bx.cx, &ArgAttributes::new());
                     }
@@ -571,7 +620,9 @@
             Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => {
                 llvm::CCallConv
             }
-            Conv::RustCold => llvm::ColdCallConv,
+            Conv::Cold => llvm::ColdCallConv,
+            Conv::PreserveMost => llvm::PreserveMost,
+            Conv::PreserveAll => llvm::PreserveAll,
             Conv::AmdGpuKernel => llvm::AmdGpuKernel,
             Conv::AvrInterrupt => llvm::AvrInterrupt,
             Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index a82d2c5..f33075a 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -367,7 +367,7 @@
                 match addition {
                     Addition::File { path, name_in_archive } => {
                         let path = CString::new(path.to_str().unwrap())?;
-                        let name = CString::new(name_in_archive.clone())?;
+                        let name = CString::new(name_in_archive.as_bytes())?;
                         members.push(llvm::LLVMRustArchiveMemberNew(
                             path.as_ptr(),
                             name.as_ptr(),
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index b2d28ce..a3b0dc6 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -1,6 +1,8 @@
-use crate::back::write::{self, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers};
+use crate::back::write::{
+    self, bitcode_section_name, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers,
+};
 use crate::errors::{
-    DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
+    DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro,
 };
 use crate::llvm::{self, build_string};
 use crate::{LlvmCodegenBackend, ModuleLlvm};
@@ -24,6 +26,7 @@
 use std::fs::File;
 use std::io;
 use std::iter;
+use std::mem::ManuallyDrop;
 use std::path::Path;
 use std::slice;
 use std::sync::Arc;
@@ -34,8 +37,12 @@
 
 pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
     match crate_type {
-        CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true,
-        CrateType::Rlib | CrateType::ProcMacro => false,
+        CrateType::Executable
+        | CrateType::Dylib
+        | CrateType::Staticlib
+        | CrateType::Cdylib
+        | CrateType::ProcMacro => true,
+        CrateType::Rlib => false,
     }
 }
 
@@ -85,6 +92,11 @@
                     diag_handler.emit_err(LtoDylib);
                     return Err(FatalError);
                 }
+            } else if *crate_type == CrateType::ProcMacro {
+                if !cgcx.opts.unstable_opts.dylib_lto {
+                    diag_handler.emit_err(LtoProcMacro);
+                    return Err(FatalError);
+                }
             }
         }
 
@@ -120,6 +132,7 @@
                 info!("adding bitcode from {}", name);
                 match get_bitcode_slice_from_object_data(
                     child.data(&*archive_data).expect("corrupt rlib"),
+                    cgcx,
                 ) {
                     Ok(data) => {
                         let module = SerializedModule::FromRlib(data.to_vec());
@@ -141,10 +154,29 @@
     Ok((symbols_below_threshold, upstream_modules))
 }
 
-fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFromRlib> {
+fn get_bitcode_slice_from_object_data<'a>(
+    obj: &'a [u8],
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+) -> Result<&'a [u8], LtoBitcodeFromRlib> {
+    // We're about to assume the data here is an object file with sections, but if it's raw LLVM IR that
+    // won't work. Fortunately, if that's what we have we can just return the object directly, so we sniff
+    // the relevant magic strings here and return.
+    if obj.starts_with(b"\xDE\xC0\x17\x0B") || obj.starts_with(b"BC\xC0\xDE") {
+        return Ok(obj);
+    }
+    // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment name"
+    // which in the public API for sections gets treated as part of the section name, but internally
+    // in MachOObjectFile.cpp gets treated separately.
+    let section_name = bitcode_section_name(cgcx).trim_start_matches("__LLVM,");
     let mut len = 0;
-    let data =
-        unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
+    let data = unsafe {
+        llvm::LLVMRustGetSliceFromObjectDataByName(
+            obj.as_ptr(),
+            obj.len(),
+            section_name.as_ptr(),
+            &mut len,
+        )
+    };
     if !data.is_null() {
         assert!(len != 0);
         let bc = unsafe { slice::from_raw_parts(data, len) };
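// The magic-byte sniffing used above, in isolation: raw LLVM bitcode starts
// either with the bitcode-wrapper magic 0x0B17C0DE (stored little-endian as
// bytes DE C0 17 0B) or with the plain "BC\xC0\xDE" magic; anything else is
// treated as an object file whose embedded-bitcode section has to be located
// by name. The function name is illustrative, not part of the patch.
fn is_raw_llvm_bitcode(data: &[u8]) -> bool {
    data.starts_with(b"\xDE\xC0\x17\x0B") || data.starts_with(b"BC\xC0\xDE")
}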
@@ -441,7 +473,7 @@
 
         for (i, (name, buffer)) in modules.into_iter().enumerate() {
             info!("local module: {} - {}", i, name);
-            let cname = CString::new(name.clone()).unwrap();
+            let cname = CString::new(name.as_bytes()).unwrap();
             thin_modules.push(llvm::ThinLTOModule {
                 identifier: cname.as_ptr(),
                 data: buffer.data().as_ptr(),
@@ -583,7 +615,7 @@
     module: &mut ModuleCodegen<ModuleLlvm>,
     thin: bool,
 ) -> Result<(), FatalError> {
-    let _timer = cgcx.prof.verbose_generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
+    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
     let config = cgcx.config(module.kind);
 
     // Now we have one massive module inside of llmod. Time to run the
@@ -705,7 +737,7 @@
     let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
     let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &diag_handler)? as *const _;
     let mut module = ModuleCodegen {
-        module_llvm: ModuleLlvm { llmod_raw, llcx, tm },
+        module_llvm: ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) },
         name: thin_module.name().to_string(),
         kind: ModuleKind::Regular,
     };
diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
new file mode 100644
index 0000000..36484c3
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
@@ -0,0 +1,103 @@
+use std::{
+    ffi::{c_char, CStr},
+    marker::PhantomData,
+    ops::Deref,
+    ptr::NonNull,
+};
+
+use rustc_data_structures::small_c_str::SmallCStr;
+
+use crate::{errors::LlvmError, llvm};
+
+/// Responsible for safely creating and disposing of an `llvm::TargetMachine` via FFI functions.
+/// Not cloneable, as there is no clone function for `llvm::TargetMachine`.
+#[repr(transparent)]
+pub struct OwnedTargetMachine {
+    tm_unique: NonNull<llvm::TargetMachine>,
+    phantom: PhantomData<llvm::TargetMachine>,
+}
+
+impl OwnedTargetMachine {
+    pub fn new(
+        triple: &CStr,
+        cpu: &CStr,
+        features: &CStr,
+        abi: &CStr,
+        model: llvm::CodeModel,
+        reloc: llvm::RelocModel,
+        level: llvm::CodeGenOptLevel,
+        use_soft_fp: bool,
+        function_sections: bool,
+        data_sections: bool,
+        unique_section_names: bool,
+        trap_unreachable: bool,
+        singletree: bool,
+        asm_comments: bool,
+        emit_stack_size_section: bool,
+        relax_elf_relocations: bool,
+        use_init_array: bool,
+        split_dwarf_file: &CStr,
+        output_obj_file: &CStr,
+        debug_info_compression: &CStr,
+        force_emulated_tls: bool,
+        args_cstr_buff: &[u8],
+    ) -> Result<Self, LlvmError<'static>> {
+        assert!(args_cstr_buff.len() > 0);
+        assert!(
+            *args_cstr_buff.last().unwrap() == 0,
+            "The last character must be a null terminator."
+        );
+
+        // SAFETY: llvm::LLVMRustCreateTargetMachine copies pointed to data
+        let tm_ptr = unsafe {
+            llvm::LLVMRustCreateTargetMachine(
+                triple.as_ptr(),
+                cpu.as_ptr(),
+                features.as_ptr(),
+                abi.as_ptr(),
+                model,
+                reloc,
+                level,
+                use_soft_fp,
+                function_sections,
+                data_sections,
+                unique_section_names,
+                trap_unreachable,
+                singletree,
+                asm_comments,
+                emit_stack_size_section,
+                relax_elf_relocations,
+                use_init_array,
+                split_dwarf_file.as_ptr(),
+                output_obj_file.as_ptr(),
+                debug_info_compression.as_ptr(),
+                force_emulated_tls,
+                args_cstr_buff.as_ptr() as *const c_char,
+                args_cstr_buff.len(),
+            )
+        };
+
+        NonNull::new(tm_ptr)
+            .map(|tm_unique| Self { tm_unique, phantom: PhantomData })
+            .ok_or_else(|| LlvmError::CreateTargetMachine { triple: SmallCStr::from(triple) })
+    }
+}
+
+impl Deref for OwnedTargetMachine {
+    type Target = llvm::TargetMachine;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
+        unsafe { self.tm_unique.as_ref() }
+    }
+}
+
+impl Drop for OwnedTargetMachine {
+    fn drop(&mut self) {
+        // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
+        // OwnedTargetMachine is not copyable so there is no double free or use after free
+        unsafe {
+            llvm::LLVMRustDisposeTargetMachine(self.tm_unique.as_mut());
+        }
+    }
+}
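// The ownership pattern used by `OwnedTargetMachine`, sketched in isolation: a
// `NonNull` handle plus `PhantomData` to mark ownership, `Deref` for shared
// access, and `Drop` to call the dispose function exactly once. The `ffi`
// module and all names below are hypothetical stand-ins so the sketch is
// self-contained; the real type talks to LLVMRustCreateTargetMachine and
// LLVMRustDisposeTargetMachine.
use std::{marker::PhantomData, ops::Deref, ptr::NonNull};

mod ffi {
    pub struct Handle(pub u32); // stand-in for an opaque C type

    pub fn create(id: u32) -> *mut Handle {
        Box::into_raw(Box::new(Handle(id)))
    }

    // Safety: `h` must come from `create` and must not be disposed twice.
    pub unsafe fn dispose(h: *mut Handle) {
        drop(Box::from_raw(h));
    }
}

pub struct OwnedHandle {
    raw: NonNull<ffi::Handle>,
    _marker: PhantomData<ffi::Handle>,
}

impl OwnedHandle {
    pub fn new(id: u32) -> Option<Self> {
        NonNull::new(ffi::create(id)).map(|raw| Self { raw, _marker: PhantomData })
    }
}

impl Deref for OwnedHandle {
    type Target = ffi::Handle;
    fn deref(&self) -> &Self::Target {
        // Safety: `new` only stores pointers returned by `ffi::create`.
        unsafe { self.raw.as_ref() }
    }
}

impl Drop for OwnedHandle {
    fn drop(&mut self) {
        // Safety: sole owner, so this is the only dispose call.
        unsafe { ffi::dispose(self.raw.as_ptr()) }
    }
}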
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 47cc5bd..c778a6e 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -1,17 +1,22 @@
 use crate::back::lto::ThinBuffer;
+use crate::back::owned_target_machine::OwnedTargetMachine;
 use crate::back::profiling::{
     selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
 };
 use crate::base;
 use crate::common;
 use crate::errors::{
-    CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode,
+    CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
+    WithLlvmError, WriteBytecode,
 };
 use crate::llvm::{self, DiagnosticInfo, PassManager};
 use crate::llvm_util;
 use crate::type_::Type;
 use crate::LlvmCodegenBackend;
 use crate::ModuleLlvm;
+use llvm::{
+    LLVMRustLLVMHasZlibCompressionForDebugSymbols, LLVMRustLLVMHasZstdCompressionForDebugSymbols,
+};
 use rustc_codegen_ssa::back::link::ensure_removed;
 use rustc_codegen_ssa::back::write::{
     BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
@@ -94,8 +99,8 @@
     }
 }
 
-pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm::TargetMachine {
-    let config = TargetMachineFactoryConfig { split_dwarf_file: None };
+pub fn create_informational_target_machine(sess: &Session) -> OwnedTargetMachine {
+    let config = TargetMachineFactoryConfig { split_dwarf_file: None, output_obj_file: None };
     // Can't use query system here quite yet because this function is invoked before the query
     // system/tcx is set up.
     let features = llvm_util::global_llvm_features(sess, false);
@@ -103,7 +108,7 @@
         .unwrap_or_else(|err| llvm_err(sess.diagnostic(), err).raise())
 }
 
-pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
+pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine {
     let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
         tcx.output_filenames(()).split_dwarf_path(
             tcx.sess.split_debuginfo(),
@@ -113,7 +118,11 @@
     } else {
         None
     };
-    let config = TargetMachineFactoryConfig { split_dwarf_file };
+
+    let output_obj_file =
+        Some(tcx.output_filenames(()).temp_path(OutputType::Object, Some(mod_name)));
+    let config = TargetMachineFactoryConfig { split_dwarf_file, output_obj_file };
+
     target_machine_factory(
         &tcx.sess,
         tcx.backend_optimization_level(()),
@@ -216,36 +225,73 @@
 
     let force_emulated_tls = sess.target.force_emulated_tls;
 
-    Arc::new(move |config: TargetMachineFactoryConfig| {
-        let split_dwarf_file =
-            path_mapping.map_prefix(config.split_dwarf_file.unwrap_or_default()).0;
-        let split_dwarf_file = CString::new(split_dwarf_file.to_str().unwrap()).unwrap();
+    // Copy the exe path, followed by the expanded args, all into one buffer,
+    // null-terminating each so we can use them as null-terminated strings.
+    let args_cstr_buff = {
+        let mut args_cstr_buff: Vec<u8> = Vec::new();
+        let exe_path = std::env::current_exe().unwrap_or_default();
+        let exe_path_str = exe_path.into_os_string().into_string().unwrap_or_default();
 
-        let tm = unsafe {
-            llvm::LLVMRustCreateTargetMachine(
-                triple.as_ptr(),
-                cpu.as_ptr(),
-                features.as_ptr(),
-                abi.as_ptr(),
-                code_model,
-                reloc_model,
-                opt_level,
-                use_softfp,
-                ffunction_sections,
-                fdata_sections,
-                funique_section_names,
-                trap_unreachable,
-                singlethread,
-                asm_comments,
-                emit_stack_size_section,
-                relax_elf_relocations,
-                use_init_array,
-                split_dwarf_file.as_ptr(),
-                force_emulated_tls,
-            )
+        args_cstr_buff.extend_from_slice(exe_path_str.as_bytes());
+        args_cstr_buff.push(0);
+
+        for arg in sess.expanded_args.iter() {
+            args_cstr_buff.extend_from_slice(arg.as_bytes());
+            args_cstr_buff.push(0);
+        }
+
+        args_cstr_buff
+    };
+
+    let debuginfo_compression = sess.opts.debuginfo_compression.to_string();
+    match sess.opts.debuginfo_compression {
+        rustc_session::config::DebugInfoCompression::Zlib => {
+            if !unsafe { LLVMRustLLVMHasZlibCompressionForDebugSymbols() } {
+                sess.emit_warning(UnknownCompression { algorithm: "zlib" });
+            }
+        }
+        rustc_session::config::DebugInfoCompression::Zstd => {
+            if !unsafe { LLVMRustLLVMHasZstdCompressionForDebugSymbols() } {
+                sess.emit_warning(UnknownCompression { algorithm: "zstd" });
+            }
+        }
+        rustc_session::config::DebugInfoCompression::None => {}
+    };
+    let debuginfo_compression = SmallCStr::new(&debuginfo_compression);
+
+    Arc::new(move |config: TargetMachineFactoryConfig| {
+        let path_to_cstring_helper = |path: Option<PathBuf>| -> CString {
+            let path = path_mapping.map_prefix(path.unwrap_or_default()).0;
+            CString::new(path.to_str().unwrap()).unwrap()
         };
 
-        tm.ok_or_else(|| LlvmError::CreateTargetMachine { triple: triple.clone() })
+        let split_dwarf_file = path_to_cstring_helper(config.split_dwarf_file);
+        let output_obj_file = path_to_cstring_helper(config.output_obj_file);
+
+        OwnedTargetMachine::new(
+            &triple,
+            &cpu,
+            &features,
+            &abi,
+            code_model,
+            reloc_model,
+            opt_level,
+            use_softfp,
+            ffunction_sections,
+            fdata_sections,
+            funique_section_names,
+            trap_unreachable,
+            singlethread,
+            asm_comments,
+            emit_stack_size_section,
+            relax_elf_relocations,
+            use_init_array,
+            &split_dwarf_file,
+            &output_obj_file,
+            &debuginfo_compression,
+            force_emulated_tls,
+            &args_cstr_buff,
+        )
     })
 }
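// A minimal sketch of the `args_cstr_buff` convention built above: every string
// is appended followed by a NUL byte, so the final byte is always 0 (the
// invariant asserted by `OwnedTargetMachine::new`) and the buffer is a packed
// sequence of C strings. The function names are illustrative, and the unpacking
// drops empty entries as a simplification.
fn pack_args(args: &[&str]) -> Vec<u8> {
    let mut buf = Vec::new();
    for arg in args {
        buf.extend_from_slice(arg.as_bytes());
        buf.push(0);
    }
    buf
}

fn unpack_args(buf: &[u8]) -> Vec<String> {
    assert_eq!(buf.last(), Some(&0u8), "buffer must end with a NUL terminator");
    buf.split(|&b| b == 0)
        .filter(|chunk| !chunk.is_empty()) // drops the trailing empty chunk after the final NUL
        .map(|chunk| String::from_utf8_lossy(chunk).into_owned())
        .collect()
}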
 
@@ -853,6 +899,27 @@
     asm
 }
 
+fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+    cgcx.opts.target_triple.triple().contains("-ios")
+        || cgcx.opts.target_triple.triple().contains("-darwin")
+        || cgcx.opts.target_triple.triple().contains("-tvos")
+        || cgcx.opts.target_triple.triple().contains("-watchos")
+}
+
+fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+    cgcx.opts.target_triple.triple().contains("-aix")
+}
+
+pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static str {
+    if target_is_apple(cgcx) {
+        "__LLVM,__bitcode\0"
+    } else if target_is_aix(cgcx) {
+        ".ipa\0"
+    } else {
+        ".llvmbc\0"
+    }
+}
+
 /// Embed the bitcode of an LLVM module in the LLVM module itself.
 ///
 /// This is done primarily for iOS where it appears to be standard to compile C
@@ -913,11 +980,8 @@
     // Unfortunately, LLVM provides no way to set custom section flags. For ELF
     // and COFF we emit the sections using module level inline assembly for that
     // reason (see issue #90326 for historical background).
-    let is_aix = cgcx.opts.target_triple.triple().contains("-aix");
-    let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
-        || cgcx.opts.target_triple.triple().contains("-darwin")
-        || cgcx.opts.target_triple.triple().contains("-tvos")
-        || cgcx.opts.target_triple.triple().contains("-watchos");
+    let is_aix = target_is_aix(cgcx);
+    let is_apple = target_is_apple(cgcx);
     if is_apple
         || is_aix
         || cgcx.opts.target_triple.triple().starts_with("wasm")
@@ -932,13 +996,7 @@
         );
         llvm::LLVMSetInitializer(llglobal, llconst);
 
-        let section = if is_apple {
-            "__LLVM,__bitcode\0"
-        } else if is_aix {
-            ".ipa\0"
-        } else {
-            ".llvmbc\0"
-        };
+        let section = bitcode_section_name(cgcx);
         llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
         llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
         llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
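// The per-target section-name choice made by `bitcode_section_name` above,
// sketched as a pure function over the target triple string (the real code
// consults `cgcx.opts.target_triple`). The trailing NUL is kept because the
// name is later handed to `LLVMSetSection` as a C string; the Mach-O name
// carries the "__LLVM," segment prefix. The function name is illustrative.
fn bitcode_section_name_for(triple: &str) -> &'static str {
    let is_apple =
        ["-ios", "-darwin", "-tvos", "-watchos"].iter().any(|&t| triple.contains(t));
    if is_apple {
        "__LLVM,__bitcode\0" // Mach-O: segment name + section name
    } else if triple.contains("-aix") {
        ".ipa\0"
    } else {
        ".llvmbc\0"
    }
}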
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 36c0982..5254c3f 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -95,7 +95,8 @@
         unsafe {
             llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
 
-            let is_generic = instance.args.non_erasable_generics().next().is_some();
+            let is_generic =
+                instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
 
             if is_generic {
                 // This is a monomorphization. Its expected visibility depends
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 24fd5bb..b4b2ab1 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -10,6 +10,7 @@
 
 use cstr::cstr;
 use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
+use rustc_codegen_ssa::errors as ssa_errors;
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::base_n;
 use rustc_data_structures::fx::FxHashMap;
@@ -159,9 +160,9 @@
 
     // Ensure the data-layout values hardcoded remain the defaults.
     if sess.target.is_builtin {
+        // tm is disposed by its drop impl
         let tm = crate::back::write::create_informational_target_machine(tcx.sess);
-        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
-        llvm::LLVMRustDisposeTargetMachine(tm);
+        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, &tm);
 
         let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
         let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
@@ -1000,7 +1001,7 @@
         if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
             self.sess().emit_fatal(Spanned { span, node: err.into_diagnostic() })
         } else {
-            span_bug!(span, "failed to get layout for `{ty}`: {err:?}")
+            self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
         }
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index 7a82d05..763186a 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -1,4 +1,4 @@
-use rustc_middle::mir::coverage::{CounterId, MappedExpressionIndex};
+use rustc_middle::mir::coverage::{CounterId, ExpressionId, Operand};
 
 /// Must match the layout of `LLVMRustCounterKind`.
 #[derive(Copy, Clone, Debug)]
@@ -30,11 +30,8 @@
 }
 
 impl Counter {
-    /// Constructs a new `Counter` of kind `Zero`. For this `CounterKind`, the
-    /// `id` is not used.
-    pub fn zero() -> Self {
-        Self { kind: CounterKind::Zero, id: 0 }
-    }
+    /// A `Counter` of kind `Zero`. For this counter kind, the `id` is not used.
+    pub(crate) const ZERO: Self = Self { kind: CounterKind::Zero, id: 0 };
 
     /// Constructs a new `Counter` of kind `CounterValueReference`.
     pub fn counter_value_reference(counter_id: CounterId) -> Self {
@@ -42,20 +39,16 @@
     }
 
     /// Constructs a new `Counter` of kind `Expression`.
-    pub fn expression(mapped_expression_index: MappedExpressionIndex) -> Self {
-        Self { kind: CounterKind::Expression, id: mapped_expression_index.into() }
+    pub(crate) fn expression(expression_id: ExpressionId) -> Self {
+        Self { kind: CounterKind::Expression, id: expression_id.as_u32() }
     }
 
-    /// Returns true if the `Counter` kind is `Zero`.
-    pub fn is_zero(&self) -> bool {
-        matches!(self.kind, CounterKind::Zero)
-    }
-
-    /// An explicitly-named function to get the ID value, making it more obvious
-    /// that the stored value is now 0-based.
-    pub fn zero_based_id(&self) -> u32 {
-        debug_assert!(!self.is_zero(), "`id` is undefined for CounterKind::Zero");
-        self.id
+    pub(crate) fn from_operand(operand: Operand) -> Self {
+        match operand {
+            Operand::Zero => Self::ZERO,
+            Operand::Counter(id) => Self::counter_value_reference(id),
+            Operand::Expression(id) => Self::expression(id),
+        }
     }
 }
 
@@ -81,6 +74,11 @@
 }
 
 impl CounterExpression {
+    /// The dummy expression `(0 - 0)` has a representation of all zeroes,
+    /// making it marginally more efficient to initialize than `(0 + 0)`.
+    pub(crate) const DUMMY: Self =
+        Self { lhs: Counter::ZERO, kind: ExprKind::Subtract, rhs: Counter::ZERO };
+
     pub fn new(lhs: Counter, kind: ExprKind, rhs: Counter) -> Self {
         Self { kind, lhs, rhs }
     }
@@ -172,7 +170,7 @@
     ) -> Self {
         Self {
             counter,
-            false_counter: Counter::zero(),
+            false_counter: Counter::ZERO,
             file_id,
             expanded_file_id: 0,
             start_line,
@@ -220,8 +218,8 @@
         end_col: u32,
     ) -> Self {
         Self {
-            counter: Counter::zero(),
-            false_counter: Counter::zero(),
+            counter: Counter::ZERO,
+            false_counter: Counter::ZERO,
             file_id,
             expanded_file_id,
             start_line,
@@ -243,8 +241,8 @@
         end_col: u32,
     ) -> Self {
         Self {
-            counter: Counter::zero(),
-            false_counter: Counter::zero(),
+            counter: Counter::ZERO,
+            false_counter: Counter::ZERO,
             file_id,
             expanded_file_id: 0,
             start_line,
@@ -268,7 +266,7 @@
     ) -> Self {
         Self {
             counter,
-            false_counter: Counter::zero(),
+            false_counter: Counter::ZERO,
             file_id,
             expanded_file_id: 0,
             start_line,
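
With `Counter::ZERO` and `Counter::from_operand`, every MIR-level coverage operand maps directly onto an FFI counter, replacing the old `zero()`, `is_zero()` and `zero_based_id()` helpers. A rough model of that mapping, with plain `u32` IDs standing in for rustc's `CounterId`/`ExpressionId` newtypes (the stand-in types are assumptions made for this sketch):

// Simplified stand-ins for the MIR-level operand and the FFI-facing counter.
#[derive(Copy, Clone, Debug)]
enum Operand {
    Zero,
    Counter(u32),
    Expression(u32),
}

#[derive(Copy, Clone, Debug, PartialEq)]
enum CounterKind {
    Zero,
    CounterValueReference,
    Expression,
}

#[derive(Copy, Clone, Debug, PartialEq)]
struct Counter {
    kind: CounterKind,
    id: u32,
}

impl Counter {
    /// A counter of kind `Zero`; the `id` field is unused for this kind.
    const ZERO: Self = Self { kind: CounterKind::Zero, id: 0 };

    fn from_operand(operand: Operand) -> Self {
        match operand {
            Operand::Zero => Self::ZERO,
            Operand::Counter(id) => Self { kind: CounterKind::CounterValueReference, id },
            Operand::Expression(id) => Self { kind: CounterKind::Expression, id },
        }
    }
}

fn main() {
    assert_eq!(Counter::from_operand(Operand::Zero), Counter::ZERO);
    assert_eq!(Counter::from_operand(Operand::Counter(3)).id, 3);
    assert_eq!(Counter::from_operand(Operand::Expression(5)).kind, CounterKind::Expression);
}
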
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
index f1e68af..e83110d 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -1,10 +1,8 @@
 use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind};
 
-use rustc_index::{IndexSlice, IndexVec};
-use rustc_middle::bug;
-use rustc_middle::mir::coverage::{
-    CodeRegion, CounterId, ExpressionId, MappedExpressionIndex, Op, Operand,
-};
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_index::IndexVec;
+use rustc_middle::mir::coverage::{CodeRegion, CounterId, ExpressionId, Op, Operand};
 use rustc_middle::ty::Instance;
 use rustc_middle::ty::TyCtxt;
 
@@ -128,6 +126,58 @@
         self.unreachable_regions.push(region)
     }
 
+    /// Perform some simplifications to make the final coverage mappings
+    /// slightly smaller.
+    ///
+    /// This method mainly exists to preserve the simplifications that were
+    /// already being performed by the Rust-side expression renumbering, so that
+    /// the resulting coverage mappings don't get worse.
+    pub(crate) fn simplify_expressions(&mut self) {
+        // The set of expressions that either were optimized out entirely, or
+        // have zero as both of their operands, and will therefore always have
+        // a value of zero. Other expressions that refer to these as operands
+        // can have those operands replaced with `Operand::Zero`.
+        let mut zero_expressions = FxIndexSet::default();
+
+        // For each expression, perform simplifications based on lower-numbered
+        // expressions, and then update the set of always-zero expressions if
+        // necessary.
+        // (By construction, expressions can only refer to other expressions
+        // that have lower IDs, so one simplification pass is sufficient.)
+        for (id, maybe_expression) in self.expressions.iter_enumerated_mut() {
+            let Some(expression) = maybe_expression else {
+                // If an expression is missing, it must have been optimized away,
+                // so any operand that refers to it can be replaced with zero.
+                zero_expressions.insert(id);
+                continue;
+            };
+
+            // If an operand refers to an expression that is always zero, then
+            // that operand can be replaced with `Operand::Zero`.
+            let maybe_set_operand_to_zero = |operand: &mut Operand| match &*operand {
+                Operand::Expression(id) if zero_expressions.contains(id) => {
+                    *operand = Operand::Zero;
+                }
+                _ => (),
+            };
+            maybe_set_operand_to_zero(&mut expression.lhs);
+            maybe_set_operand_to_zero(&mut expression.rhs);
+
+            // Coverage counter values cannot be negative, so if an expression
+            // involves subtraction from zero, assume that its RHS must also be zero.
+            // (Do this after simplifications that could set the LHS to zero.)
+            if let Expression { lhs: Operand::Zero, op: Op::Subtract, .. } = expression {
+                expression.rhs = Operand::Zero;
+            }
+
+            // After the above simplifications, if both operands are zero, then
+            // we know that this expression is always zero too.
+            if let Expression { lhs: Operand::Zero, rhs: Operand::Zero, .. } = expression {
+                zero_expressions.insert(id);
+            }
+        }
+    }
+
     /// Return the source hash, generated from the HIR node structure, and used to indicate whether
     /// or not the source code structure changed between different compilations.
     pub fn source_hash(&self) -> u64 {
@@ -146,8 +196,14 @@
             self.instance
         );
 
+        let counter_expressions = self.counter_expressions();
+        // Expression IDs are indices into `self.expressions`, and on the LLVM
+        // side they will be treated as indices into `counter_expressions`, so
+        // the two vectors should correspond 1:1.
+        assert_eq!(self.expressions.len(), counter_expressions.len());
+
         let counter_regions = self.counter_regions();
-        let (counter_expressions, expression_regions) = self.expressions_with_regions();
+        let expression_regions = self.expression_regions();
         let unreachable_regions = self.unreachable_regions();
 
         let counter_regions =
@@ -163,149 +219,53 @@
         })
     }
 
-    fn expressions_with_regions(
-        &self,
-    ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
-        let mut counter_expressions = Vec::with_capacity(self.expressions.len());
-        let mut expression_regions = Vec::with_capacity(self.expressions.len());
-        let mut new_indexes = IndexVec::from_elem_n(None, self.expressions.len());
+    /// Convert this function's coverage expression data into a form that can be
+    /// passed through FFI to LLVM.
+    fn counter_expressions(&self) -> Vec<CounterExpression> {
+        // We know that LLVM will optimize out any unused expressions before
+        // producing the final coverage map, so there's no need to do the same
+        // thing on the Rust side unless we're confident we can do much better.
+        // (See `CounterExpressionsMinimizer` in `CoverageMappingWriter.cpp`.)
 
-        // This closure converts any `Expression` operand (`lhs` or `rhs` of the `Op::Add` or
-        // `Op::Subtract` operation) into its native `llvm::coverage::Counter::CounterKind` type
-        // and value.
-        //
-        // Expressions will be returned from this function in a sequential vector (array) of
-        // `CounterExpression`, so the expression IDs must be mapped from their original,
-        // potentially sparse set of indexes.
-        //
-        // An `Expression` as an operand will have already been encountered as an `Expression` with
-        // operands, so its new_index will already have been generated (as a 1-up index value).
-        // (If an `Expression` as an operand does not have a corresponding new_index, it was
-        // probably optimized out, after the expression was injected into the MIR, so it will
-        // get a `CounterKind::Zero` instead.)
-        //
-        // In other words, an `Expression`s at any given index can include other expressions as
-        // operands, but expression operands can only come from the subset of expressions having
-        // `expression_index`s lower than the referencing `Expression`. Therefore, it is
-        // reasonable to look up the new index of an expression operand while the `new_indexes`
-        // vector is only complete up to the current `ExpressionIndex`.
-        type NewIndexes = IndexSlice<ExpressionId, Option<MappedExpressionIndex>>;
-        let id_to_counter = |new_indexes: &NewIndexes, operand: Operand| match operand {
-            Operand::Zero => Some(Counter::zero()),
-            Operand::Counter(id) => Some(Counter::counter_value_reference(id)),
-            Operand::Expression(id) => {
-                self.expressions
-                    .get(id)
-                    .expect("expression id is out of range")
-                    .as_ref()
-                    // If an expression was optimized out, assume it would have produced a count
-                    // of zero. This ensures that expressions dependent on optimized-out
-                    // expressions are still valid.
-                    .map_or(Some(Counter::zero()), |_| new_indexes[id].map(Counter::expression))
-            }
-        };
-
-        for (original_index, expression) in
-            self.expressions.iter_enumerated().filter_map(|(original_index, entry)| {
-                // Option::map() will return None to filter out missing expressions. This may happen
-                // if, for example, a MIR-instrumented expression is removed during an optimization.
-                entry.as_ref().map(|expression| (original_index, expression))
+        self.expressions
+            .iter()
+            .map(|expression| match expression {
+                None => {
+                    // This expression ID was allocated, but we never saw the
+                    // actual expression, so it must have been optimized out.
+                    // Replace it with a dummy expression, and let LLVM take
+                    // care of omitting it from the expression list.
+                    CounterExpression::DUMMY
+                }
+                &Some(Expression { lhs, op, rhs, .. }) => {
+                    // Convert the operands and operator as normal.
+                    CounterExpression::new(
+                        Counter::from_operand(lhs),
+                        match op {
+                            Op::Add => ExprKind::Add,
+                            Op::Subtract => ExprKind::Subtract,
+                        },
+                        Counter::from_operand(rhs),
+                    )
+                }
             })
-        {
-            let optional_region = &expression.region;
-            let Expression { lhs, op, rhs, .. } = *expression;
+            .collect::<Vec<_>>()
+    }
 
-            if let Some(Some((lhs_counter, mut rhs_counter))) = id_to_counter(&new_indexes, lhs)
-                .map(|lhs_counter| {
-                    id_to_counter(&new_indexes, rhs).map(|rhs_counter| (lhs_counter, rhs_counter))
-                })
-            {
-                if lhs_counter.is_zero() && op.is_subtract() {
-                    // The left side of a subtraction was probably optimized out. As an example,
-                    // a branch condition might be evaluated as a constant expression, and the
-                    // branch could be removed, dropping unused counters in the process.
-                    //
-                    // Since counters are unsigned, we must assume the result of the expression
-                    // can be no more and no less than zero. An expression known to evaluate to zero
-                    // does not need to be added to the coverage map.
-                    //
-                    // Coverage test `loops_branches.rs` includes multiple variations of branches
-                    // based on constant conditional (literal `true` or `false`), and demonstrates
-                    // that the expected counts are still correct.
-                    debug!(
-                        "Expression subtracts from zero (assume unreachable): \
-                        original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
-                        original_index, lhs, op, rhs, optional_region,
-                    );
-                    rhs_counter = Counter::zero();
-                }
-                debug_assert!(
-                    lhs_counter.is_zero()
-                        // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
-                        || ((lhs_counter.zero_based_id() as usize)
-                            <= usize::max(self.counters.len(), self.expressions.len())),
-                    "lhs id={} > both counters.len()={} and expressions.len()={}
-                    ({:?} {:?} {:?})",
-                    lhs_counter.zero_based_id(),
-                    self.counters.len(),
-                    self.expressions.len(),
-                    lhs_counter,
-                    op,
-                    rhs_counter,
-                );
-
-                debug_assert!(
-                    rhs_counter.is_zero()
-                        // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
-                        || ((rhs_counter.zero_based_id() as usize)
-                            <= usize::max(self.counters.len(), self.expressions.len())),
-                    "rhs id={} > both counters.len()={} and expressions.len()={}
-                    ({:?} {:?} {:?})",
-                    rhs_counter.zero_based_id(),
-                    self.counters.len(),
-                    self.expressions.len(),
-                    lhs_counter,
-                    op,
-                    rhs_counter,
-                );
-
-                // Both operands exist. `Expression` operands exist in `self.expressions` and have
-                // been assigned a `new_index`.
-                let mapped_expression_index =
-                    MappedExpressionIndex::from(counter_expressions.len());
-                let expression = CounterExpression::new(
-                    lhs_counter,
-                    match op {
-                        Op::Add => ExprKind::Add,
-                        Op::Subtract => ExprKind::Subtract,
-                    },
-                    rhs_counter,
-                );
-                debug!(
-                    "Adding expression {:?} = {:?}, region: {:?}",
-                    mapped_expression_index, expression, optional_region
-                );
-                counter_expressions.push(expression);
-                new_indexes[original_index] = Some(mapped_expression_index);
-                if let Some(region) = optional_region {
-                    expression_regions.push((Counter::expression(mapped_expression_index), region));
-                }
-            } else {
-                bug!(
-                    "expression has one or more missing operands \
-                      original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
-                    original_index,
-                    lhs,
-                    op,
-                    rhs,
-                    optional_region,
-                );
-            }
-        }
-        (counter_expressions, expression_regions.into_iter())
+    fn expression_regions(&self) -> Vec<(Counter, &CodeRegion)> {
+        // Find all of the expression IDs that weren't optimized out AND have
+        // an attached code region, and return the corresponding mapping as a
+        // counter/region pair.
+        self.expressions
+            .iter_enumerated()
+            .filter_map(|(id, expression)| {
+                let code_region = expression.as_ref()?.region.as_ref()?;
+                Some((Counter::expression(id), code_region))
+            })
+            .collect::<Vec<_>>()
     }
 
     fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
-        self.unreachable_regions.iter().map(|region| (Counter::zero(), region))
+        self.unreachable_regions.iter().map(|region| (Counter::ZERO, region))
     }
 }
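
`simplify_expressions` makes one forward pass over the expression table: missing expressions are treated as always-zero, operands that point at always-zero expressions are rewritten to `Operand::Zero`, a subtraction from zero forces its RHS to zero, and any expression whose operands are both zero joins the always-zero set. A self-contained sketch of that pass, with simplified stand-ins for the `Operand` and `Expression` types:

use std::collections::HashSet;

#[derive(Copy, Clone, Debug, PartialEq)]
enum Operand {
    Zero,
    Counter(u32),
    Expression(usize),
}

#[derive(Copy, Clone, Debug, PartialEq)]
enum Op {
    Add,
    Subtract,
}

#[derive(Copy, Clone, Debug)]
struct Expression {
    lhs: Operand,
    op: Op,
    rhs: Operand,
}

/// One forward pass suffices because an expression may only refer to
/// lower-numbered expressions, mirroring the invariant in the real table.
fn simplify(expressions: &mut [Option<Expression>]) {
    // IDs of expressions known to always evaluate to zero.
    let mut zero: HashSet<usize> = HashSet::new();

    fn fold_zero(zero: &HashSet<usize>, operand: &mut Operand) {
        if let Operand::Expression(id) = *operand {
            if zero.contains(&id) {
                *operand = Operand::Zero;
            }
        }
    }

    for id in 0..expressions.len() {
        let Some(expr) = &mut expressions[id] else {
            // Optimized out entirely: any reference to it counts as zero.
            zero.insert(id);
            continue;
        };

        fold_zero(&zero, &mut expr.lhs);
        fold_zero(&zero, &mut expr.rhs);

        // Counters are unsigned, so `0 - rhs` is only consistent if rhs is zero too.
        if expr.lhs == Operand::Zero && expr.op == Op::Subtract {
            expr.rhs = Operand::Zero;
        }
        // Both operands zero means the whole expression is always zero.
        if expr.lhs == Operand::Zero && expr.rhs == Operand::Zero {
            zero.insert(id);
        }
    }
}

fn main() {
    let mut exprs = vec![
        None, // expression 0 was optimized away
        Some(Expression { lhs: Operand::Zero, op: Op::Subtract, rhs: Operand::Expression(0) }),
        Some(Expression { lhs: Operand::Expression(1), op: Op::Add, rhs: Operand::Counter(7) }),
    ];
    simplify(&mut exprs);
    assert_eq!(exprs[2].unwrap().lhs, Operand::Zero); // operand folded to zero
}
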
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 97a99e5..d4e7752 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,13 +1,14 @@
 use crate::common::CodegenCx;
 use crate::coverageinfo;
-use crate::coverageinfo::ffi::{Counter, CounterExpression, CounterMappingRegion};
+use crate::coverageinfo::ffi::CounterMappingRegion;
+use crate::coverageinfo::map_data::FunctionCoverage;
 use crate::llvm;
 
 use rustc_codegen_ssa::traits::ConstMethods;
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
-use rustc_llvm::RustString;
+use rustc_index::IndexVec;
 use rustc_middle::bug;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::coverage::CodeRegion;
@@ -55,21 +56,21 @@
         return;
     }
 
-    let mut mapgen = CoverageMapGenerator::new(tcx);
+    let mut global_file_table = GlobalFileTable::new(tcx);
 
     // Encode coverage mappings and generate function records
     let mut function_data = Vec::new();
-    for (instance, function_coverage) in function_coverage_map {
+    for (instance, mut function_coverage) in function_coverage_map {
         debug!("Generate function coverage for {}, {:?}", cx.codegen_unit.name(), instance);
+        function_coverage.simplify_expressions();
+        let function_coverage = function_coverage;
+
         let mangled_function_name = tcx.symbol_name(instance).name;
         let source_hash = function_coverage.source_hash();
         let is_used = function_coverage.is_used();
-        let (expressions, counter_regions) =
-            function_coverage.get_expressions_and_counter_regions();
 
-        let coverage_mapping_buffer = llvm::build_byte_buffer(|coverage_mapping_buffer| {
-            mapgen.write_coverage_mapping(expressions, counter_regions, coverage_mapping_buffer);
-        });
+        let coverage_mapping_buffer =
+            encode_mappings_for_function(&mut global_file_table, &function_coverage);
 
         if coverage_mapping_buffer.is_empty() {
             if function_coverage.is_used() {
@@ -87,19 +88,14 @@
     }
 
     // Encode all filenames referenced by counters/expressions in this module
-    let filenames_buffer = llvm::build_byte_buffer(|filenames_buffer| {
-        coverageinfo::write_filenames_section_to_buffer(
-            mapgen.filenames.iter().map(Symbol::as_str),
-            filenames_buffer,
-        );
-    });
+    let filenames_buffer = global_file_table.into_filenames_buffer();
 
     let filenames_size = filenames_buffer.len();
     let filenames_val = cx.const_bytes(&filenames_buffer);
     let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer);
 
     // Generate the LLVM IR representation of the coverage map and store it in a well-known global
-    let cov_data_val = mapgen.generate_coverage_map(cx, version, filenames_size, filenames_val);
+    let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val);
 
     let covfun_section_name = coverageinfo::covfun_section_name(cx);
     for (mangled_function_name, source_hash, is_used, coverage_mapping_buffer) in function_data {
@@ -118,13 +114,13 @@
     coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
 }
 
-struct CoverageMapGenerator {
-    filenames: FxIndexSet<Symbol>,
+struct GlobalFileTable {
+    global_file_table: FxIndexSet<Symbol>,
 }
 
-impl CoverageMapGenerator {
+impl GlobalFileTable {
     fn new(tcx: TyCtxt<'_>) -> Self {
-        let mut filenames = FxIndexSet::default();
+        let mut global_file_table = FxIndexSet::default();
         // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
         // requires setting the first filename to the compilation directory.
         // Since rustc generates coverage maps with relative paths, the
@@ -133,94 +129,114 @@
         let working_dir = Symbol::intern(
             &tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy(),
         );
-        filenames.insert(working_dir);
-        Self { filenames }
+        global_file_table.insert(working_dir);
+        Self { global_file_table }
     }
 
-    /// Using the `expressions` and `counter_regions` collected for the current function, generate
-    /// the `mapping_regions` and `virtual_file_mapping`, and capture any new filenames. Then use
-    /// LLVM APIs to encode the `virtual_file_mapping`, `expressions`, and `mapping_regions` into
-    /// the given `coverage_mapping` byte buffer, compliant with the LLVM Coverage Mapping format.
-    fn write_coverage_mapping<'a>(
-        &mut self,
-        expressions: Vec<CounterExpression>,
-        counter_regions: impl Iterator<Item = (Counter, &'a CodeRegion)>,
-        coverage_mapping_buffer: &RustString,
-    ) {
-        let mut counter_regions = counter_regions.collect::<Vec<_>>();
-        if counter_regions.is_empty() {
-            return;
-        }
+    fn global_file_id_for_file_name(&mut self, file_name: Symbol) -> u32 {
+        let (global_file_id, _) = self.global_file_table.insert_full(file_name);
+        global_file_id as u32
+    }
 
-        let mut virtual_file_mapping = Vec::new();
-        let mut mapping_regions = Vec::new();
-        let mut current_file_name = None;
-        let mut current_file_id = 0;
+    fn into_filenames_buffer(self) -> Vec<u8> {
+        // This method takes `self` so that the caller can't accidentally
+        // modify the original file table after encoding it into a buffer.
 
-        // Convert the list of (Counter, CodeRegion) pairs to an array of `CounterMappingRegion`, sorted
-        // by filename and position. Capture any new files to compute the `CounterMappingRegion`s
-        // `file_id` (indexing files referenced by the current function), and construct the
-        // function-specific `virtual_file_mapping` from `file_id` to its index in the module's
-        // `filenames` array.
-        counter_regions.sort_unstable_by_key(|(_counter, region)| *region);
-        for (counter, region) in counter_regions {
-            let CodeRegion { file_name, start_line, start_col, end_line, end_col } = *region;
-            let same_file = current_file_name.is_some_and(|p| p == file_name);
-            if !same_file {
-                if current_file_name.is_some() {
-                    current_file_id += 1;
-                }
-                current_file_name = Some(file_name);
-                debug!("  file_id: {} = '{:?}'", current_file_id, file_name);
-                let (filenames_index, _) = self.filenames.insert_full(file_name);
-                virtual_file_mapping.push(filenames_index as u32);
-            }
-            debug!("Adding counter {:?} to map for {:?}", counter, region);
+        llvm::build_byte_buffer(|buffer| {
+            coverageinfo::write_filenames_section_to_buffer(
+                self.global_file_table.iter().map(Symbol::as_str),
+                buffer,
+            );
+        })
+    }
+}
+
+/// Using the expressions and counter regions collected for a single function,
+/// generate the variable-sized payload of its corresponding `__llvm_covfun`
+/// entry. The payload is returned as a vector of bytes.
+///
+/// Newly-encountered filenames will be added to the global file table.
+fn encode_mappings_for_function(
+    global_file_table: &mut GlobalFileTable,
+    function_coverage: &FunctionCoverage<'_>,
+) -> Vec<u8> {
+    let (expressions, counter_regions) = function_coverage.get_expressions_and_counter_regions();
+
+    let mut counter_regions = counter_regions.collect::<Vec<_>>();
+    if counter_regions.is_empty() {
+        return Vec::new();
+    }
+
+    let mut virtual_file_mapping = IndexVec::<u32, u32>::new();
+    let mut mapping_regions = Vec::with_capacity(counter_regions.len());
+
+    // Sort the list of (counter, region) mapping pairs by region, so that they
+    // can be grouped by filename. Prepare file IDs for each filename, and
+    // prepare the mapping data so that we can pass it through FFI to LLVM.
+    counter_regions.sort_by_key(|(_counter, region)| *region);
+    for counter_regions_for_file in
+        counter_regions.group_by(|(_, a), (_, b)| a.file_name == b.file_name)
+    {
+        // Look up (or allocate) the global file ID for this filename.
+        let file_name = counter_regions_for_file[0].1.file_name;
+        let global_file_id = global_file_table.global_file_id_for_file_name(file_name);
+
+        // Associate that global file ID with a local file ID for this function.
+        let local_file_id: u32 = virtual_file_mapping.push(global_file_id);
+        debug!("  file id: local {local_file_id} => global {global_file_id} = '{file_name:?}'");
+
+        // For each counter/region pair in this function+file, convert it to a
+        // form suitable for FFI.
+        for &(counter, region) in counter_regions_for_file {
+            let CodeRegion { file_name: _, start_line, start_col, end_line, end_col } = *region;
+
+            debug!("Adding counter {counter:?} to map for {region:?}");
             mapping_regions.push(CounterMappingRegion::code_region(
                 counter,
-                current_file_id,
+                local_file_id,
                 start_line,
                 start_col,
                 end_line,
                 end_col,
             ));
         }
+    }
 
-        // Encode and append the current function's coverage mapping data
+    // Encode the function's coverage mappings into a buffer.
+    llvm::build_byte_buffer(|buffer| {
         coverageinfo::write_mapping_to_buffer(
-            virtual_file_mapping,
+            virtual_file_mapping.raw,
             expressions,
             mapping_regions,
-            coverage_mapping_buffer,
+            buffer,
         );
-    }
+    })
+}
 
-    /// Construct coverage map header and the array of function records, and combine them into the
-    /// coverage map. Save the coverage map data into the LLVM IR as a static global using a
-    /// specific, well-known section and name.
-    fn generate_coverage_map<'ll>(
-        self,
-        cx: &CodegenCx<'ll, '_>,
-        version: u32,
-        filenames_size: usize,
-        filenames_val: &'ll llvm::Value,
-    ) -> &'ll llvm::Value {
-        debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
+/// Construct coverage map header and the array of function records, and combine them into the
+/// coverage map. Save the coverage map data into the LLVM IR as a static global using a
+/// specific, well-known section and name.
+fn generate_coverage_map<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    version: u32,
+    filenames_size: usize,
+    filenames_val: &'ll llvm::Value,
+) -> &'ll llvm::Value {
+    debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
 
-        // Create the coverage data header (Note, fields 0 and 2 are now always zero,
-        // as of `llvm::coverage::CovMapVersion::Version4`.)
-        let zero_was_n_records_val = cx.const_u32(0);
-        let filenames_size_val = cx.const_u32(filenames_size as u32);
-        let zero_was_coverage_size_val = cx.const_u32(0);
-        let version_val = cx.const_u32(version);
-        let cov_data_header_val = cx.const_struct(
-            &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
-            /*packed=*/ false,
-        );
+    // Create the coverage data header (Note, fields 0 and 2 are now always zero,
+    // as of `llvm::coverage::CovMapVersion::Version4`.)
+    let zero_was_n_records_val = cx.const_u32(0);
+    let filenames_size_val = cx.const_u32(filenames_size as u32);
+    let zero_was_coverage_size_val = cx.const_u32(0);
+    let version_val = cx.const_u32(version);
+    let cov_data_header_val = cx.const_struct(
+        &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
+        /*packed=*/ false,
+    );
 
-        // Create the complete LLVM coverage data value to add to the LLVM IR
-        cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
-    }
+    // Create the complete LLVM coverage data value to add to the LLVM IR
+    cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
 }
 
 /// Construct a function record and combine it with the function's coverage mapping data.
@@ -317,10 +333,10 @@
     {
         let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
 
-        // If a function is marked `#[no_coverage]`, then skip generating a
+        // If a function is marked `#[coverage(off)]`, then skip generating a
         // dead code stub for it.
         if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
-            debug!("skipping unused fn marked #[no_coverage]: {:?}", non_codegenned_def_id);
+            debug!("skipping unused fn marked #[coverage(off)]: {:?}", non_codegenned_def_id);
             continue;
         }
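
`GlobalFileTable` interns every filename once per module, while `encode_mappings_for_function` builds a per-function `virtual_file_mapping` whose local file IDs index into that global table. A standalone sketch of the two-level mapping, with a `HashMap`-backed table standing in for `FxIndexSet` and plain `(file, line)` pairs standing in for code regions (both are assumptions for illustration; the real encoder also seeds the table with the working directory and groups regions with the unstable `slice::group_by`):

use std::collections::HashMap;

/// Module-wide filename table: interns each filename once and hands out stable IDs.
#[derive(Default)]
struct GlobalFileTable {
    files: Vec<String>,
    ids: HashMap<String, u32>,
}

impl GlobalFileTable {
    fn global_file_id(&mut self, file: &str) -> u32 {
        if let Some(&id) = self.ids.get(file) {
            return id;
        }
        let id = self.files.len() as u32;
        self.files.push(file.to_string());
        self.ids.insert(file.to_string(), id);
        id
    }
}

/// Build one function's local-to-global file mapping from (file, line) regions
/// that are already sorted by filename, as in the real encoder.
fn encode_function(global: &mut GlobalFileTable, regions: &[(&str, u32)]) -> Vec<u32> {
    let mut virtual_file_mapping = Vec::new();
    let mut current_file: Option<&str> = None;
    for &(file, _line) in regions {
        if current_file != Some(file) {
            // New filename group: allocate the next local ID for this function
            // and point it at the module-wide entry.
            virtual_file_mapping.push(global.global_file_id(file));
            current_file = Some(file);
        }
        // The real code would emit a CounterMappingRegion here, carrying the
        // index of the entry just pushed as its local file ID.
    }
    virtual_file_mapping
}

fn main() {
    let mut table = GlobalFileTable::default();
    let f = encode_function(&mut table, &[("a.rs", 1), ("a.rs", 9), ("b.rs", 3)]);
    let g = encode_function(&mut table, &[("b.rs", 2)]);
    assert_eq!(f, vec![0, 1]);
    assert_eq!(g, vec![1]); // "b.rs" reuses its global ID
    assert_eq!(table.files, vec!["a.rs".to_string(), "b.rs".to_string()]);
}
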
 
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 621fd36..c70cb67 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -16,7 +16,7 @@
 use rustc_hir::def_id::DefId;
 use rustc_llvm::RustString;
 use rustc_middle::bug;
-use rustc_middle::mir::coverage::{CodeRegion, CounterId, CoverageKind, ExpressionId, Op, Operand};
+use rustc_middle::mir::coverage::{CounterId, CoverageKind};
 use rustc_middle::mir::Coverage;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
@@ -104,144 +104,67 @@
     fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) {
         let bx = self;
 
+        let Some(coverage_context) = bx.coverage_context() else { return };
+        let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+        let func_coverage = coverage_map
+            .entry(instance)
+            .or_insert_with(|| FunctionCoverage::new(bx.tcx(), instance));
+
         let Coverage { kind, code_region } = coverage.clone();
         match kind {
             CoverageKind::Counter { function_source_hash, id } => {
-                if bx.set_function_source_hash(instance, function_source_hash) {
-                    // If `set_function_source_hash()` returned true, the coverage map is enabled,
-                    // so continue adding the counter.
-                    if let Some(code_region) = code_region {
-                        // Note: Some counters do not have code regions, but may still be referenced
-                        // from expressions. In that case, don't add the counter to the coverage map,
-                        // but do inject the counter intrinsic.
-                        bx.add_coverage_counter(instance, id, code_region);
-                    }
+                debug!(
+                    "ensuring function source hash is set for instance={:?}; function_source_hash={}",
+                    instance, function_source_hash,
+                );
+                func_coverage.set_function_source_hash(function_source_hash);
 
-                    let coverageinfo = bx.tcx().coverageinfo(instance.def);
-
-                    let fn_name = bx.get_pgo_func_name_var(instance);
-                    let hash = bx.const_u64(function_source_hash);
-                    let num_counters = bx.const_u32(coverageinfo.num_counters);
-                    let index = bx.const_u32(id.as_u32());
+                if let Some(code_region) = code_region {
+                    // Note: Some counters do not have code regions, but may still be referenced
+                    // from expressions. In that case, don't add the counter to the coverage map,
+                    // but do inject the counter intrinsic.
                     debug!(
-                        "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
-                        fn_name, hash, num_counters, index,
+                        "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
+                        instance, id, code_region,
                     );
-                    bx.instrprof_increment(fn_name, hash, num_counters, index);
+                    func_coverage.add_counter(id, code_region);
                 }
+                // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
+                // as that needs an exclusive borrow.
+                drop(coverage_map);
+
+                let coverageinfo = bx.tcx().coverageinfo(instance.def);
+
+                let fn_name = bx.get_pgo_func_name_var(instance);
+                let hash = bx.const_u64(function_source_hash);
+                let num_counters = bx.const_u32(coverageinfo.num_counters);
+                let index = bx.const_u32(id.as_u32());
+                debug!(
+                    "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+                    fn_name, hash, num_counters, index,
+                );
+                bx.instrprof_increment(fn_name, hash, num_counters, index);
             }
             CoverageKind::Expression { id, lhs, op, rhs } => {
-                bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
+                debug!(
+                    "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; region: {:?}",
+                    instance, id, lhs, op, rhs, code_region,
+                );
+                func_coverage.add_counter_expression(id, lhs, op, rhs, code_region);
             }
             CoverageKind::Unreachable => {
-                bx.add_coverage_unreachable(
-                    instance,
-                    code_region.expect("unreachable regions always have code regions"),
+                let code_region =
+                    code_region.expect("unreachable regions always have code regions");
+                debug!(
+                    "adding unreachable code to coverage_map: instance={:?}, at {:?}",
+                    instance, code_region,
                 );
+                func_coverage.add_unreachable_region(code_region);
             }
         }
     }
 }
 
-// These methods used to be part of trait `CoverageInfoBuilderMethods`, but
-// after moving most coverage code out of SSA they are now just ordinary methods.
-impl<'tcx> Builder<'_, '_, 'tcx> {
-    /// Returns true if the function source hash was added to the coverage map (even if it had
-    /// already been added, for this instance). Returns false *only* if `-C instrument-coverage` is
-    /// not enabled (a coverage map is not being generated).
-    fn set_function_source_hash(
-        &mut self,
-        instance: Instance<'tcx>,
-        function_source_hash: u64,
-    ) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "ensuring function source hash is set for instance={:?}; function_source_hash={}",
-                instance, function_source_hash,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .set_function_source_hash(function_source_hash);
-            true
-        } else {
-            false
-        }
-    }
-
-    /// Returns true if the counter was added to the coverage map; false if `-C instrument-coverage`
-    /// is not enabled (a coverage map is not being generated).
-    fn add_coverage_counter(
-        &mut self,
-        instance: Instance<'tcx>,
-        id: CounterId,
-        region: CodeRegion,
-    ) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
-                instance, id, region,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .add_counter(id, region);
-            true
-        } else {
-            false
-        }
-    }
-
-    /// Returns true if the expression was added to the coverage map; false if
-    /// `-C instrument-coverage` is not enabled (a coverage map is not being generated).
-    fn add_coverage_counter_expression(
-        &mut self,
-        instance: Instance<'tcx>,
-        id: ExpressionId,
-        lhs: Operand,
-        op: Op,
-        rhs: Operand,
-        region: Option<CodeRegion>,
-    ) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \
-                region: {:?}",
-                instance, id, lhs, op, rhs, region,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .add_counter_expression(id, lhs, op, rhs, region);
-            true
-        } else {
-            false
-        }
-    }
-
-    /// Returns true if the region was added to the coverage map; false if `-C instrument-coverage`
-    /// is not enabled (a coverage map is not being generated).
-    fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
-        if let Some(coverage_context) = self.coverage_context() {
-            debug!(
-                "adding unreachable code to coverage_map: instance={:?}, at {:?}",
-                instance, region,
-            );
-            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
-            coverage_map
-                .entry(instance)
-                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
-                .add_unreachable_region(region);
-            true
-        } else {
-            false
-        }
-    }
-}
-
 fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<'tcx> {
     let tcx = cx.tcx;
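
`add_coverage` now fetches (or creates) the per-instance `FunctionCoverage` through one `RefCell` borrow and, in the counter case, drops the `RefMut` before emitting the `instrprof.increment` intrinsic. A minimal sketch of that borrow-then-release pattern, with std types standing in for the codegen context (the names below are illustrative, not rustc's):

use std::cell::RefCell;
use std::collections::HashMap;

#[derive(Default)]
struct FunctionCoverage {
    counters: Vec<u32>,
}

#[derive(Default)]
struct CoverageContext {
    // Interior mutability, because many builder calls share the same map.
    function_coverage_map: RefCell<HashMap<&'static str, FunctionCoverage>>,
}

impl CoverageContext {
    fn add_counter(&self, instance: &'static str, id: u32) {
        let mut map = self.function_coverage_map.borrow_mut();
        map.entry(instance).or_default().counters.push(id);
        // Release the RefMut before doing anything that may borrow the map
        // again, such as emitting llvm.instrprof.increment in the real code.
        drop(map);
        self.emit_increment(instance, id);
    }

    fn emit_increment(&self, instance: &str, id: u32) {
        // Placeholder for the intrinsic call; re-borrowing here would panic at
        // runtime if the RefMut taken in add_counter were still alive.
        let map = self.function_coverage_map.borrow();
        let counters = map.get(instance).map_or(0, |fc| fc.counters.len());
        println!("instrprof.increment({instance}, index {id}); {counters} counters recorded");
    }
}

fn main() {
    let cx = CoverageContext::default();
    cx.add_counter("demo::main", 0);
    cx.add_counter("demo::main", 1);
}
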
 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index d174a35..aff764f 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -20,7 +20,7 @@
     cx: &CodegenCx<'ll, 'tcx>,
     instance: Instance<'tcx>,
     mir: &Body<'tcx>,
-    debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+    debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
 ) {
     // Find all scopes with variables defined in them.
     let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
@@ -51,7 +51,7 @@
     instance: Instance<'tcx>,
     mir: &Body<'tcx>,
     variables: &Option<BitSet<SourceScope>>,
-    debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+    debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
     instantiated: &mut BitSet<SourceScope>,
     scope: SourceScope,
 ) {
@@ -68,7 +68,7 @@
         let file = cx.sess().source_map().lookup_source_file(mir.span.lo());
         debug_context.scopes[scope] = DebugScope {
             file_start_pos: file.start_pos,
-            file_end_pos: file.end_pos,
+            file_end_pos: file.end_position(),
             ..debug_context.scopes[scope]
         };
         instantiated.insert(scope);
@@ -86,27 +86,31 @@
     let loc = cx.lookup_debug_loc(scope_data.span.lo());
     let file_metadata = file_metadata(cx, &loc.file);
 
-    let dbg_scope = match scope_data.inlined {
+    let parent_dbg_scope = match scope_data.inlined {
         Some((callee, _)) => {
             // FIXME(eddyb) this would be `self.monomorphize(&callee)`
             // if this is moved to `rustc_codegen_ssa::mir::debuginfo`.
-            let callee = cx.tcx.subst_and_normalize_erasing_regions(
+            let callee = cx.tcx.instantiate_and_normalize_erasing_regions(
                 instance.args,
                 ty::ParamEnv::reveal_all(),
                 ty::EarlyBinder::bind(callee),
             );
-            let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
-            cx.dbg_scope_fn(callee, callee_fn_abi, None)
+            debug_context.inlined_function_scopes.entry(callee).or_insert_with(|| {
+                let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
+                cx.dbg_scope_fn(callee, callee_fn_abi, None)
+            })
         }
-        None => unsafe {
-            llvm::LLVMRustDIBuilderCreateLexicalBlock(
-                DIB(cx),
-                parent_scope.dbg_scope,
-                file_metadata,
-                loc.line,
-                loc.col,
-            )
-        },
+        None => parent_scope.dbg_scope,
+    };
+
+    let dbg_scope = unsafe {
+        llvm::LLVMRustDIBuilderCreateLexicalBlock(
+            DIB(cx),
+            parent_dbg_scope,
+            file_metadata,
+            loc.line,
+            loc.col,
+        )
     };
 
     let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
@@ -120,7 +124,7 @@
         dbg_scope,
         inlined_at: inlined_at.or(parent_scope.inlined_at),
         file_start_pos: loc.file.start_pos,
-        file_end_pos: loc.file.end_pos,
+        file_end_pos: loc.file.end_position(),
     };
     instantiated.insert(scope);
 }
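
The debuginfo change caches one scope per inlined callee via `inlined_function_scopes.entry(callee).or_insert_with(..)`, so repeated inlining of the same function reuses a single `DIScope`, and a lexical block is now created for inlined and non-inlined scopes alike. A tiny sketch of the memoization half of that, with strings standing in for instances and debug scopes (purely illustrative types):

use std::collections::HashMap;

/// Cache one debug scope per inlined callee, so inlining the same function
/// several times into one caller reuses a single scope node.
struct FunctionDebugContext {
    inlined_function_scopes: HashMap<String, String>,
}

impl FunctionDebugContext {
    fn scope_for_inlined_callee(&mut self, callee: &str) -> &str {
        self.inlined_function_scopes.entry(callee.to_string()).or_insert_with(|| {
            // The real code builds the callee's FnAbi and calls dbg_scope_fn here;
            // this sketch just fabricates a placeholder value.
            format!("di_scope({callee})")
        })
    }
}

fn main() {
    let mut ctx = FunctionDebugContext { inlined_function_scopes: HashMap::new() };
    let first = ctx.scope_for_inlined_callee("core::cmp::max").to_string();
    let second = ctx.scope_for_inlined_callee("core::cmp::max").to_string();
    assert_eq!(first, second);
    assert_eq!(ctx.inlined_function_scopes.len(), 1); // created only once
}
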
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index f8cbcbd..ed93876 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -445,9 +445,9 @@
         ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => {
             build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id)
         }
-        // Box<T, A> may have a non-ZST allocator A. In that case, we
+        // Box<T, A> may have a non-1-ZST allocator A. In that case, we
         // cannot treat Box<T, A> as just an owned alias of `*mut T`.
-        ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
+        ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_1zst() => {
             build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id)
         }
         ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),
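
Switching the allocator check from `is_zst()` to `is_1zst()` narrows the "treat `Box<T, A>` like `*mut T`" case to allocators that are zero-sized and have alignment 1; a zero-sized but over-aligned allocator can still change the Box's overall layout. A hedged sketch of the distinction using plain size/align numbers (the real check runs on rustc layout results, not on this stand-in struct):

/// Minimal stand-in for a computed layout: size and alignment in bytes.
struct Layout {
    size: u64,
    align: u64,
}

/// A plain ZST only has to be zero-sized; a "1-ZST" must also have the
/// weakest possible alignment, so it cannot perturb its container's layout.
fn is_zst(layout: &Layout) -> bool {
    layout.size == 0
}

fn is_1zst(layout: &Layout) -> bool {
    layout.size == 0 && layout.align == 1
}

fn main() {
    let global_alloc = Layout { size: 0, align: 1 }; // e.g. a unit-struct allocator
    let aligned_zst = Layout { size: 0, align: 16 }; // zero-sized but over-aligned
    assert!(is_zst(&global_alloc) && is_1zst(&global_alloc));
    assert!(is_zst(&aligned_zst) && !is_1zst(&aligned_zst));
}
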
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 40714a0..30cc9ea 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -263,11 +263,11 @@
     pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
         let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
             Ok(SourceFileAndLine { sf: file, line }) => {
-                let line_pos = file.lines(|lines| lines[line]);
+                let line_pos = file.lines()[line];
 
                 // Use 1-based indexing.
                 let line = (line + 1) as u32;
-                let col = (pos - line_pos).to_u32() + 1;
+                let col = (file.relative_position(pos) - line_pos).to_u32() + 1;
 
                 (file, line, col)
             }
@@ -292,7 +292,7 @@
         fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         llfn: &'ll Value,
         mir: &mir::Body<'tcx>,
-    ) -> Option<FunctionDebugContext<&'ll DIScope, &'ll DILocation>> {
+    ) -> Option<FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>> {
         if self.sess().opts.debuginfo == DebugInfo::None {
             return None;
         }
@@ -304,8 +304,10 @@
             file_start_pos: BytePos(0),
             file_end_pos: BytePos(0),
         };
-        let mut fn_debug_context =
-            FunctionDebugContext { scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes) };
+        let mut fn_debug_context = FunctionDebugContext {
+            scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes),
+            inlined_function_scopes: Default::default(),
+        };
 
         // Fill in all the scopes, with the information from the MIR body.
         compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
@@ -347,6 +349,7 @@
         type_names::push_generic_params(
             tcx,
             tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args),
+            enclosing_fn_def_id,
             &mut name,
         );
 
@@ -526,7 +529,7 @@
             if let Some(impl_def_id) = cx.tcx.impl_of_method(instance.def_id()) {
                 // If the method does *not* belong to a trait, proceed
                 if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
-                    let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
+                    let impl_self_ty = cx.tcx.instantiate_and_normalize_erasing_regions(
                         instance.args,
                         ty::ParamEnv::reveal_all(),
                         cx.tcx.type_of(impl_def_id),
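
`lookup_debug_loc` now subtracts the line start from `file.relative_position(pos)` rather than from the absolute `BytePos`, since the file's line table stores file-relative offsets. A small sketch of that arithmetic with plain integers (the helper below only mirrors the idea and assumes the first line starts at relative offset 0):

/// Convert an absolute byte position into a 1-based (line, column) pair, given
/// the file's absolute start offset and its line-start table in file-relative
/// offsets.
fn debug_line_and_col(abs_pos: u32, file_start: u32, line_starts_rel: &[u32]) -> (u32, u32) {
    // This is the step the diff adds: translate to a file-relative offset
    // before comparing against the (now relative) line table.
    let rel_pos = abs_pos - file_start;
    // Find the last line start at or before the position.
    let line_idx = match line_starts_rel.binary_search(&rel_pos) {
        Ok(i) => i,
        Err(i) => i - 1,
    };
    let line_pos = line_starts_rel[line_idx];
    (line_idx as u32 + 1, rel_pos - line_pos + 1)
}

fn main() {
    // A file starting at absolute offset 100, with lines at relative 0, 10 and 25.
    assert_eq!(debug_line_and_col(112, 100, &[0, 10, 25]), (2, 3));
    assert_eq!(debug_line_and_col(100, 100, &[0, 10, 25]), (1, 1));
}
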
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index fced6d5..665d195 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -139,6 +139,10 @@
 pub(crate) struct LtoDylib;
 
 #[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_proc_macro)]
+pub(crate) struct LtoProcMacro;
+
+#[derive(Diagnostic)]
 #[diag(codegen_llvm_lto_bitcode_from_rlib)]
 pub(crate) struct LtoBitcodeFromRlib {
     pub llvm_err: String,
@@ -226,3 +230,9 @@
 pub(crate) struct CopyBitcode {
     pub err: std::io::Error,
 }
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_unknown_debuginfo_compression)]
+pub struct UnknownCompression {
+    pub algorithm: &'static str,
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index a9b0603..a97b803 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -15,7 +15,7 @@
 use rustc_codegen_ssa::traits::*;
 use rustc_hir as hir;
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::ty::{self, GenericArgsRef, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_span::{sym, symbol::kw, Span, Symbol};
 use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
@@ -165,7 +165,7 @@
             sym::volatile_load | sym::unaligned_volatile_load => {
                 let tp_ty = fn_args.type_at(0);
                 let ptr = args[0].immediate();
-                let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+                let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
                     let llty = ty.llvm_type(self);
                     self.volatile_load(llty, ptr)
                 } else {
@@ -376,7 +376,9 @@
             }
 
             _ if name.as_str().starts_with("simd_") => {
-                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+                match generic_simd_intrinsic(
+                    self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
+                ) {
                     Ok(llval) => llval,
                     Err(()) => return,
                 }
@@ -386,7 +388,7 @@
         };
 
         if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast(_, _) = &fn_abi.ret.mode {
+            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
                 self.store(llval, result.llval, result.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
@@ -911,6 +913,7 @@
     bx: &mut Builder<'_, 'll, 'tcx>,
     name: Symbol,
     callee_ty: Ty<'tcx>,
+    fn_args: GenericArgsRef<'tcx>,
     args: &[OperandRef<'tcx, &'ll Value>],
     ret_ty: Ty<'tcx>,
     llret_ty: &'ll Type,
@@ -1030,6 +1033,56 @@
         ));
     }
 
+    if name == sym::simd_shuffle_generic {
+        let idx = fn_args[2]
+            .expect_const()
+            .eval(tcx, ty::ParamEnv::reveal_all(), Some(span))
+            .unwrap()
+            .unwrap_branch();
+        let n = idx.len() as u64;
+
+        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            out_len == n,
+            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
+        );
+        require!(
+            in_elem == out_ty,
+            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
+        );
+
+        let total_len = in_len * 2;
+
+        let indices: Option<Vec<_>> = idx
+            .iter()
+            .enumerate()
+            .map(|(arg_idx, val)| {
+                let idx = val.unwrap_leaf().try_to_i32().unwrap();
+                if idx >= i32::try_from(total_len).unwrap() {
+                    bx.sess().emit_err(InvalidMonomorphization::ShuffleIndexOutOfBounds {
+                        span,
+                        name,
+                        arg_idx: arg_idx as u64,
+                        total_len: total_len.into(),
+                    });
+                    None
+                } else {
+                    Some(bx.const_i32(idx))
+                }
+            })
+            .collect();
+        let Some(indices) = indices else {
+            return Ok(bx.const_null(llret_ty));
+        };
+
+        return Ok(bx.shuffle_vector(
+            args[0].immediate(),
+            args[1].immediate(),
+            bx.const_vector(&indices),
+        ));
+    }
+
     if name == sym::simd_shuffle {
         // Make sure this is actually an array, since typeck only checks the length-suffixed
         // version of this intrinsic.
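
`simd_shuffle_generic` reads its indices from a const generic argument, checks each one against `2 * in_len` (the two inputs are conceptually concatenated), and reports `ShuffleIndexOutOfBounds` before lowering to a shuffle. A standalone sketch of the index validation and shuffle semantics on plain slices, with no LLVM involved (the function here is a model, not the intrinsic's real lowering):

/// Shuffle two equal-length vectors using constant indices into their
/// conceptual concatenation `[a, b]`. Returns Err with the offending index
/// position if an index is out of bounds, mirroring the new diagnostic.
fn simd_shuffle_generic<T: Copy>(a: &[T], b: &[T], idx: &[u32]) -> Result<Vec<T>, usize> {
    assert_eq!(a.len(), b.len());
    let total_len = (a.len() * 2) as u32;
    idx.iter()
        .enumerate()
        .map(|(arg_idx, &i)| {
            if i >= total_len {
                Err(arg_idx) // corresponds to ShuffleIndexOutOfBounds { arg_idx, total_len }
            } else if (i as usize) < a.len() {
                Ok(a[i as usize])
            } else {
                Ok(b[i as usize - a.len()])
            }
        })
        .collect()
}

fn main() {
    let a = [1, 2, 3, 4];
    let b = [5, 6, 7, 8];
    assert_eq!(simd_shuffle_generic(&a, &b, &[0, 4, 3, 7]), Ok(vec![1, 5, 4, 8]));
    assert_eq!(simd_shuffle_generic(&a, &b, &[0, 9, 1, 2]), Err(1));
}
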
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index d283299..9c5edd6 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -10,6 +10,7 @@
 #![feature(iter_intersperse)]
 #![feature(let_chains)]
 #![feature(never_type)]
+#![feature(slice_group_by)]
 #![feature(impl_trait_in_assoc_type)]
 #![recursion_limit = "256"]
 #![allow(rustc::potential_query_instability)]
@@ -21,6 +22,7 @@
 #[macro_use]
 extern crate tracing;
 
+use back::owned_target_machine::OwnedTargetMachine;
 use back::write::{create_informational_target_machine, create_target_machine};
 
 use errors::ParseTargetMachineConfig;
@@ -38,8 +40,8 @@
 use rustc_fluent_macro::fluent_messages;
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::query::Providers;
 use rustc_middle::ty::TyCtxt;
+use rustc_middle::util::Providers;
 use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest};
 use rustc_session::Session;
 use rustc_span::symbol::Symbol;
@@ -47,10 +49,12 @@
 use std::any::Any;
 use std::ffi::CStr;
 use std::io::Write;
+use std::mem::ManuallyDrop;
 
 mod back {
     pub mod archive;
     pub mod lto;
+    pub mod owned_target_machine;
     mod profiling;
     pub mod write;
 }
@@ -161,7 +165,7 @@
 impl WriteBackendMethods for LlvmCodegenBackend {
     type Module = ModuleLlvm;
     type ModuleBuffer = back::lto::ModuleBuffer;
-    type TargetMachine = &'static mut llvm::TargetMachine;
+    type TargetMachine = OwnedTargetMachine;
     type TargetMachineError = crate::errors::LlvmError<'static>;
     type ThinData = back::lto::ThinData;
     type ThinBuffer = back::lto::ThinBuffer;
@@ -400,7 +404,10 @@
 pub struct ModuleLlvm {
     llcx: &'static mut llvm::Context,
     llmod_raw: *const llvm::Module,
-    tm: &'static mut llvm::TargetMachine,
+
+    // This field is `ManuallyDrop` because it is important that the `TargetMachine`
+    // is disposed prior to the `Context` being disposed; otherwise use-after-frees can occur.
+    tm: ManuallyDrop<OwnedTargetMachine>,
 }
 
 unsafe impl Send for ModuleLlvm {}
@@ -411,7 +418,11 @@
         unsafe {
             let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
             let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
-            ModuleLlvm { llmod_raw, llcx, tm: create_target_machine(tcx, mod_name) }
+            ModuleLlvm {
+                llmod_raw,
+                llcx,
+                tm: ManuallyDrop::new(create_target_machine(tcx, mod_name)),
+            }
         }
     }
 
@@ -419,7 +430,11 @@
         unsafe {
             let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
             let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
-            ModuleLlvm { llmod_raw, llcx, tm: create_informational_target_machine(tcx.sess) }
+            ModuleLlvm {
+                llmod_raw,
+                llcx,
+                tm: ManuallyDrop::new(create_informational_target_machine(tcx.sess)),
+            }
         }
     }
 
@@ -440,7 +455,7 @@
                 }
             };
 
-            Ok(ModuleLlvm { llmod_raw, llcx, tm })
+            Ok(ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) })
         }
     }
 
@@ -452,7 +467,7 @@
 impl Drop for ModuleLlvm {
     fn drop(&mut self) {
         unsafe {
-            llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
+            ManuallyDrop::drop(&mut self.tm);
             llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
         }
     }
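
The `ManuallyDrop` pattern used above can be hard to read out of a diff; the toy sketch below (stand-in types, no FFI) shows the same shape: the wrapped field is dropped explicitly inside `Drop::drop`, guaranteeing it goes away before the other resource.

use std::mem::ManuallyDrop;

struct Context;             // stand-in for the LLVM context handle
struct TargetMachineHandle; // stand-in for the target machine handle

struct Module {
    // Must be disposed before `ctx`, so it is dropped by hand below.
    tm: ManuallyDrop<TargetMachineHandle>,
    ctx: Context,
}

impl Drop for Module {
    fn drop(&mut self) {
        // SAFETY: `tm` is never used again after this point.
        unsafe { ManuallyDrop::drop(&mut self.tm) };
        // `ctx` is dropped afterwards by the normal field drop glue.
    }
}

fn main() {
    let m = Module { tm: ManuallyDrop::new(TargetMachineHandle), ctx: Context };
    drop(m);
}
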
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 84157d1..a038b3a 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -83,12 +83,17 @@
 // Consts for the LLVM CallConv type, pre-cast to usize.
 
 /// LLVM CallingConv::ID. Should we wrap this?
+///
+/// See <https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/IR/CallingConv.h>
 #[derive(Copy, Clone, PartialEq, Debug)]
 #[repr(C)]
 pub enum CallConv {
     CCallConv = 0,
     FastCallConv = 8,
     ColdCallConv = 9,
+    PreserveMost = 14,
+    PreserveAll = 15,
+    Tail = 18,
     X86StdcallCallConv = 64,
     X86FastcallCallConv = 65,
     ArmAapcsCallConv = 67,
@@ -2107,6 +2112,8 @@
     );
 
     pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
+
+    // This function makes copies of the pointed-to data, so the data's lifetime may end after this function returns.
     pub fn LLVMRustCreateTargetMachine(
         Triple: *const c_char,
         CPU: *const c_char,
@@ -2126,9 +2133,14 @@
         RelaxELFRelocations: bool,
         UseInitArray: bool,
         SplitDwarfFile: *const c_char,
+        OutputObjFile: *const c_char,
+        DebugInfoCompression: *const c_char,
         ForceEmulatedTls: bool,
-    ) -> Option<&'static mut TargetMachine>;
-    pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
+        ArgsCstrBuff: *const c_char,
+        ArgsCstrBuffLen: usize,
+    ) -> *mut TargetMachine;
+
+    pub fn LLVMRustDisposeTargetMachine(T: *mut TargetMachine);
     pub fn LLVMRustAddLibraryInfo<'a>(
         PM: &PassManager<'a>,
         M: &'a Module,
@@ -2314,6 +2326,12 @@
         len: usize,
         out_len: &mut usize,
     ) -> *const u8;
+    pub fn LLVMRustGetSliceFromObjectDataByName(
+        data: *const u8,
+        len: usize,
+        name: *const u8,
+        out_len: &mut usize,
+    ) -> *const u8;
 
     pub fn LLVMRustLinkerNew(M: &Module) -> &mut Linker<'_>;
     pub fn LLVMRustLinkerAdd(
@@ -2352,6 +2370,10 @@
 
     pub fn LLVMRustIsBitcode(ptr: *const u8, len: usize) -> bool;
 
+    pub fn LLVMRustLLVMHasZlibCompressionForDebugSymbols() -> bool;
+
+    pub fn LLVMRustLLVMHasZstdCompressionForDebugSymbols() -> bool;
+
     pub fn LLVMRustGetSymbols(
         buf_ptr: *const u8,
         buf_len: usize,
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index a76c9c9..7c8ef67 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -303,7 +303,7 @@
             // check that all features in a given smallvec are enabled
             for llvm_feature in to_llvm_features(sess, feature) {
                 let cstr = SmallCStr::new(llvm_feature);
-                if !unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } {
+                if !unsafe { llvm::LLVMRustHasFeature(&target_machine, cstr.as_ptr()) } {
                     return false;
                 }
             }
@@ -422,14 +422,14 @@
             }
             unsafe {
                 llvm::LLVMRustPrintTargetCPUs(
-                    tm,
+                    &tm,
                     cpu_cstring.as_ptr(),
                     callback,
                     &mut out as *mut &mut dyn PrintBackendInfo as *mut c_void,
                 );
             }
         }
-        PrintKind::TargetFeatures => print_target_features(out, sess, tm),
+        PrintKind::TargetFeatures => print_target_features(out, sess, &tm),
         _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
     }
 }
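
The call sites above now pass `&tm` because the target machine is an owned wrapper rather than a raw `&'static mut` reference. How `OwnedTargetMachine` exposes the inner handle is not visible in this diff; one plausible shape, purely for illustration, is a newtype that implements `Deref`:

use std::ops::Deref;

struct TargetMachine;               // stand-in for the FFI type
struct OwnedTm(Box<TargetMachine>); // hypothetical owner, not rustc's actual OwnedTargetMachine

impl Deref for OwnedTm {
    type Target = TargetMachine;
    fn deref(&self) -> &TargetMachine {
        &self.0
    }
}

fn has_feature(_tm: &TargetMachine, _feature: &str) -> bool {
    true
}

fn main() {
    let tm = OwnedTm(Box::new(TargetMachine));
    // `&tm` deref-coerces to `&TargetMachine`, matching the `&tm` call sites above.
    assert!(has_feature(&tm, "neon"));
}
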
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 8316455..dcc62d3 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -3,7 +3,7 @@
 use crate::type_::Type;
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
 use rustc_middle::ty::{self, Ty, TypeVisitableExt};
 use rustc_target::abi::HasDataLayout;
@@ -215,20 +215,16 @@
     /// of that field's type - this is useful for taking the address of
     /// that field and ensuring the struct has the right alignment.
     fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
         if let Abi::Scalar(scalar) = self.abi {
             // Use a different cache for scalars because pointers to DSTs
             // can be either fat or thin (data pointers of fat pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
                 return llty;
             }
-            let llty = match *self.ty.kind() {
-                ty::Ref(..) | ty::RawPtr(_) => cx.type_ptr(),
-                ty::Adt(def, _) if def.is_box() => cx.type_ptr(),
-                ty::FnPtr(sig) => {
-                    cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
-                }
-                _ => self.scalar_llvm_type_at(cx, scalar),
-            };
+            let llty = self.scalar_llvm_type_at(cx, scalar);
             cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
             return llty;
         }
@@ -303,27 +299,9 @@
         index: usize,
         immediate: bool,
     ) -> &'a Type {
-        // HACK(eddyb) special-case fat pointers until LLVM removes
-        // pointee types, to avoid bitcasting every `OperandRef::deref`.
-        match *self.ty.kind() {
-            ty::Ref(..) | ty::RawPtr(_) => {
-                return self.field(cx, index).llvm_type(cx);
-            }
-            // only wide pointer boxes are handled as pointers
-            // thin pointer boxes with scalar allocators are handled by the general logic below
-            ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
-                let ptr_ty = Ty::new_mut_ptr(cx.tcx, self.ty.boxed_ty());
-                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
-            }
-            // `dyn* Trait` has the same ABI as `*mut dyn Trait`
-            ty::Dynamic(bounds, region, ty::DynStar) => {
-                let ptr_ty =
-                    Ty::new_mut_ptr(cx.tcx, Ty::new_dynamic(cx.tcx, bounds, region, ty::Dyn));
-                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
-            }
-            _ => {}
-        }
-
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
         let Abi::ScalarPair(a, b) = self.abi else {
             bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
         };
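
The new comments stress that the backend type must depend only on the layout, so that `repr(transparent)` wrappers and their inner type are treated identically. A tiny check of that layout guarantee, using a hypothetical wrapper type:

use std::mem::{align_of, size_of};

#[repr(transparent)]
struct Wrapper(u64); // hypothetical example type

fn main() {
    // A transparent wrapper has exactly the layout and ABI of its single
    // non-1-ZST field, so looking only at the layout is sufficient.
    assert_eq!(size_of::<Wrapper>(), size_of::<u64>());
    assert_eq!(align_of::<Wrapper>(), align_of::<u64>());
}
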
diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl
index b6c70c6..6f7d748 100644
--- a/compiler/rustc_codegen_ssa/messages.ftl
+++ b/compiler/rustc_codegen_ssa/messages.ftl
@@ -19,10 +19,10 @@
 
 codegen_ssa_create_temp_dir = couldn't create a temp dir: {$error}
 
-codegen_ssa_erroneous_constant = erroneous constant encountered
-
 codegen_ssa_error_creating_remark_dir = failed to create remark directory: {$error}
 
+codegen_ssa_expected_coverage_symbol = expected `coverage(off)` or `coverage(on)`
+
 codegen_ssa_expected_used_symbol = expected `used`, `used(compiler)` or `used(linker)`
 
 codegen_ssa_extern_funcs_not_found = some `extern` functions couldn't be found; some native libraries may need to be installed or have their path specified
@@ -35,6 +35,8 @@
 codegen_ssa_extract_bundled_libs_read_entry = failed to read entry '{$rlib}': {$error}
 codegen_ssa_extract_bundled_libs_write_file = failed to write file '{$rlib}': {$error}
 
+codegen_ssa_failed_to_get_layout = failed to get layout for {$ty}: {$err}
+
 codegen_ssa_failed_to_write = failed to write {$path}: {$error}
 
 codegen_ssa_ignoring_emit_path = ignoring emit path because multiple .{$extension} files were produced
@@ -44,8 +46,6 @@
 codegen_ssa_illegal_link_ordinal_format = illegal ordinal format in `link_ordinal`
     .note = an unsuffixed integer value, e.g., `1`, is expected
 
-codegen_ssa_incompatible_linking_modifiers = link modifiers combination `+bundle,+whole-archive` is unstable when generating rlibs
-
 codegen_ssa_insufficient_vs_code_product = VS Code is a different product, and is not sufficient.
 
 codegen_ssa_invalid_link_ordinal_nargs = incorrect number of arguments to `#[link_ordinal]`
@@ -170,8 +170,6 @@
 
 codegen_ssa_option_gcc_only = option `-Z gcc-ld` is used even though linker flavor is not gcc
 
-codegen_ssa_polymorphic_constant_too_generic = codegen encountered polymorphic constant: TooGeneric
-
 codegen_ssa_processing_dymutil_failed = processing debug info with `dsymutil` failed: {$status}
     .note = {$output}
 
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index a7ac728..c4a0f62 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -365,15 +365,9 @@
     // loaded from the libraries found here and then encode that into the
     // metadata of the rlib we're generating somehow.
     for lib in codegen_results.crate_info.used_libraries.iter() {
-        let NativeLibKind::Static { bundle: None | Some(true), whole_archive } = lib.kind else {
+        let NativeLibKind::Static { bundle: None | Some(true), .. } = lib.kind else {
             continue;
         };
-        if whole_archive == Some(true)
-            && flavor == RlibFlavor::Normal
-            && !codegen_results.crate_info.feature_packed_bundled_libs
-        {
-            sess.emit_err(errors::IncompatibleLinkingModifiers);
-        }
         if flavor == RlibFlavor::Normal && let Some(filename) = lib.filename {
             let path = find_native_static_library(filename.as_str(), true, &lib_search_paths, sess);
             let src = read(path).map_err(|e| sess.emit_fatal(errors::ReadFileError {message: e }))?;
diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs
index 4c85474..c6f4bd3 100644
--- a/compiler/rustc_codegen_ssa/src/back/metadata.rs
+++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs
@@ -226,9 +226,7 @@
 
     let mut file = write::Object::new(binary_format, architecture, endianness);
     if sess.target.is_like_osx {
-        if let Some(build_version) = macho_object_build_version_for_target(&sess.target) {
-            file.set_macho_build_version(build_version)
-        }
+        file.set_macho_build_version(macho_object_build_version_for_target(&sess.target))
     }
     let e_flags = match architecture {
         Architecture::Mips => {
@@ -334,31 +332,28 @@
     Some(file)
 }
 
-/// Apple's LD, when linking for Mac Catalyst, requires object files to
-/// contain information about what they were built for (LC_BUILD_VERSION):
-/// the platform (macOS/watchOS etc), minimum OS version, and SDK version.
-/// This returns a `MachOBuildVersion` if necessary for the target.
-fn macho_object_build_version_for_target(
-    target: &Target,
-) -> Option<object::write::MachOBuildVersion> {
-    if !target.llvm_target.ends_with("-macabi") {
-        return None;
-    }
+/// Since Xcode 15, Apple's LD requires object files to contain information about what they were
+/// built for (LC_BUILD_VERSION): the platform (macOS/watchOS etc), minimum OS version, and SDK
+/// version. This returns a `MachOBuildVersion` for the target.
+fn macho_object_build_version_for_target(target: &Target) -> object::write::MachOBuildVersion {
     /// The `object` crate demands "X.Y.Z encoded in nibbles as xxxx.yy.zz"
     /// e.g. minOS 14.0 = 0x000E0000, or SDK 16.2 = 0x00100200
     fn pack_version((major, minor): (u32, u32)) -> u32 {
         (major << 16) | (minor << 8)
     }
 
-    let platform = object::macho::PLATFORM_MACCATALYST;
-    let min_os = (14, 0);
-    let sdk = (16, 2);
+    let platform =
+        rustc_target::spec::current_apple_platform(target).expect("unknown Apple target OS");
+    let min_os = rustc_target::spec::current_apple_deployment_target(target)
+        .expect("unknown Apple target OS");
+    let sdk =
+        rustc_target::spec::current_apple_sdk_version(platform).expect("unknown Apple target OS");
 
     let mut build_version = object::write::MachOBuildVersion::default();
     build_version.platform = platform;
     build_version.minos = pack_version(min_os);
     build_version.sdk = pack_version(sdk);
-    Some(build_version)
+    build_version
 }
 
 pub enum MetadataPosition {
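
The nibble packing used by `pack_version` is easy to sanity-check in isolation; the two values below are the ones quoted in the doc comment above.

fn pack_version((major, minor): (u32, u32)) -> u32 {
    (major << 16) | (minor << 8)
}

fn main() {
    assert_eq!(pack_version((14, 0)), 0x000E_0000); // minOS 14.0
    assert_eq!(pack_version((16, 2)), 0x0010_0200); // SDK 16.2
}
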
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
index 8fb2ccb..9cd4394 100644
--- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -11,10 +11,10 @@
     metadata_symbol_name, ExportedSymbol, SymbolExportInfo, SymbolExportKind, SymbolExportLevel,
 };
 use rustc_middle::query::LocalCrate;
-use rustc_middle::query::{ExternProviders, Providers};
 use rustc_middle::ty::Instance;
 use rustc_middle::ty::{self, SymbolName, TyCtxt};
 use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
+use rustc_middle::util::Providers;
 use rustc_session::config::{CrateType, OomStrategy};
 use rustc_target::spec::SanitizerSet;
 
@@ -334,7 +334,7 @@
 
             match *mono_item {
                 MonoItem::Fn(Instance { def: InstanceDef::Item(def), args }) => {
-                    if args.non_erasable_generics().next().is_some() {
+                    if args.non_erasable_generics(tcx, def).next().is_some() {
                         let symbol = ExportedSymbol::Generic(def, args);
                         symbols.push((
                             symbol,
@@ -346,10 +346,10 @@
                         ));
                     }
                 }
-                MonoItem::Fn(Instance { def: InstanceDef::DropGlue(_, Some(ty)), args }) => {
+                MonoItem::Fn(Instance { def: InstanceDef::DropGlue(def_id, Some(ty)), args }) => {
                     // A little sanity-check
                     debug_assert_eq!(
-                        args.non_erasable_generics().next(),
+                        args.non_erasable_generics(tcx, def_id).next(),
                         Some(GenericArgKind::Type(ty))
                     );
                     symbols.push((
@@ -457,11 +457,9 @@
     providers.is_unreachable_local_definition = is_unreachable_local_definition_provider;
     providers.upstream_drop_glue_for = upstream_drop_glue_for_provider;
     providers.wasm_import_module_map = wasm_import_module_map;
-}
-
-pub fn provide_extern(providers: &mut ExternProviders) {
-    providers.is_reachable_non_generic = is_reachable_non_generic_provider_extern;
-    providers.upstream_monomorphizations_for = upstream_monomorphizations_for_provider;
+    providers.extern_queries.is_reachable_non_generic = is_reachable_non_generic_provider_extern;
+    providers.extern_queries.upstream_monomorphizations_for =
+        upstream_monomorphizations_for_provider;
 }
 
 fn symbol_export_level(tcx: TyCtxt<'_>, sym_def_id: DefId) -> SymbolExportLevel {
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index f485af0..f192747 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -286,6 +286,10 @@
     /// so the path to the dwarf object has to be provided when we create the target machine.
     /// This can be ignored by backends which do not need it for their Split DWARF support.
     pub split_dwarf_file: Option<PathBuf>,
+
+    /// The name of the output object file. Used for setting OutputFilenames in target options
+    /// so that LLVM can emit the CodeView S_OBJNAME record in pdb files
+    pub output_obj_file: Option<PathBuf>,
 }
 
 impl TargetMachineFactoryConfig {
@@ -302,7 +306,10 @@
         } else {
             None
         };
-        TargetMachineFactoryConfig { split_dwarf_file }
+
+        let output_obj_file =
+            Some(cgcx.output_filenames.temp_path(OutputType::Object, Some(module_name)));
+        TargetMachineFactoryConfig { split_dwarf_file, output_obj_file }
     }
 }
 
@@ -343,6 +350,12 @@
     pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
     pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
 
+    /// All command-line args used to invoke the compiler, with @file args fully expanded.
+    /// This will only be used within debug info, e.g. in the PDB file on Windows.
+    /// It is mainly useful for other tools that read that debuginfo to figure out
+    /// how to call the compiler with the same arguments.
+    pub expanded_args: Vec<String>,
+
     /// Handler to use for diagnostics produced during codegen.
     pub diag_emitter: SharedEmitter,
     /// LLVM optimizations for which we want to print remarks.
@@ -1108,6 +1121,7 @@
         incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
         cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(),
         coordinator_send,
+        expanded_args: tcx.sess.expanded_args.clone(),
         diag_emitter: shared_emitter.clone(),
         output_filenames: tcx.output_filenames(()).clone(),
         regular_module_config: regular_config,
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index aa003e4..1e4ea73 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -181,7 +181,7 @@
                 old_info
             }
         }
-        (_, &ty::Dynamic(ref data, _, _)) => meth::get_vtable(cx, source, data.principal()),
+        (_, ty::Dynamic(data, _, _)) => meth::get_vtable(cx, source, data.principal()),
         _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
     }
 }
@@ -202,7 +202,7 @@
             (src, unsized_info(bx, a, b, old_info))
         }
         (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
-            assert_eq!(def_a, def_b);
+            assert_eq!(def_a, def_b); // implies same number of fields
             let src_layout = bx.cx().layout_of(src_ty);
             let dst_layout = bx.cx().layout_of(dst_ty);
             if src_ty == dst_ty {
@@ -211,7 +211,8 @@
             let mut result = None;
             for i in 0..src_layout.fields.count() {
                 let src_f = src_layout.field(bx.cx(), i);
-                if src_f.is_zst() {
+                if src_f.is_1zst() {
+                    // We are looking for the one non-1-ZST field; this is not it.
                     continue;
                 }
 
@@ -272,13 +273,14 @@
         }
 
         (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
-            assert_eq!(def_a, def_b);
+            assert_eq!(def_a, def_b); // implies same number of fields
 
             for i in def_a.variant(FIRST_VARIANT).fields.indices() {
                 let src_f = src.project_field(bx, i.as_usize());
                 let dst_f = dst.project_field(bx, i.as_usize());
 
                 if dst_f.layout.is_zst() {
+                    // No data here, nothing to copy/coerce.
                     continue;
                 }
 
@@ -418,9 +420,11 @@
         rust_main_def_id: DefId,
         entry_type: EntryFnType,
     ) -> Bx::Function {
-        // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
-        // depending on whether the target needs `argc` and `argv` to be passed in.
-        let llfty = if cx.sess().target.main_needs_argc_argv {
+        // The entry function is either `int main(void)` or `int main(int argc, char **argv)`, or
+        // `usize efi_main(void *handle, void *system_table)` depending on the target.
+        let llfty = if cx.sess().target.os.contains("uefi") {
+            cx.type_func(&[cx.type_ptr(), cx.type_ptr()], cx.type_isize())
+        } else if cx.sess().target.main_needs_argc_argv {
             cx.type_func(&[cx.type_int(), cx.type_ptr()], cx.type_int())
         } else {
             cx.type_func(&[], cx.type_int())
@@ -483,8 +487,12 @@
         };
 
         let result = bx.call(start_ty, None, None, start_fn, &args, None);
-        let cast = bx.intcast(result, cx.type_int(), true);
-        bx.ret(cast);
+        if cx.sess().target.os.contains("uefi") {
+            bx.ret(result);
+        } else {
+            let cast = bx.intcast(result, cx.type_int(), true);
+            bx.ret(cast);
+        }
 
         llfn
     }
@@ -495,7 +503,17 @@
     cx: &'a Bx::CodegenCx,
     bx: &mut Bx,
 ) -> (Bx::Value, Bx::Value) {
-    if cx.sess().target.main_needs_argc_argv {
+    if cx.sess().target.os.contains("uefi") {
+        // Params for UEFI
+        let param_handle = bx.get_param(0);
+        let param_system_table = bx.get_param(1);
+        let arg_argc = bx.const_int(cx.type_isize(), 2);
+        let arg_argv = bx.alloca(cx.type_array(cx.type_ptr(), 2), Align::ONE);
+        bx.store(param_handle, arg_argv, Align::ONE);
+        let arg_argv_el1 = bx.gep(cx.type_ptr(), arg_argv, &[bx.const_int(cx.type_int(), 1)]);
+        bx.store(param_system_table, arg_argv_el1, Align::ONE);
+        (arg_argc, arg_argv)
+    } else if cx.sess().target.main_needs_argc_argv {
         // Params from native `main()` used as args for rust start function
         let param_argc = bx.get_param(0);
         let param_argv = bx.get_param(1);
@@ -839,7 +857,6 @@
             dependency_formats: tcx.dependency_formats(()).clone(),
             windows_subsystem,
             natvis_debugger_visualizers: Default::default(),
-            feature_packed_bundled_libs: tcx.features().packed_bundled_libs,
         };
         let crates = tcx.crates(());
 
diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
index f6936c8..59efe4c 100644
--- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
+++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
@@ -16,7 +16,10 @@
 
 use crate::errors;
 use crate::target_features::from_target_feature;
-use crate::{errors::ExpectedUsedSymbol, target_features::check_target_feature_trait_unsafe};
+use crate::{
+    errors::{ExpectedCoverageSymbol, ExpectedUsedSymbol},
+    target_features::check_target_feature_trait_unsafe,
+};
 
 fn linkage_by_name(tcx: TyCtxt<'_>, def_id: LocalDefId, name: &str) -> Linkage {
     use rustc_middle::mir::mono::Linkage::*;
@@ -128,7 +131,21 @@
                         .emit();
                 }
             }
-            sym::no_coverage => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE,
+            sym::coverage => {
+                let inner = attr.meta_item_list();
+                match inner.as_deref() {
+                    Some([item]) if item.has_name(sym::off) => {
+                        codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE;
+                    }
+                    Some([item]) if item.has_name(sym::on) => {
+                        // Allow `#[coverage(on)]` for explicitness, and perhaps in the future to enable
+                        // coverage on a smaller scope within an excluded larger scope.
+                    }
+                    Some(_) | None => {
+                        tcx.sess.emit_err(ExpectedCoverageSymbol { span: attr.span });
+                    }
+                }
+            }
             sym::rustc_std_internal_symbol => {
                 codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL
             }
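
From a user's point of view, the attribute parsed above replaces `#[no_coverage]`. A minimal nightly usage sketch, assuming the feature gate is called `coverage_attribute` (the gate name is not shown in this diff):

#![feature(coverage_attribute)] // assumed gate name, not confirmed by this diff

#[coverage(off)] // sets the NO_COVERAGE codegen flag, per the match arm above
fn not_instrumented() {}

#[coverage(on)] // accepted for explicitness
fn instrumented() {}

fn main() {
    not_instrumented();
    instrumented();
}
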
diff --git a/compiler/rustc_codegen_ssa/src/common.rs b/compiler/rustc_codegen_ssa/src/common.rs
index 5a68075..641ac3e 100644
--- a/compiler/rustc_codegen_ssa/src/common.rs
+++ b/compiler/rustc_codegen_ssa/src/common.rs
@@ -1,7 +1,7 @@
 #![allow(non_camel_case_types)]
 
 use rustc_hir::LangItem;
-use rustc_middle::mir::interpret::ConstValue;
+use rustc_middle::mir;
 use rustc_middle::ty::{self, layout::TyAndLayout, Ty, TyCtxt};
 use rustc_span::Span;
 
@@ -194,10 +194,10 @@
 pub fn asm_const_to_str<'tcx>(
     tcx: TyCtxt<'tcx>,
     sp: Span,
-    const_value: ConstValue<'tcx>,
+    const_value: mir::ConstValue<'tcx>,
     ty_and_layout: TyAndLayout<'tcx>,
 ) -> String {
-    let ConstValue::Scalar(scalar) = const_value else {
+    let mir::ConstValue::Scalar(scalar) = const_value else {
         span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value)
     };
     let value = scalar.assert_bits(ty_and_layout.size);
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
index 067c824..989df44 100644
--- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -106,14 +106,14 @@
                     ty_and_layout,
                     &|output, visited| {
                         push_item_name(tcx, def.did(), true, output);
-                        push_generic_params_internal(tcx, args, output, visited);
+                        push_generic_params_internal(tcx, args, def.did(), output, visited);
                     },
                     output,
                     visited,
                 );
             } else {
                 push_item_name(tcx, def.did(), qualified, output);
-                push_generic_params_internal(tcx, args, output, visited);
+                push_generic_params_internal(tcx, args, def.did(), output, visited);
             }
         }
         ty::Tuple(component_types) => {
@@ -237,8 +237,13 @@
                 let principal =
                     tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), principal);
                 push_item_name(tcx, principal.def_id, qualified, output);
-                let principal_has_generic_params =
-                    push_generic_params_internal(tcx, principal.args, output, visited);
+                let principal_has_generic_params = push_generic_params_internal(
+                    tcx,
+                    principal.args,
+                    principal.def_id,
+                    output,
+                    visited,
+                );
 
                 let projection_bounds: SmallVec<[_; 4]> = trait_data
                     .projection_bounds()
@@ -421,7 +426,6 @@
         | ty::Placeholder(..)
         | ty::Alias(..)
         | ty::Bound(..)
-        | ty::GeneratorWitnessMIR(..)
         | ty::GeneratorWitness(..) => {
             bug!(
                 "debuginfo: Trying to create type name for \
@@ -516,7 +520,13 @@
             tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), trait_ref);
         push_item_name(tcx, trait_ref.def_id, true, &mut vtable_name);
         visited.clear();
-        push_generic_params_internal(tcx, trait_ref.args, &mut vtable_name, &mut visited);
+        push_generic_params_internal(
+            tcx,
+            trait_ref.args,
+            trait_ref.def_id,
+            &mut vtable_name,
+            &mut visited,
+        );
     } else {
         vtable_name.push('_');
     }
@@ -610,20 +620,20 @@
 fn push_generic_params_internal<'tcx>(
     tcx: TyCtxt<'tcx>,
     args: GenericArgsRef<'tcx>,
+    def_id: DefId,
     output: &mut String,
     visited: &mut FxHashSet<Ty<'tcx>>,
 ) -> bool {
-    if args.non_erasable_generics().next().is_none() {
+    debug_assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args));
+    let mut args = args.non_erasable_generics(tcx, def_id).peekable();
+    if args.peek().is_none() {
         return false;
     }
-
-    debug_assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args));
-
     let cpp_like_debuginfo = cpp_like_debuginfo(tcx);
 
     output.push('<');
 
-    for type_parameter in args.non_erasable_generics() {
+    for type_parameter in args {
         match type_parameter {
             GenericArgKind::Type(type_parameter) => {
                 push_debuginfo_type_name(tcx, type_parameter, true, output, visited);
@@ -649,12 +659,12 @@
         }
         _ => match ct.ty().kind() {
             ty::Int(ity) => {
-                let bits = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
+                let bits = ct.eval_bits(tcx, ty::ParamEnv::reveal_all());
                 let val = Integer::from_int_ty(&tcx, *ity).size().sign_extend(bits) as i128;
                 write!(output, "{val}")
             }
             ty::Uint(_) => {
-                let val = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
+                let val = ct.eval_bits(tcx, ty::ParamEnv::reveal_all());
                 write!(output, "{val}")
             }
             ty::Bool => {
@@ -670,10 +680,8 @@
                 // avoiding collisions and will make the emitted type names shorter.
                 let hash_short = tcx.with_stable_hashing_context(|mut hcx| {
                     let mut hasher = StableHasher::new();
-                    let ct = ct.eval(tcx, ty::ParamEnv::reveal_all());
-                    hcx.while_hashing_spans(false, |hcx| {
-                        ct.to_valtree().hash_stable(hcx, &mut hasher)
-                    });
+                    let ct = ct.eval(tcx, ty::ParamEnv::reveal_all(), None).unwrap();
+                    hcx.while_hashing_spans(false, |hcx| ct.hash_stable(hcx, &mut hasher));
                     hasher.finish::<Hash64>()
                 });
 
@@ -691,11 +699,12 @@
 pub fn push_generic_params<'tcx>(
     tcx: TyCtxt<'tcx>,
     args: GenericArgsRef<'tcx>,
+    def_id: DefId,
     output: &mut String,
 ) {
     let _prof = tcx.prof.generic_activity("compute_debuginfo_type_name");
     let mut visited = FxHashSet::default();
-    push_generic_params_internal(tcx, args, output, &mut visited);
+    push_generic_params_internal(tcx, args, def_id, output, &mut visited);
 }
 
 fn push_closure_or_generator_name<'tcx>(
@@ -738,7 +747,7 @@
     // Truncate the args to the length of the above generics. This will cut off
     // anything closure- or generator-specific.
     let args = args.truncate_to(tcx, generics);
-    push_generic_params_internal(tcx, args, output, visited);
+    push_generic_params_internal(tcx, args, enclosing_fn_def_id, output, visited);
 }
 
 fn push_close_angle_bracket(cpp_like_debuginfo: bool, output: &mut String) {
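
The rewrite of `push_generic_params_internal` switches to a `Peekable` iterator so that emptiness can be checked once without consuming or recomputing the generics list; the same pattern in isolation:

fn main() {
    let mut params = ["T", "U"].into_iter().peekable();
    if params.peek().is_none() {
        return; // nothing to print, skip the angle brackets
    }
    let rendered: Vec<&str> = params.collect();
    assert_eq!(rendered, ["T", "U"]);
}
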
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index b7d8b9b..14311ec 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -7,6 +7,7 @@
     IntoDiagnosticArg,
 };
 use rustc_macros::Diagnostic;
+use rustc_middle::ty::layout::LayoutError;
 use rustc_middle::ty::Ty;
 use rustc_span::{Span, Symbol};
 use rustc_type_ir::FloatTy;
@@ -107,10 +108,6 @@
 }
 
 #[derive(Diagnostic)]
-#[diag(codegen_ssa_incompatible_linking_modifiers)]
-pub struct IncompatibleLinkingModifiers;
-
-#[derive(Diagnostic)]
 #[diag(codegen_ssa_add_native_library)]
 pub struct AddNativeLibrary {
     pub library_path: PathBuf,
@@ -561,6 +558,13 @@
 }
 
 #[derive(Diagnostic)]
+#[diag(codegen_ssa_expected_coverage_symbol)]
+pub struct ExpectedCoverageSymbol {
+    #[primary_span]
+    pub span: Span,
+}
+
+#[derive(Diagnostic)]
 #[diag(codegen_ssa_expected_used_symbol)]
 pub struct ExpectedUsedSymbol {
     #[primary_span]
@@ -588,20 +592,6 @@
 }
 
 #[derive(Diagnostic)]
-#[diag(codegen_ssa_erroneous_constant)]
-pub struct ErroneousConstant {
-    #[primary_span]
-    pub span: Span,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_polymorphic_constant_too_generic)]
-pub struct PolymorphicConstantTooGeneric {
-    #[primary_span]
-    pub span: Span,
-}
-
-#[derive(Diagnostic)]
 #[diag(codegen_ssa_shuffle_indices_evaluation)]
 pub struct ShuffleIndicesEvaluation {
     #[primary_span]
@@ -1031,6 +1021,15 @@
 }
 
 #[derive(Diagnostic)]
+#[diag(codegen_ssa_failed_to_get_layout)]
+pub struct FailedToGetLayout<'tcx> {
+    #[primary_span]
+    pub span: Span,
+    pub ty: Ty<'tcx>,
+    pub err: LayoutError<'tcx>,
+}
+
+#[derive(Diagnostic)]
 #[diag(codegen_ssa_error_creating_remark_dir)]
 pub struct ErrorCreatingRemarkDir {
     pub error: std::io::Error,
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
index 7bed3fa..f6186a2 100644
--- a/compiler/rustc_codegen_ssa/src/lib.rs
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -31,7 +31,7 @@
 use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile;
 use rustc_middle::middle::dependency_format::Dependencies;
 use rustc_middle::middle::exported_symbols::SymbolExportKind;
-use rustc_middle::query::{ExternProviders, Providers};
+use rustc_middle::util::Providers;
 use rustc_serialize::opaque::{FileEncoder, MemDecoder};
 use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
 use rustc_session::config::{CrateType, OutputFilenames, OutputType, RUST_CGU_EXT};
@@ -164,7 +164,6 @@
     pub dependency_formats: Lrc<Dependencies>,
     pub windows_subsystem: Option<String>,
     pub natvis_debugger_visualizers: BTreeSet<DebuggerVisualizerFile>,
-    pub feature_packed_bundled_libs: bool, // unstable feature flag.
 }
 
 #[derive(Encodable, Decodable)]
@@ -190,10 +189,6 @@
     crate::codegen_attrs::provide(providers);
 }
 
-pub fn provide_extern(providers: &mut ExternProviders) {
-    crate::back::symbol_export::provide_extern(providers);
-}
-
 /// Checks if the given filename ends with the `.rcgu.o` extension that `rustc`
 /// uses for the object files it generates.
 pub fn looks_like_rust_object_file(filename: &str) -> bool {
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index 22c1f05..d9419db 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -234,7 +234,7 @@
             | PlaceContext::NonMutatingUse(
                 NonMutatingUseContext::Inspect
                 | NonMutatingUseContext::SharedBorrow
-                | NonMutatingUseContext::ShallowBorrow
+                | NonMutatingUseContext::FakeBorrow
                 | NonMutatingUseContext::AddressOf
                 | NonMutatingUseContext::Projection,
             ) => {
@@ -284,8 +284,8 @@
         for (bb, data) in mir.basic_blocks.iter_enumerated() {
             match data.terminator().kind {
                 TerminatorKind::Goto { .. }
-                | TerminatorKind::Resume
-                | TerminatorKind::Terminate
+                | TerminatorKind::UnwindResume
+                | TerminatorKind::UnwindTerminate(_)
                 | TerminatorKind::Return
                 | TerminatorKind::GeneratorDrop
                 | TerminatorKind::Unreachable
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 4f26383..bd0707e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -12,7 +12,7 @@
 use rustc_ast as ast;
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_hir::lang_items::LangItem;
-use rustc_middle::mir::{self, AssertKind, SwitchTargets};
+use rustc_middle::mir::{self, AssertKind, SwitchTargets, UnwindTerminateReason};
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
 use rustc_middle::ty::{self, Instance, Ty};
@@ -178,7 +178,7 @@
             mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
             mir::UnwindAction::Continue => None,
             mir::UnwindAction::Unreachable => None,
-            mir::UnwindAction::Terminate => {
+            mir::UnwindAction::Terminate(reason) => {
                 if fx.mir[self.bb].is_cleanup && base::wants_new_eh_instructions(fx.cx.tcx().sess) {
                     // MSVC SEH will abort automatically if an exception tries to
                     // propagate out from cleanup.
@@ -191,7 +191,7 @@
 
                     None
                 } else {
-                    Some(fx.terminate_block())
+                    Some(fx.terminate_block(reason))
                 }
             }
         };
@@ -264,7 +264,7 @@
     ) -> MergingSucc {
         let unwind_target = match unwind {
             mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
-            mir::UnwindAction::Terminate => Some(fx.terminate_block()),
+            mir::UnwindAction::Terminate(reason) => Some(fx.terminate_block(reason)),
             mir::UnwindAction::Continue => None,
             mir::UnwindAction::Unreachable => None,
         };
@@ -416,7 +416,7 @@
                 }
             }
 
-            PassMode::Cast(cast_ty, _) => {
+            PassMode::Cast { cast: cast_ty, pad_i32: _ } => {
                 let op = match self.locals[mir::RETURN_PLACE] {
                     LocalRef::Operand(op) => op,
                     LocalRef::PendingOperand => bug!("use of return before def"),
@@ -649,12 +649,13 @@
         helper: TerminatorCodegenHelper<'tcx>,
         bx: &mut Bx,
         terminator: &mir::Terminator<'tcx>,
+        reason: UnwindTerminateReason,
     ) {
         let span = terminator.source_info.span;
         self.set_debug_loc(bx, terminator.source_info);
 
         // Obtain the panic entry point.
-        let (fn_abi, llfn) = common::build_langcall(bx, Some(span), LangItem::PanicCannotUnwind);
+        let (fn_abi, llfn) = common::build_langcall(bx, Some(span), reason.lang_item());
 
         // Codegen the actual panic invoke/call.
         let merging_succ = helper.do_call(
@@ -927,21 +928,11 @@
                         // we get a value of a built-in pointer type.
                         //
                         // This is also relevant for `Pin<&mut Self>`, where we need to peel the `Pin`.
-                        'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
-                            && !op.layout.ty.is_ref()
-                        {
-                            for i in 0..op.layout.fields.count() {
-                                let field = op.extract_field(bx, i);
-                                if !field.layout.is_zst() {
-                                    // we found the one non-zero-sized field that is allowed
-                                    // now find *its* non-zero-sized field, or stop if it's a
-                                    // pointer
-                                    op = field;
-                                    continue 'descend_newtypes;
-                                }
-                            }
-
-                            span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+                        while !op.layout.ty.is_unsafe_ptr() && !op.layout.ty.is_ref() {
+                            let (idx, _) = op.layout.non_1zst_field(bx).expect(
+                                "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
+                            );
+                            op = op.extract_field(bx, idx);
                         }
 
                         // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
@@ -969,21 +960,11 @@
                     }
                     Immediate(_) => {
                         // See comment above explaining why we peel these newtypes
-                        'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
-                            && !op.layout.ty.is_ref()
-                        {
-                            for i in 0..op.layout.fields.count() {
-                                let field = op.extract_field(bx, i);
-                                if !field.layout.is_zst() {
-                                    // we found the one non-zero-sized field that is allowed
-                                    // now find *its* non-zero-sized field, or stop if it's a
-                                    // pointer
-                                    op = field;
-                                    continue 'descend_newtypes;
-                                }
-                            }
-
-                            span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+                        while !op.layout.ty.is_unsafe_ptr() && !op.layout.ty.is_ref() {
+                            let (idx, _) = op.layout.non_1zst_field(bx).expect(
+                                "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
+                            );
+                            op = op.extract_field(bx, idx);
                         }
 
                         // Make sure that we've actually unwrapped the rcvr down
@@ -1107,9 +1088,7 @@
                     InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
                 }
                 mir::InlineAsmOperand::Const { ref value } => {
-                    let const_value = self
-                        .eval_mir_constant(value)
-                        .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
+                    let const_value = self.eval_mir_constant(value);
                     let string = common::asm_const_to_str(
                         bx.tcx(),
                         span,
@@ -1119,8 +1098,8 @@
                     InlineAsmOperandRef::Const { string }
                 }
                 mir::InlineAsmOperand::SymFn { ref value } => {
-                    let literal = self.monomorphize(value.literal);
-                    if let ty::FnDef(def_id, args) = *literal.ty().kind() {
+                    let const_ = self.monomorphize(value.const_);
+                    if let ty::FnDef(def_id, args) = *const_.ty().kind() {
                         let instance = ty::Instance::resolve_for_fn_ptr(
                             bx.tcx(),
                             ty::ParamEnv::reveal_all(),
@@ -1224,13 +1203,13 @@
 
         self.set_debug_loc(bx, terminator.source_info);
         match terminator.kind {
-            mir::TerminatorKind::Resume => {
+            mir::TerminatorKind::UnwindResume => {
                 self.codegen_resume_terminator(helper, bx);
                 MergingSucc::False
             }
 
-            mir::TerminatorKind::Terminate => {
-                self.codegen_terminate_terminator(helper, bx, terminator);
+            mir::TerminatorKind::UnwindTerminate(reason) => {
+                self.codegen_terminate_terminator(helper, bx, terminator, reason);
                 MergingSucc::False
             }
 
@@ -1329,7 +1308,7 @@
     ) {
         match arg.mode {
             PassMode::Ignore => return,
-            PassMode::Cast(_, true) => {
+            PassMode::Cast { pad_i32: true, .. } => {
                 // Fill padding with undef value, where applicable.
                 llargs.push(bx.const_undef(bx.reg_backend_type(&Reg::i32())));
             }
@@ -1341,7 +1320,7 @@
                 }
                 _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
             },
-            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => match op.val {
+            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => match op.val {
                 Ref(a, Some(b), _) => {
                     llargs.push(a);
                     llargs.push(b);
@@ -1366,7 +1345,7 @@
                     op.val.store(bx, scratch);
                     (scratch.llval, scratch.align, true)
                 }
-                PassMode::Cast(..) => {
+                PassMode::Cast { .. } => {
                     let scratch = PlaceRef::alloca(bx, arg.layout);
                     op.val.store(bx, scratch);
                     (scratch.llval, scratch.align, true)
@@ -1419,7 +1398,7 @@
 
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
-            if let PassMode::Cast(ty, _) = &arg.mode {
+            if let PassMode::Cast { cast: ty, .. } = &arg.mode {
                 let llty = bx.cast_backend_type(ty);
                 llval = bx.load(llty, llval, align.min(arg.layout.align.abi));
             } else {
@@ -1579,79 +1558,81 @@
         })
     }
 
-    fn terminate_block(&mut self) -> Bx::BasicBlock {
-        self.terminate_block.unwrap_or_else(|| {
-            let funclet;
-            let llbb;
-            let mut bx;
-            if base::wants_msvc_seh(self.cx.sess()) {
-                // This is a basic block that we're aborting the program for,
-                // notably in an `extern` function. These basic blocks are inserted
-                // so that we assert that `extern` functions do indeed not panic,
-                // and if they do we abort the process.
-                //
-                // On MSVC these are tricky though (where we're doing funclets). If
-                // we were to do a cleanuppad (like below) the normal functions like
-                // `longjmp` would trigger the abort logic, terminating the
-                // program. Instead we insert the equivalent of `catch(...)` for C++
-                // which magically doesn't trigger when `longjmp` files over this
-                // frame.
-                //
-                // Lots more discussion can be found on #48251 but this codegen is
-                // modeled after clang's for:
-                //
-                //      try {
-                //          foo();
-                //      } catch (...) {
-                //          bar();
-                //      }
-                //
-                // which creates an IR snippet like
-                //
-                //      cs_terminate:
-                //         %cs = catchswitch within none [%cp_terminate] unwind to caller
-                //      cp_terminate:
-                //         %cp = catchpad within %cs [null, i32 64, null]
-                //         ...
+    fn terminate_block(&mut self, reason: UnwindTerminateReason) -> Bx::BasicBlock {
+        if let Some((cached_bb, cached_reason)) = self.terminate_block && reason == cached_reason {
+            return cached_bb;
+        }
 
-                llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate");
-                let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate");
+        let funclet;
+        let llbb;
+        let mut bx;
+        if base::wants_msvc_seh(self.cx.sess()) {
+            // This is a basic block that we're aborting the program for,
+            // notably in an `extern` function. These basic blocks are inserted
+            // so that we assert that `extern` functions do indeed not panic,
+            // and if they do we abort the process.
+            //
+            // On MSVC these are tricky though (where we're doing funclets). If
+            // we were to do a cleanuppad (like below) the normal functions like
+            // `longjmp` would trigger the abort logic, terminating the
+            // program. Instead we insert the equivalent of `catch(...)` for C++
+            // which magically doesn't trigger when `longjmp` flies over this
+            // frame.
+            //
+            // Lots more discussion can be found on #48251 but this codegen is
+            // modeled after clang's for:
+            //
+            //      try {
+            //          foo();
+            //      } catch (...) {
+            //          bar();
+            //      }
+            //
+            // which creates an IR snippet like
+            //
+            //      cs_terminate:
+            //         %cs = catchswitch within none [%cp_terminate] unwind to caller
+            //      cp_terminate:
+            //         %cp = catchpad within %cs [null, i32 64, null]
+            //         ...
 
-                let mut cs_bx = Bx::build(self.cx, llbb);
-                let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
+            llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate");
+            let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate");
 
-                // The "null" here is actually a RTTI type descriptor for the
-                // C++ personality function, but `catch (...)` has no type so
-                // it's null. The 64 here is actually a bitfield which
-                // represents that this is a catch-all block.
-                bx = Bx::build(self.cx, cp_llbb);
-                let null =
-                    bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space));
-                let sixty_four = bx.const_i32(64);
-                funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
-            } else {
-                llbb = Bx::append_block(self.cx, self.llfn, "terminate");
-                bx = Bx::build(self.cx, llbb);
+            let mut cs_bx = Bx::build(self.cx, llbb);
+            let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
 
-                let llpersonality = self.cx.eh_personality();
-                bx.filter_landing_pad(llpersonality);
+            // The "null" here is actually a RTTI type descriptor for the
+            // C++ personality function, but `catch (...)` has no type so
+            // it's null. The 64 here is actually a bitfield which
+            // represents that this is a catch-all block.
+            bx = Bx::build(self.cx, cp_llbb);
+            let null =
+                bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space));
+            let sixty_four = bx.const_i32(64);
+            funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
+        } else {
+            llbb = Bx::append_block(self.cx, self.llfn, "terminate");
+            bx = Bx::build(self.cx, llbb);
 
-                funclet = None;
-            }
+            let llpersonality = self.cx.eh_personality();
+            bx.filter_landing_pad(llpersonality);
 
-            self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
+            funclet = None;
+        }
 
-            let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicCannotUnwind);
-            let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+        self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
 
-            let llret = bx.call(fn_ty, None, Some(&fn_abi), fn_ptr, &[], funclet.as_ref());
-            bx.do_not_inline(llret);
+        let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, reason.lang_item());
+        let fn_ty = bx.fn_decl_backend_type(&fn_abi);
 
-            bx.unreachable();
+        let llret = bx.call(fn_ty, None, Some(&fn_abi), fn_ptr, &[], funclet.as_ref());
+        bx.do_not_inline(llret);
 
-            self.terminate_block = Some(llbb);
-            llbb
-        })
+        bx.unreachable();
+
+        self.terminate_block = Some((llbb, reason));
+        llbb
     }
 
     /// Get the backend `BasicBlock` for a MIR `BasicBlock`, either already
@@ -1761,7 +1742,7 @@
             }
             DirectOperand(index) => {
                 // If there is a cast, we have to store and reload.
-                let op = if let PassMode::Cast(..) = ret_abi.mode {
+                let op = if let PassMode::Cast { .. } = ret_abi.mode {
                     let tmp = PlaceRef::alloca(bx, ret_abi.layout);
                     tmp.storage_live(bx);
                     bx.store_arg(&ret_abi, llval, tmp);
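
The `terminate_block` change above caches at most one block together with the reason that built it; a call with a different reason simply overwrites the cache. A stand-alone model of that single-entry cache (variant and field names are illustrative only):

#[derive(Clone, Copy, PartialEq)]
enum Reason { A, B } // illustrative stand-ins for UnwindTerminateReason

struct Builder {
    terminate_block: Option<(u32, Reason)>,
    next_block: u32,
}

impl Builder {
    fn terminate_block(&mut self, reason: Reason) -> u32 {
        if let Some((bb, cached)) = self.terminate_block {
            if cached == reason {
                return bb; // reuse the cached block
            }
        }
        let bb = self.next_block; // stand-in for emitting a new basic block
        self.next_block += 1;
        self.terminate_block = Some((bb, reason));
        bb
    }
}

fn main() {
    let mut b = Builder { terminate_block: None, next_block: 0 };
    assert_eq!(b.terminate_block(Reason::A), 0);
    assert_eq!(b.terminate_block(Reason::A), 0); // cache hit
    assert_eq!(b.terminate_block(Reason::B), 1); // different reason rebuilds
}
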
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index babcf9b..fde4e85 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -2,7 +2,7 @@
 use crate::mir::operand::OperandRef;
 use crate::traits::*;
 use rustc_middle::mir;
-use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
+use rustc_middle::mir::interpret::ErrorHandled;
 use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::{self, Ty};
 use rustc_target::abi::Abi;
@@ -13,74 +13,44 @@
     pub fn eval_mir_constant_to_operand(
         &self,
         bx: &mut Bx,
-        constant: &mir::Constant<'tcx>,
-    ) -> Result<OperandRef<'tcx, Bx::Value>, ErrorHandled> {
-        let val = self.eval_mir_constant(constant)?;
+        constant: &mir::ConstOperand<'tcx>,
+    ) -> OperandRef<'tcx, Bx::Value> {
+        let val = self.eval_mir_constant(constant);
         let ty = self.monomorphize(constant.ty());
-        Ok(OperandRef::from_const(bx, val, ty))
+        OperandRef::from_const(bx, val, ty)
     }
 
-    pub fn eval_mir_constant(
-        &self,
-        constant: &mir::Constant<'tcx>,
-    ) -> Result<ConstValue<'tcx>, ErrorHandled> {
-        let ct = self.monomorphize(constant.literal);
-        let uv = match ct {
-            mir::ConstantKind::Ty(ct) => match ct.kind() {
-                ty::ConstKind::Unevaluated(uv) => uv.expand(),
-                ty::ConstKind::Value(val) => {
-                    return Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val)));
-                }
-                err => span_bug!(
-                    constant.span,
-                    "encountered bad ConstKind after monomorphizing: {:?}",
-                    err
-                ),
-            },
-            mir::ConstantKind::Unevaluated(uv, _) => uv,
-            mir::ConstantKind::Val(val, _) => return Ok(val),
-        };
-
-        self.cx.tcx().const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None).map_err(|err| {
-            match err {
-                ErrorHandled::Reported(_) => {
-                    self.cx.tcx().sess.emit_err(errors::ErroneousConstant { span: constant.span });
-                }
-                ErrorHandled::TooGeneric => {
-                    self.cx
-                        .tcx()
-                        .sess
-                        .diagnostic()
-                        .emit_bug(errors::PolymorphicConstantTooGeneric { span: constant.span });
-                }
-            }
-            err
-        })
+    pub fn eval_mir_constant(&self, constant: &mir::ConstOperand<'tcx>) -> mir::ConstValue<'tcx> {
+        self.monomorphize(constant.const_)
+            .eval(self.cx.tcx(), ty::ParamEnv::reveal_all(), Some(constant.span))
+            .expect("erroneous constant not captured by required_consts")
     }
 
     /// This is a convenience helper for `simd_shuffle_indices`. It has the precondition
-    /// that the given `constant` is an `ConstantKind::Unevaluated` and must be convertible to
+    /// that the given `constant` is an `Const::Unevaluated` and must be convertible to
     /// a `ValTree`. If you want a more general version of this, talk to `wg-const-eval` on zulip.
+    ///
+    /// Note that this function is cursed, since usually MIR consts should not be evaluated to valtrees!
     pub fn eval_unevaluated_mir_constant_to_valtree(
         &self,
-        constant: &mir::Constant<'tcx>,
+        constant: &mir::ConstOperand<'tcx>,
     ) -> Result<Option<ty::ValTree<'tcx>>, ErrorHandled> {
-        let uv = match self.monomorphize(constant.literal) {
-            mir::ConstantKind::Unevaluated(uv, _) => uv.shrink(),
-            mir::ConstantKind::Ty(c) => match c.kind() {
+        let uv = match self.monomorphize(constant.const_) {
+            mir::Const::Unevaluated(uv, _) => uv.shrink(),
+            mir::Const::Ty(c) => match c.kind() {
                 // A constant that came from a const generic but was then used as an argument to old-style
                 // simd_shuffle (passing as argument instead of as a generic param).
                 rustc_type_ir::ConstKind::Value(valtree) => return Ok(Some(valtree)),
                 other => span_bug!(constant.span, "{other:#?}"),
             },
-            // We should never encounter `ConstantKind::Val` unless MIR opts (like const prop) evaluate
+            // We should never encounter `Const::Val` unless MIR opts (like const prop) evaluate
             // a constant and write that value back into `Operand`s. This could happen, but is unlikely.
             // Also: all users of `simd_shuffle` are on unstable and already need to take a lot of care
             // around intrinsics. For an issue to happen here, it would require a macro expanding to a
             // `simd_shuffle` call without wrapping the constant argument in a `const {}` block, but
             // the user passes through arbitrary expressions.
             // FIXME(oli-obk): replace the magic const generic argument of `simd_shuffle` with a real
-            // const generic.
+            // const generic, and get rid of this entire function.
             other => span_bug!(constant.span, "{other:#?}"),
         };
         let uv = self.monomorphize(uv);
@@ -95,7 +65,7 @@
     pub fn simd_shuffle_indices(
         &mut self,
         bx: &Bx,
-        constant: &mir::Constant<'tcx>,
+        constant: &mir::ConstOperand<'tcx>,
     ) -> (Bx::Value, Ty<'tcx>) {
         let ty = self.monomorphize(constant.ty());
         let val = self
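
The new `eval_mir_constant` above may simply `expect` success because every constant a body needs is evaluated once, up front, by the `required_consts`/`post_mono_checks` pre-pass (see the `mir/mod.rs` hunk further down). A minimal standalone sketch of that contract, using made-up `Const`/`Body` types rather than rustc's real ones:

    // Sketch only: `Const`, `Body`, and `try_eval` are invented stand-ins.
    #[derive(Clone, Copy)]
    struct Const(i64);

    struct Body {
        required_consts: Vec<Const>,
    }

    fn try_eval(c: Const) -> Result<i64, String> {
        if c.0 < 0 { Err(format!("erroneous constant {}", c.0)) } else { Ok(c.0 * 2) }
    }

    /// Pre-pass: fail early if any required constant is erroneous.
    fn post_mono_checks(body: &Body) -> Result<(), String> {
        for &c in &body.required_consts {
            try_eval(c)?;
        }
        Ok(())
    }

    /// Codegen-time evaluation: infallible by construction after the pre-pass.
    fn eval_mir_constant(c: Const) -> i64 {
        try_eval(c).expect("erroneous constant not captured by required_consts")
    }

    fn main() {
        let body = Body { required_consts: vec![Const(1), Const(2)] };
        post_mono_checks(&body).unwrap(); // all constants checked once here
        assert_eq!(eval_mir_constant(Const(2)), 4); // safe to unwrap later
    }
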
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 526c16a..0dc30d2 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -1,10 +1,12 @@
 use crate::traits::*;
+use rustc_data_structures::fx::FxHashMap;
 use rustc_index::IndexVec;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::Instance;
 use rustc_middle::ty::Ty;
 use rustc_session::config::DebugInfo;
 use rustc_span::symbol::{kw, Symbol};
@@ -17,10 +19,13 @@
 
 use std::ops::Range;
 
-pub struct FunctionDebugContext<S, L> {
+pub struct FunctionDebugContext<'tcx, S, L> {
+    /// Maps from source code to the corresponding debug info scope.
     pub scopes: IndexVec<mir::SourceScope, DebugScope<S, L>>,
-}
 
+    /// Maps from an inlined function to its debug info declaration.
+    pub inlined_function_scopes: FxHashMap<Instance<'tcx>, S>,
+}
 #[derive(Copy, Clone)]
 pub enum VariableKind {
     ArgumentVariable(usize /*index*/),
@@ -153,8 +158,7 @@
     L: DebugInfoOffsetLocation<'tcx, Bx>,
 >(
     bx: &mut Bx,
-    local: mir::Local,
-    var: &PerLocalVarDebugInfo<'tcx, Bx::DIVariable>,
+    projection: &[mir::PlaceElem<'tcx>],
     base: L,
 ) -> DebugInfoOffset<L> {
     let mut direct_offset = Size::ZERO;
@@ -162,7 +166,7 @@
     let mut indirect_offsets = vec![];
     let mut place = base;
 
-    for elem in &var.projection[..] {
+    for elem in projection {
         match *elem {
             mir::ProjectionElem::Deref => {
                 indirect_offsets.push(Size::ZERO);
@@ -183,11 +187,7 @@
             } => {
                 let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
                 let FieldsShape::Array { stride, count: _ } = place.layout().fields else {
-                    span_bug!(
-                        var.source_info.span,
-                        "ConstantIndex on non-array type {:?}",
-                        place.layout()
-                    )
+                    bug!("ConstantIndex on non-array type {:?}", place.layout())
                 };
                 *offset += stride * index;
                 place = place.project_constant_index(bx, index);
@@ -195,11 +195,7 @@
             _ => {
                 // Sanity check for `can_use_in_debuginfo`.
                 debug_assert!(!elem.can_use_in_debuginfo());
-                span_bug!(
-                    var.source_info.span,
-                    "unsupported var debuginfo place `{:?}`",
-                    mir::Place { local, projection: var.projection },
-                )
+                bug!("unsupported var debuginfo projection `{:?}`", projection)
             }
         }
     }
@@ -402,7 +398,7 @@
         let Some(dbg_loc) = self.dbg_loc(var.source_info) else { return };
 
         let DebugInfoOffset { direct_offset, indirect_offsets, result: _ } =
-            calculate_debuginfo_offset(bx, local, &var, base.layout);
+            calculate_debuginfo_offset(bx, &var.projection, base.layout);
 
         // When targeting MSVC, create extra allocas for arguments instead of pointing multiple
         // dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records
@@ -420,7 +416,7 @@
 
         if should_create_individual_allocas {
             let DebugInfoOffset { direct_offset: _, indirect_offsets: _, result: place } =
-                calculate_debuginfo_offset(bx, local, &var, base);
+                calculate_debuginfo_offset(bx, &var.projection, base);
 
             // Create a variable which will be a pointer to the actual value
             let ptr_ty = Ty::new_ptr(
@@ -484,54 +480,75 @@
                 None
             };
 
-            let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
-                let (var_ty, var_kind) = match var.value {
+            let var_ty = if let Some(ref fragment) = var.composite {
+                self.monomorphize(fragment.ty)
+            } else {
+                match var.value {
                     mir::VarDebugInfoContents::Place(place) => {
-                        let var_ty = self.monomorphized_place_ty(place.as_ref());
-                        let var_kind = if let Some(arg_index) = var.argument_index
-                            && place.projection.is_empty()
-                        {
-                            let arg_index = arg_index as usize;
-                            if target_is_msvc {
-                                // ScalarPair parameters are spilled to the stack so they need to
-                                // be marked as a `LocalVariable` for MSVC debuggers to visualize
-                                // their data correctly. (See #81894 & #88625)
-                                let var_ty_layout = self.cx.layout_of(var_ty);
-                                if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
-                                    VariableKind::LocalVariable
-                                } else {
-                                    VariableKind::ArgumentVariable(arg_index)
-                                }
-                            } else {
-                                // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
-                                // offset in closures to account for the hidden environment?
-                                VariableKind::ArgumentVariable(arg_index)
-                            }
-                        } else {
+                        self.monomorphized_place_ty(place.as_ref())
+                    }
+                    mir::VarDebugInfoContents::Const(c) => self.monomorphize(c.ty()),
+                }
+            };
+
+            let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
+                let var_kind = if let Some(arg_index) = var.argument_index
+                    && var.composite.is_none()
+                    && let mir::VarDebugInfoContents::Place(place) = var.value
+                    && place.projection.is_empty()
+                {
+                    let arg_index = arg_index as usize;
+                    if target_is_msvc {
+                        // ScalarPair parameters are spilled to the stack so they need to
+                        // be marked as a `LocalVariable` for MSVC debuggers to visualize
+                        // their data correctly. (See #81894 & #88625)
+                        let var_ty_layout = self.cx.layout_of(var_ty);
+                        if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
                             VariableKind::LocalVariable
-                        };
-                        (var_ty, var_kind)
+                        } else {
+                            VariableKind::ArgumentVariable(arg_index)
+                        }
+                    } else {
+                        // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
+                        // offset in closures to account for the hidden environment?
+                        VariableKind::ArgumentVariable(arg_index)
                     }
-                    mir::VarDebugInfoContents::Const(c) => {
-                        let ty = self.monomorphize(c.ty());
-                        (ty, VariableKind::LocalVariable)
-                    }
-                    mir::VarDebugInfoContents::Composite { ty, fragments: _ } => {
-                        let ty = self.monomorphize(ty);
-                        (ty, VariableKind::LocalVariable)
-                    }
+                } else {
+                    VariableKind::LocalVariable
                 };
 
                 self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
             });
 
+            let fragment = if let Some(ref fragment) = var.composite {
+                let var_layout = self.cx.layout_of(var_ty);
+
+                let DebugInfoOffset { direct_offset, indirect_offsets, result: fragment_layout } =
+                    calculate_debuginfo_offset(bx, &fragment.projection, var_layout);
+                debug_assert!(indirect_offsets.is_empty());
+
+                if fragment_layout.size == Size::ZERO {
+                    // Fragment is a ZST, so it does not represent anything. Avoid generating
+                    // anything, as this may conflict with a fragment that covers the entire variable.
+                    continue;
+                } else if fragment_layout.size == var_layout.size {
+                    // Fragment covers entire variable, so as far as
+                    // DWARF is concerned, it's not really a fragment.
+                    None
+                } else {
+                    Some(direct_offset..direct_offset + fragment_layout.size)
+                }
+            } else {
+                None
+            };
+
             match var.value {
                 mir::VarDebugInfoContents::Place(place) => {
                     per_local[place.local].push(PerLocalVarDebugInfo {
                         name: var.name,
                         source_info: var.source_info,
                         dbg_var,
-                        fragment: None,
+                        fragment,
                         projection: place.projection,
                     });
                 }
@@ -539,59 +556,12 @@
                     if let Some(dbg_var) = dbg_var {
                         let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue };
 
-                        if let Ok(operand) = self.eval_mir_constant_to_operand(bx, &c) {
-                            self.set_debug_loc(bx, var.source_info);
-                            let base = Self::spill_operand_to_stack(
-                                operand,
-                                Some(var.name.to_string()),
-                                bx,
-                            );
+                        let operand = self.eval_mir_constant_to_operand(bx, &c);
+                        self.set_debug_loc(bx, var.source_info);
+                        let base =
+                            Self::spill_operand_to_stack(operand, Some(var.name.to_string()), bx);
 
-                            bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[], None);
-                        }
-                    }
-                }
-                mir::VarDebugInfoContents::Composite { ty, ref fragments } => {
-                    let var_ty = self.monomorphize(ty);
-                    let var_layout = self.cx.layout_of(var_ty);
-                    for fragment in fragments {
-                        let mut fragment_start = Size::ZERO;
-                        let mut fragment_layout = var_layout;
-
-                        for elem in &fragment.projection {
-                            match *elem {
-                                mir::ProjectionElem::Field(field, _) => {
-                                    let i = field.index();
-                                    fragment_start += fragment_layout.fields.offset(i);
-                                    fragment_layout = fragment_layout.field(self.cx, i);
-                                }
-                                _ => span_bug!(
-                                    var.source_info.span,
-                                    "unsupported fragment projection `{:?}`",
-                                    elem,
-                                ),
-                            }
-                        }
-
-                        let place = fragment.contents;
-                        let fragment = if fragment_layout.size == Size::ZERO {
-                            // Fragment is a ZST, so does not represent anything.
-                            continue;
-                        } else if fragment_layout.size == var_layout.size {
-                            // Fragment covers entire variable, so as far as
-                            // DWARF is concerned, it's not really a fragment.
-                            None
-                        } else {
-                            Some(fragment_start..fragment_start + fragment_layout.size)
-                        };
-
-                        per_local[place.local].push(PerLocalVarDebugInfo {
-                            name: var.name,
-                            source_info: var.source_info,
-                            dbg_var,
-                            fragment,
-                            projection: place.projection,
-                        });
+                        bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[], fragment);
                     }
                 }
             }
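
The fragment handling above classifies each composite fragment in one of three ways: a zero-sized fragment is dropped entirely, a fragment covering the whole variable is treated as "no fragment" for DWARF purposes, and anything else becomes a byte range. A rough standalone sketch of that classification, using plain `u64` byte sizes instead of rustc's `Size`:

    use std::ops::Range;

    // Returns None to skip the fragment, Some(None) for "covers the whole
    // variable", and Some(Some(range)) for a genuine sub-range.
    fn fragment_range(
        fragment_offset: u64,
        fragment_size: u64,
        var_size: u64,
    ) -> Option<Option<Range<u64>>> {
        if fragment_size == 0 {
            None // ZST fragment: emit nothing at all
        } else if fragment_size == var_size {
            Some(None) // covers the entire variable: not really a fragment
        } else {
            Some(Some(fragment_offset..fragment_offset + fragment_size))
        }
    }

    fn main() {
        assert_eq!(fragment_range(0, 0, 8), None);
        assert_eq!(fragment_range(0, 8, 8), Some(None));
        assert_eq!(fragment_range(4, 4, 16), Some(Some(4..8)));
    }
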
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 8821fb2..8efef44 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -462,7 +462,7 @@
         };
 
         if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast(..) = &fn_abi.ret.mode {
+            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
                 bx.store(llval, result.llval, result.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
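
`PassMode::Cast` is now matched as a struct variant with a `pad_i32` field instead of the old positional tuple form. A tiny sketch of that pattern with a stand-in enum (the field set is invented; only the `Cast { pad_i32: true, .. }` shape mirrors the code above), loosely modelled on the `llarg_idx += 1` bumps in the `mir/mod.rs` hunk below:

    // Stand-in enum, not rustc_target's real PassMode.
    enum PassMode {
        Direct,
        Cast { pad_i32: bool, on_stack: bool },
    }

    fn extra_llvm_arg_slots(mode: &PassMode) -> usize {
        match mode {
            // `..` keeps the match stable if more fields are added later.
            PassMode::Cast { pad_i32: true, .. } => 1,
            _ => 0,
        }
    }

    fn main() {
        assert_eq!(extra_llvm_arg_slots(&PassMode::Direct), 0);
        assert_eq!(
            extra_llvm_arg_slots(&PassMode::Cast { pad_i32: true, on_stack: false }),
            1
        );
    }
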
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 3464f91..a61018f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -3,8 +3,8 @@
 use rustc_index::bit_set::BitSet;
 use rustc_index::IndexVec;
 use rustc_middle::mir;
-use rustc_middle::mir::interpret::ErrorHandled;
 use rustc_middle::mir::traversal;
+use rustc_middle::mir::UnwindTerminateReason;
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeFoldable, TypeVisitableExt};
 use rustc_target::abi::call::{FnAbi, PassMode};
@@ -45,7 +45,7 @@
 
     mir: &'tcx mir::Body<'tcx>,
 
-    debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>,
+    debug_context: Option<FunctionDebugContext<'tcx, Bx::DIScope, Bx::DILocation>>,
 
     llfn: Bx::Function,
 
@@ -83,8 +83,8 @@
     /// Cached unreachable block
     unreachable_block: Option<Bx::BasicBlock>,
 
-    /// Cached terminate upon unwinding block
-    terminate_block: Option<Bx::BasicBlock>,
+    /// Cached terminate-upon-unwinding block and its reason
+    terminate_block: Option<(Bx::BasicBlock, UnwindTerminateReason)>,
 
     /// The location where each MIR arg/var/tmp/ret is stored. This is
     /// usually an `PlaceRef` representing an alloca, but not always:
@@ -118,7 +118,7 @@
         T: Copy + TypeFoldable<TyCtxt<'tcx>>,
     {
         debug!("monomorphize: self.instance={:?}", self.instance);
-        self.instance.subst_mir_and_normalize_erasing_regions(
+        self.instance.instantiate_mir_and_normalize_erasing_regions(
             self.cx.tcx(),
             ty::ParamEnv::reveal_all(),
             ty::EarlyBinder::bind(value),
@@ -144,7 +144,7 @@
         if layout.is_zst() {
             // Zero-size temporaries aren't always initialized, which
             // doesn't matter because they don't contain data, but
-            // we need something in the operand.
+            // we need something sufficiently aligned in the operand.
             LocalRef::Operand(OperandRef::zero_sized(layout))
         } else {
             LocalRef::PendingOperand
@@ -174,7 +174,7 @@
     let mut start_bx = Bx::build(cx, start_llbb);
 
     if mir.basic_blocks.iter().any(|bb| {
-        bb.is_cleanup || matches!(bb.terminator().unwind(), Some(mir::UnwindAction::Terminate))
+        bb.is_cleanup || matches!(bb.terminator().unwind(), Some(mir::UnwindAction::Terminate(_)))
     }) {
         start_bx.set_personality_fn(cx.eh_personality());
     }
@@ -211,23 +211,14 @@
 
     fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut start_bx);
 
-    // Evaluate all required consts; codegen later assumes that CTFE will never fail.
-    let mut all_consts_ok = true;
-    for const_ in &mir.required_consts {
-        if let Err(err) = fx.eval_mir_constant(const_) {
-            all_consts_ok = false;
-            match err {
-                // errored or at least linted
-                ErrorHandled::Reported(_) => {}
-                ErrorHandled::TooGeneric => {
-                    span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
-                }
-            }
-        }
-    }
-    if !all_consts_ok {
-        // We leave the IR in some half-built state here, and rely on this code not even being
-        // submitted to LLVM once an error was raised.
+    // Rust post-monomorphization checks; we later rely on them.
+    if let Err(err) =
+        mir.post_mono_checks(cx.tcx(), ty::ParamEnv::reveal_all(), |c| Ok(fx.monomorphize(c)))
+    {
+        err.emit_err(cx.tcx());
+        // This IR shouldn't ever be emitted, but let's try to guard against any of this code
+        // ever running.
+        start_bx.abort();
         return;
     }
 
@@ -326,7 +317,7 @@
                 for i in 0..tupled_arg_tys.len() {
                     let arg = &fx.fn_abi.args[idx];
                     idx += 1;
-                    if let PassMode::Cast(_, true) = arg.mode {
+                    if let PassMode::Cast { pad_i32: true, .. } = arg.mode {
                         llarg_idx += 1;
                     }
                     let pr_field = place.project_field(bx, i);
@@ -350,7 +341,7 @@
 
             let arg = &fx.fn_abi.args[idx];
             idx += 1;
-            if let PassMode::Cast(_, true) = arg.mode {
+            if let PassMode::Cast { pad_i32: true, .. } = arg.mode {
                 llarg_idx += 1;
             }
 
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index f90d1a0..0ab2b7e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -6,8 +6,8 @@
 use crate::traits::*;
 use crate::MemFlags;
 
-use rustc_middle::mir;
-use rustc_middle::mir::interpret::{alloc_range, ConstValue, Pointer, Scalar};
+use rustc_middle::mir::interpret::{alloc_range, Pointer, Scalar};
+use rustc_middle::mir::{self, ConstValue};
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::Ty;
 use rustc_target::abi::{self, Abi, Align, Size};
@@ -50,7 +50,8 @@
     /// from [`ConstMethods::const_poison`].
     ///
     /// An `OperandValue` *must* be this variant for any type for which
-    /// `is_zst` on its `Layout` returns `true`.
+    /// `is_zst` on its `Layout` returns `true`. Note however that
+    /// these values can still require alignment.
     ZeroSized,
 }
 
@@ -85,7 +86,7 @@
 
     pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         bx: &mut Bx,
-        val: ConstValue<'tcx>,
+        val: mir::ConstValue<'tcx>,
         ty: Ty<'tcx>,
     ) -> Self {
         let layout = bx.layout_of(ty);
@@ -99,12 +100,12 @@
                 OperandValue::Immediate(llval)
             }
             ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
-            ConstValue::Slice { data, start, end } => {
+            ConstValue::Slice { data, meta } => {
                 let Abi::ScalarPair(a_scalar, _) = layout.abi else {
                     bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
                 };
                 let a = Scalar::from_pointer(
-                    Pointer::new(bx.tcx().create_memory_alloc(data), Size::from_bytes(start)),
+                    Pointer::new(bx.tcx().reserve_and_set_memory_alloc(data), Size::ZERO),
                     &bx.tcx(),
                 );
                 let a_llval = bx.scalar_to_backend(
@@ -112,10 +113,11 @@
                     a_scalar,
                     bx.scalar_pair_element_backend_type(layout, 0, true),
                 );
-                let b_llval = bx.const_usize((end - start) as u64);
+                let b_llval = bx.const_usize(meta);
                 OperandValue::Pair(a_llval, b_llval)
             }
-            ConstValue::ByRef { alloc, offset } => {
+            ConstValue::Indirect { alloc_id, offset } => {
+                let alloc = bx.tcx().global_alloc(alloc_id).unwrap_memory();
                 return Self::from_const_alloc(bx, layout, alloc, offset);
             }
         };
@@ -133,15 +135,14 @@
         assert_eq!(alloc_align, layout.align.abi);
 
         let read_scalar = |start, size, s: abi::Scalar, ty| {
-            let val = alloc
-                .0
-                .read_scalar(
-                    bx,
-                    alloc_range(start, size),
-                    /*read_provenance*/ matches!(s.primitive(), abi::Pointer(_)),
-                )
-                .unwrap();
-            bx.scalar_to_backend(val, s, ty)
+            match alloc.0.read_scalar(
+                bx,
+                alloc_range(start, size),
+                /*read_provenance*/ matches!(s.primitive(), abi::Pointer(_)),
+            ) {
+                Ok(val) => bx.scalar_to_backend(val, s, ty),
+                Err(_) => bx.const_poison(ty),
+            }
         };
 
         // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
@@ -154,7 +155,7 @@
             Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                 let size = s.size(bx);
                 assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
-                let val = read_scalar(Size::ZERO, size, s, bx.type_ptr());
+                let val = read_scalar(offset, size, s, bx.backend_type(layout));
                 OperandRef { val: OperandValue::Immediate(val), layout }
             }
             Abi::ScalarPair(
@@ -162,10 +163,10 @@
                 b @ abi::Scalar::Initialized { .. },
             ) => {
                 let (a_size, b_size) = (a.size(bx), b.size(bx));
-                let b_offset = a_size.align_to(b.align(bx).abi);
+                let b_offset = (offset + a_size).align_to(b.align(bx).abi);
                 assert!(b_offset.bytes() > 0);
                 let a_val = read_scalar(
-                    Size::ZERO,
+                    offset,
                     a_size,
                     a,
                     bx.scalar_pair_element_backend_type(layout, 0, true),
@@ -181,6 +182,8 @@
             _ if layout.is_zst() => OperandRef::zero_sized(layout),
             _ => {
                 // Neither a scalar nor scalar pair. Load from a place
+                // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the
+                // same `ConstAllocation`?
                 let init = bx.const_data_from_alloc(alloc);
                 let base_addr = bx.static_addr_of(init, alloc_align, None);
 
@@ -568,12 +571,7 @@
                 self.codegen_consume(bx, place.as_ref())
             }
 
-            mir::Operand::Constant(ref constant) => {
-                // This cannot fail because we checked all required_consts in advance.
-                self.eval_mir_constant_to_operand(bx, constant).unwrap_or_else(|_err| {
-                    span_bug!(constant.span, "erroneous constant not captured by required_consts")
-                })
-            }
+            mir::Operand::Constant(ref constant) => self.eval_mir_constant_to_operand(bx, constant),
         }
     }
 }
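
The slice constants handled above now use `ConstValue::Slice { data, meta }`: the pointer always refers to the start of the allocation and `meta` is the length, replacing the old `start`/`end` offsets. A simplified standalone sketch with stand-in types (a plain byte slice instead of an interned allocation):

    // Illustrative stand-in for the slice-constant representation.
    enum ConstValue {
        Slice { data: &'static [u8], meta: u64 },
    }

    fn to_ptr_and_len(val: &ConstValue) -> (*const u8, usize) {
        match val {
            // Pointer part: base of the allocation (offset is always zero now).
            // Length part: `meta`, analogous to `bx.const_usize(meta)` above.
            ConstValue::Slice { data, meta } => (data.as_ptr(), *meta as usize),
        }
    }

    fn main() {
        let val = ConstValue::Slice { data: b"hello", meta: 5 };
        let (ptr, len) = to_ptr_and_len(&val);
        assert_eq!(len, 5);
        assert_eq!(unsafe { *ptr }, b'h');
    }
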
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index e7c3906..eb590a4 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -114,7 +114,8 @@
                     bx.struct_gep(ty, self.llval, 1)
                 }
                 Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
-                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
+                    // ZST fields (even some that require alignment) are not included in Scalar,
+                    // ScalarPair, and Vector layouts, so manually offset the pointer.
                     bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
                 }
                 Abi::Scalar(_) | Abi::ScalarPair(..) => {
@@ -462,7 +463,10 @@
                 mir::ProjectionElem::Field(ref field, _) => {
                     cg_base.project_field(bx, field.index())
                 }
-                mir::ProjectionElem::OpaqueCast(ty) => cg_base.project_type(bx, ty),
+                mir::ProjectionElem::OpaqueCast(ty) => {
+                    bug!("encountered OpaqueCast({ty}) in codegen")
+                }
+                mir::ProjectionElem::Subtype(ty) => cg_base.project_type(bx, self.monomorphize(ty)),
                 mir::ProjectionElem::Index(index) => {
                     let index = &mir::Operand::Copy(mir::Place::from(index));
                     let index = self.codegen_operand(bx, index);
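
The `project_field` comment above notes that ZST fields, even high-alignment ones, have no slot in `Scalar`/`ScalarPair`/`Vector` layouts, so their address is produced by a manual byte offset from the base pointer. A small illustration of the same idea in ordinary Rust (the concrete types are invented for the example):

    #[repr(C, align(8))]
    struct AlignedZst;

    #[repr(C)]
    struct Outer {
        a: u32,
        b: AlignedZst, // zero-sized but 8-aligned, so it sits at offset 8
    }

    fn main() {
        let outer = Outer { a: 1, b: AlignedZst };
        assert_eq!(outer.a, 1);
        let base = &outer as *const Outer as *const u8;
        let field = &outer.b as *const AlignedZst as *const u8;
        // The ZST field still has a well-defined byte offset, analogous to
        // `bx.gep(i8, base, offset)` in the hunk above.
        assert_eq!(field as usize - base as usize, 8);
    }
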
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 07c61df..fc8d338 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -1004,6 +1004,7 @@
             mir::Rvalue::Aggregate(..) => {
                 let ty = rvalue.ty(self.mir, self.cx.tcx());
                 let ty = self.monomorphize(ty);
+                // For ZST this can be `OperandValueKind::ZeroSized`.
                 self.cx.spanned_layout_of(ty, span).is_zst()
             }
         }
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
index 0a02ca6..ac8123b 100644
--- a/compiler/rustc_codegen_ssa/src/traits/backend.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -11,9 +11,9 @@
 use rustc_errors::ErrorGuaranteed;
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::query::{ExternProviders, Providers};
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_middle::util::Providers;
 use rustc_session::{
     config::{self, OutputFilenames, PrintRequest},
     cstore::MetadataLoaderDyn,
@@ -85,7 +85,6 @@
     }
 
     fn provide(&self, _providers: &mut Providers) {}
-    fn provide_extern(&self, _providers: &mut ExternProviders) {}
     fn codegen_crate<'tcx>(
         &self,
         tcx: TyCtxt<'tcx>,
diff --git a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
index 63fecaf..4acc0ea 100644
--- a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
@@ -26,7 +26,7 @@
         fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         llfn: Self::Function,
         mir: &mir::Body<'tcx>,
-    ) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>>;
+    ) -> Option<FunctionDebugContext<'tcx, Self::DIScope, Self::DILocation>>;
 
     // FIXME(eddyb) find a common convention for all of the debuginfo-related
     // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl
index e5dd572..d23e2a9 100644
--- a/compiler/rustc_const_eval/messages.ftl
+++ b/compiler/rustc_const_eval/messages.ftl
@@ -83,9 +83,6 @@
 const_eval_dyn_star_call_vtable_mismatch =
     `dyn*` call on a pointer whose vtable does not match its type
 
-const_eval_erroneous_constant =
-    erroneous constant used
-
 const_eval_error = {$error_kind ->
     [static] could not evaluate static initializer
     [const] evaluation of constant value failed
@@ -384,7 +381,7 @@
 
 const_eval_unsigned_offset_from_overflow =
     `ptr_offset_from_unsigned` called when first pointer has smaller offset than second: {$a_offset} < {$b_offset}
-
+const_eval_unsized_local = unsized locals are not supported
 const_eval_unstable_const_fn = `{$def_path}` is not yet stable as a const fn
 
 const_eval_unstable_in_stable =
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index d39a7e8..bf1e0a3 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -4,8 +4,7 @@
 use rustc_middle::mir::AssertKind;
 use rustc_middle::ty::TyCtxt;
 use rustc_middle::ty::{layout::LayoutError, ConstInt};
-use rustc_span::source_map::Spanned;
-use rustc_span::{ErrorGuaranteed, Span, Symbol};
+use rustc_span::{ErrorGuaranteed, Span, Symbol, DUMMY_SP};
 
 use super::InterpCx;
 use crate::errors::{self, FrameNote, ReportErrorExt};
@@ -18,7 +17,6 @@
     ModifiedGlobal,
     AssertFailure(AssertKind<ConstInt>),
     Panic { msg: Symbol, line: u32, col: u32, file: Symbol },
-    Abort(String),
 }
 
 impl MachineStopType for ConstEvalErrKind {
@@ -30,7 +28,6 @@
             ModifiedGlobal => const_eval_modified_global,
             Panic { .. } => const_eval_panic,
             AssertFailure(x) => x.diagnostic_message(),
-            Abort(msg) => msg.to_string().into(),
         }
     }
     fn add_args(
@@ -39,7 +36,7 @@
     ) {
         use ConstEvalErrKind::*;
         match *self {
-            ConstAccessesStatic | ModifiedGlobal | Abort(_) => {}
+            ConstAccessesStatic | ModifiedGlobal => {}
             AssertFailure(kind) => kind.add_args(adder),
             Panic { msg, line, col, file } => {
                 adder("msg".into(), msg.into_diagnostic_arg());
@@ -134,35 +131,17 @@
 {
     // Special handling for certain errors
     match error {
-        // Don't emit a new diagnostic for these errors
+        // Don't emit a new diagnostic for these errors: they are already reported elsewhere or
+        // should remain silent.
         err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
-            ErrorHandled::TooGeneric
+            ErrorHandled::TooGeneric(span.unwrap_or(DUMMY_SP))
         }
-        err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar),
+        err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar, span.unwrap_or(DUMMY_SP)),
         err_inval!(Layout(LayoutError::ReferencesError(guar))) => {
-            ErrorHandled::Reported(guar.into())
+            ErrorHandled::Reported(guar.into(), span.unwrap_or(DUMMY_SP))
         }
-        err_inval!(Layout(layout_error @ LayoutError::SizeOverflow(_))) => {
-            // We must *always* hard error on these, even if the caller wants just a lint.
-            // The `message` makes little sense here, this is a more serious error than the
-            // caller thinks anyway.
-            // See <https://github.com/rust-lang/rust/pull/63152>.
-            let (our_span, frames) = get_span_and_frames();
-            let span = span.unwrap_or(our_span);
-            let mut err =
-                tcx.sess.create_err(Spanned { span, node: layout_error.into_diagnostic() });
-            err.code(rustc_errors::error_code!(E0080));
-            let Some((mut err, handler)) = err.into_diagnostic() else {
-                panic!("did not emit diag");
-            };
-            for frame in frames {
-                err.eager_subdiagnostic(handler, frame);
-            }
-
-            ErrorHandled::Reported(handler.emit_diagnostic(&mut err).unwrap().into())
-        }
+        // Report remaining errors.
         _ => {
-            // Report as hard error.
             let (our_span, frames) = get_span_and_frames();
             let span = span.unwrap_or(our_span);
             let err = mk(span, frames);
@@ -173,7 +152,7 @@
 
             // Use *our* span to label the interp error
             err.span_label(our_span, msg);
-            ErrorHandled::Reported(err.emit().into())
+            ErrorHandled::Reported(err.emit().into(), span)
         }
     }
 }
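
`ErrorHandled::Reported` and `ErrorHandled::TooGeneric` now carry the span they were raised at, with `DUMMY_SP` as the fallback when the caller has none. A minimal mock of that shape (`Span` and `DUMMY_SP` below are placeholders, not `rustc_span`'s types):

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Span(u32);
    const DUMMY_SP: Span = Span(0);

    #[derive(Debug, PartialEq)]
    enum ErrorHandled {
        Reported(Span),
        TooGeneric(Span),
    }

    fn report(too_generic: bool, span: Option<Span>) -> ErrorHandled {
        // Callers without a better span fall back to the dummy one.
        let span = span.unwrap_or(DUMMY_SP);
        if too_generic { ErrorHandled::TooGeneric(span) } else { ErrorHandled::Reported(span) }
    }

    fn main() {
        assert_eq!(report(true, None), ErrorHandled::TooGeneric(DUMMY_SP));
        assert_eq!(report(false, Some(Span(42))), ErrorHandled::Reported(Span(42)));
    }
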
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 4c7e919..3d758cd 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -4,9 +4,9 @@
 use either::{Left, Right};
 
 use rustc_hir::def::DefKind;
-use rustc_middle::mir;
 use rustc_middle::mir::interpret::{ErrorHandled, InterpErrorInfo};
 use rustc_middle::mir::pretty::write_allocation_bytes;
+use rustc_middle::mir::{self, ConstAlloc, ConstValue};
 use rustc_middle::traits::Reveal;
 use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::print::with_no_trimmed_paths;
@@ -18,9 +18,8 @@
 use crate::errors;
 use crate::interpret::eval_nullary_intrinsic;
 use crate::interpret::{
-    intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
-    Immediate, InternKind, InterpCx, InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy,
-    RefTracking, StackPopCleanup,
+    intern_const_alloc_recursive, CtfeValidationMode, GlobalId, Immediate, InternKind, InterpCx,
+    InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, StackPopCleanup,
 };
 
 // Returns a pointer to where the result lives
@@ -61,6 +60,7 @@
         &ret.clone().into(),
         StackPopCleanup::Root { cleanup: false },
     )?;
+    ecx.storage_live_for_always_live_locals()?;
 
     // The main interpreter loop.
     while ecx.step()? {}
@@ -78,7 +78,7 @@
     intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
     // we leave alignment checks off, since this `ecx` will not be used for further evaluation anyway
 
-    debug!("eval_body_using_ecx done: {:?}", *ret);
+    debug!("eval_body_using_ecx done: {:?}", ret);
     Ok(ret)
 }
 
@@ -104,91 +104,75 @@
     )
 }
 
-/// This function converts an interpreter value into a constant that is meant for use in the
-/// type system.
+/// This function converts an interpreter value into a MIR constant.
 #[instrument(skip(ecx), level = "debug")]
 pub(super) fn op_to_const<'tcx>(
     ecx: &CompileTimeEvalContext<'_, 'tcx>,
     op: &OpTy<'tcx>,
 ) -> ConstValue<'tcx> {
-    // We do not have value optimizations for everything.
-    // Only scalars and slices, since they are very common.
-    // Note that further down we turn scalars of uninitialized bits back to `ByRef`. These can result
-    // from scalar unions that are initialized with one of their zero sized variants. We could
-    // instead allow `ConstValue::Scalar` to store `ScalarMaybeUninit`, but that would affect all
-    // the usual cases of extracting e.g. a `usize`, without there being a real use case for the
-    // `Undef` situation.
-    let try_as_immediate = match op.layout.abi {
+    // Handle ZST consistently and early.
+    if op.layout.is_zst() {
+        return ConstValue::ZeroSized;
+    }
+
+    // All scalar types should be stored as `ConstValue::Scalar`. This is needed to make
+    // `ConstValue::try_to_scalar` efficient; we want that to work for *all* constants of scalar
+    // type (it's used throughout the compiler and having it work just on literals is not enough)
+    // and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
+    // from its byte-serialized form).
+    let force_as_immediate = match op.layout.abi {
         Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
-        Abi::ScalarPair(..) => match op.layout.ty.kind() {
-            ty::Ref(_, inner, _) => match *inner.kind() {
-                ty::Slice(elem) => elem == ecx.tcx.types.u8,
-                ty::Str => true,
-                _ => false,
-            },
-            _ => false,
-        },
+        // We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
+        // input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will
+        // not have to generate any duplicate allocations (we preserve the original `AllocId` in
+        // `ConstValue::Indirect`). It means accessing the contents of a slice can be slow (since
+        // they can be stored as `ConstValue::Indirect`), but that's not relevant since we barely
+        // ever have to do this. (`try_get_slice_bytes_for_diagnostics` exists to provide this
+        // functionality.)
         _ => false,
     };
-    let immediate = if try_as_immediate {
+    let immediate = if force_as_immediate {
         Right(ecx.read_immediate(op).expect("normalization works on validated constants"))
     } else {
-        // It is guaranteed that any non-slice scalar pair is actually ByRef here.
-        // When we come back from raw const eval, we are always by-ref. The only way our op here is
-        // by-val is if we are in destructure_mir_constant, i.e., if this is (a field of) something that we
-        // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
-        // structs containing such.
         op.as_mplace_or_imm()
     };
 
     debug!(?immediate);
 
-    // We know `offset` is relative to the allocation, so we can use `into_parts`.
-    let to_const_value = |mplace: &MPlaceTy<'_>| {
-        debug!("to_const_value(mplace: {:?})", mplace);
-        match mplace.ptr.into_parts() {
-            (Some(alloc_id), offset) => {
-                let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
-                ConstValue::ByRef { alloc, offset }
-            }
-            (None, offset) => {
-                assert!(mplace.layout.is_zst());
-                assert_eq!(
-                    offset.bytes() % mplace.layout.align.abi.bytes(),
-                    0,
-                    "this MPlaceTy must come from a validated constant, thus we can assume the \
-                alignment is correct",
-                );
-                ConstValue::ZeroSized
-            }
-        }
-    };
     match immediate {
-        Left(ref mplace) => to_const_value(mplace),
-        // see comment on `let try_as_immediate` above
+        Left(ref mplace) => {
+            // We know `offset` is relative to the allocation, so we can use `into_parts`.
+            let (alloc_id, offset) = mplace.ptr().into_parts();
+            let alloc_id = alloc_id.expect("cannot have `fake` place for non-ZST type");
+            ConstValue::Indirect { alloc_id, offset }
+        }
+        // see comment on `let force_as_immediate` above
         Right(imm) => match *imm {
-            _ if imm.layout.is_zst() => ConstValue::ZeroSized,
             Immediate::Scalar(x) => ConstValue::Scalar(x),
             Immediate::ScalarPair(a, b) => {
                 debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
-                // We know `offset` is relative to the allocation, so we can use `into_parts`.
-                let (data, start) = match a.to_pointer(ecx).unwrap().into_parts() {
-                    (Some(alloc_id), offset) => {
-                        (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
-                    }
-                    (None, _offset) => (
-                        ecx.tcx.mk_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
-                            b"" as &[u8],
-                        )),
-                        0,
+                // This codepath solely exists for `valtree_to_const_value` to not need to generate
+                // a `ConstValue::Indirect` for wide references, so it is tightly restricted to just
+                // that case.
+                let pointee_ty = imm.layout.ty.builtin_deref(false).unwrap().ty; // `false` = no raw ptrs
+                debug_assert!(
+                    matches!(
+                        ecx.tcx.struct_tail_without_normalization(pointee_ty).kind(),
+                        ty::Str | ty::Slice(..),
                     ),
-                };
-                let len = b.to_target_usize(ecx).unwrap();
-                let start = start.try_into().unwrap();
-                let len: usize = len.try_into().unwrap();
-                ConstValue::Slice { data, start, end: start + len }
+                    "`ConstValue::Slice` is for slice-tailed types only, but got {}",
+                    imm.layout.ty,
+                );
+                let msg = "`op_to_const` on an immediate scalar pair must only be used on slice references to the beginning of an actual allocation";
+                // We know `offset` is relative to the allocation, so we can use `into_parts`.
+                let (alloc_id, offset) = a.to_pointer(ecx).expect(msg).into_parts();
+                let alloc_id = alloc_id.expect(msg);
+                let data = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
+                assert!(offset == abi::Size::ZERO, "{}", msg);
+                let meta = b.to_target_usize(ecx).expect(msg);
+                ConstValue::Slice { data, meta }
             }
-            Immediate::Uninit => to_const_value(&op.assert_mem_place()),
+            Immediate::Uninit => bug!("`Uninit` is not a valid value for {}", op.layout.ty),
         },
     }
 }
@@ -234,7 +218,7 @@
         key.param_env = key.param_env.with_user_facing();
         match tcx.eval_to_const_value_raw(key) {
             // try again with reveal all as requested
-            Err(ErrorHandled::TooGeneric) => {}
+            Err(ErrorHandled::TooGeneric(_)) => {}
             // deduplicate calls
             other => return other,
         }
@@ -281,7 +265,7 @@
         key.param_env = key.param_env.with_user_facing();
         match tcx.eval_to_allocation_raw(key) {
             // try again with reveal all as requested
-            Err(ErrorHandled::TooGeneric) => {}
+            Err(ErrorHandled::TooGeneric(_)) => {}
             // deduplicate calls
             other => return other,
         }
@@ -369,9 +353,9 @@
                     inner = true;
                 }
             };
-            let alloc_id = mplace.ptr.provenance.unwrap();
+            let alloc_id = mplace.ptr().provenance.unwrap();
 
-            // Validation failed, report an error. This is always a hard error.
+            // Validation failed, report an error.
             if let Err(error) = validation {
                 let (error, backtrace) = error.into_parts();
                 backtrace.print_backtrace();
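
The reworked `op_to_const` above follows a small decision table: ZSTs become `ZeroSized`, values with a scalar ABI are forced into `ConstValue::Scalar` so `try_to_scalar` stays cheap, and everything else stays behind an allocation reference (`Indirect`). A simplified sketch of just that classification, with illustrative stand-in enums:

    #[derive(PartialEq)]
    enum Abi {
        Scalar,
        ScalarPair,
        Aggregate,
    }

    #[derive(Debug, PartialEq)]
    enum ConstValueKind {
        ZeroSized,
        Scalar,
        Indirect,
    }

    fn classify(size: u64, abi: Abi) -> ConstValueKind {
        if size == 0 {
            ConstValueKind::ZeroSized
        } else if abi == Abi::Scalar {
            ConstValueKind::Scalar
        } else {
            // ScalarPair slices are *not* forced into an immediate; keeping
            // the original allocation avoids duplicating it on round-trips.
            ConstValueKind::Indirect
        }
    }

    fn main() {
        assert_eq!(classify(0, Abi::Aggregate), ConstValueKind::ZeroSized);
        assert_eq!(classify(8, Abi::Scalar), ConstValueKind::Scalar);
        assert_eq!(classify(16, Abi::ScalarPair), ConstValueKind::Indirect);
    }
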
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index b740b79..14b9894 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -3,7 +3,7 @@
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::PointerArithmetic;
 use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, TyCtxt};
 use rustc_session::lint::builtin::INVALID_ALIGNMENT;
 use std::borrow::Borrow;
 use std::hash::Hash;
@@ -464,6 +464,13 @@
         Ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
     }
 
+    fn panic_nounwind(ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
+        let msg = Symbol::intern(msg);
+        let span = ecx.find_closest_untracked_caller_location();
+        let (file, line, col) = ecx.location_triple_for_span(span);
+        Err(ConstEvalErrKind::Panic { msg, file, line, col }.into())
+    }
+
     fn call_intrinsic(
         ecx: &mut InterpCx<'mir, 'tcx, Self>,
         instance: ty::Instance<'tcx>,
@@ -584,16 +591,12 @@
         Err(ConstEvalErrKind::AssertFailure(err).into())
     }
 
-    fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
-        Err(ConstEvalErrKind::Abort(msg).into())
-    }
-
     fn binary_ptr_op(
         _ecx: &InterpCx<'mir, 'tcx, Self>,
         _bin_op: mir::BinOp,
         _left: &ImmTy<'tcx>,
         _right: &ImmTy<'tcx>,
-    ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
+    ) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
         throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
     }
 
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index 8541046..bcbe996 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -1,9 +1,10 @@
 // Not in interpret to make sure we do not use private implementation details
 
 use crate::errors::MaxNumNodesInConstErr;
-use crate::interpret::{intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, Scalar};
+use crate::interpret::{intern_const_alloc_recursive, InternKind, InterpCx, Scalar};
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
+use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
 
@@ -22,7 +23,7 @@
 pub(crate) fn const_caller_location(
     tcx: TyCtxt<'_>,
     (file, line, col): (Symbol, u32, u32),
-) -> ConstValue<'_> {
+) -> mir::ConstValue<'_> {
     trace!("const_caller_location: {}:{}:{}", file, line, col);
     let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), CanAccessStatics::No);
 
@@ -30,7 +31,7 @@
     if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
         bug!("intern_const_alloc_recursive should not error in this case")
     }
-    ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr, &tcx))
+    mir::ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr(), &tcx))
 }
 
 // We forbid type-level constants that contain more than `VALTREE_MAX_NODES` nodes.
@@ -85,18 +86,18 @@
 }
 
 #[instrument(skip(tcx), level = "debug")]
-pub fn try_destructure_mir_constant_for_diagnostics<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    val: ConstValue<'tcx>,
+pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
+    tcx: TyCtxtAt<'tcx>,
+    val: mir::ConstValue<'tcx>,
     ty: Ty<'tcx>,
 ) -> Option<mir::DestructuredConstant<'tcx>> {
     let param_env = ty::ParamEnv::reveal_all();
-    let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, CanAccessStatics::No);
+    let ecx = mk_eval_cx(tcx.tcx, tcx.span, param_env, CanAccessStatics::No);
     let op = ecx.const_val_to_op(val, ty, None).ok()?;
 
     // We go to `usize` as we cannot allocate anything bigger anyway.
     let (field_count, variant, down) = match ty.kind() {
-        ty::Array(_, len) => (len.eval_target_usize(tcx, param_env) as usize, None, op),
+        ty::Array(_, len) => (len.eval_target_usize(tcx.tcx, param_env) as usize, None, op),
         ty::Adt(def, _) if def.variants().is_empty() => {
             return None;
         }
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index b15a65d..7436ea6 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -4,10 +4,11 @@
 use crate::const_eval::CanAccessStatics;
 use crate::interpret::MPlaceTy;
 use crate::interpret::{
-    intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
-    MemoryKind, Place, Projectable, Scalar,
+    intern_const_alloc_recursive, ImmTy, Immediate, InternKind, MemPlaceMeta, MemoryKind, PlaceTy,
+    Projectable, Scalar,
 };
-use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::mir;
+use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
 use rustc_span::source_map::DUMMY_SP;
 use rustc_target::abi::VariantIdx;
@@ -151,7 +152,7 @@
         // FIXME(oli-obk): we can probably encode closures just like structs
         | ty::Closure(..)
         | ty::Generator(..)
-        | ty::GeneratorWitness(..) |ty::GeneratorWitnessMIR(..)=> Err(ValTreeCreationError::NonSupportedType),
+        | ty::GeneratorWitness(..) => Err(ValTreeCreationError::NonSupportedType),
     }
 }
 
@@ -189,12 +190,11 @@
 }
 
 #[instrument(skip(ecx), level = "debug", ret)]
-fn create_pointee_place<'tcx>(
+fn create_valtree_place<'tcx>(
     ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
-    ty: Ty<'tcx>,
+    layout: TyAndLayout<'tcx>,
     valtree: ty::ValTree<'tcx>,
 ) -> MPlaceTy<'tcx> {
-    let layout = ecx.layout_of(ty).unwrap();
     let meta = reconstruct_place_meta(layout, valtree, ecx.tcx.tcx);
     ecx.allocate_dyn(layout, MemoryKind::Stack, meta).unwrap()
 }
@@ -207,7 +207,7 @@
     tcx: TyCtxt<'tcx>,
     param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
     valtree: ty::ValTree<'tcx>,
-) -> ConstValue<'tcx> {
+) -> mir::ConstValue<'tcx> {
     // Basic idea: We directly construct `Scalar` values from trivial `ValTree`s
     // (those for constants with type bool, int, uint, float or char).
     // For all other types we create an `MPlace` and fill that by walking
@@ -216,50 +216,56 @@
     // FIXME Does this need an example?
 
     let (param_env, ty) = param_env_ty.into_parts();
-    let mut ecx: crate::interpret::InterpCx<
-        '_,
-        '_,
-        crate::const_eval::CompileTimeInterpreter<'_, '_>,
-    > = mk_eval_cx(tcx, DUMMY_SP, param_env, CanAccessStatics::No);
 
     match ty.kind() {
         ty::FnDef(..) => {
             assert!(valtree.unwrap_branch().is_empty());
-            ConstValue::ZeroSized
+            mir::ConstValue::ZeroSized
         }
         ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree {
-            ty::ValTree::Leaf(scalar_int) => ConstValue::Scalar(Scalar::Int(scalar_int)),
+            ty::ValTree::Leaf(scalar_int) => mir::ConstValue::Scalar(Scalar::Int(scalar_int)),
             ty::ValTree::Branch(_) => bug!(
                 "ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf"
             ),
         },
-        ty::Ref(_, _, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
-            let place = match ty.kind() {
-                ty::Ref(_, inner_ty, _) => {
-                    // Need to create a place for the pointee (the reference itself will be an immediate)
-                    create_pointee_place(&mut ecx, *inner_ty, valtree)
+        ty::Ref(_, inner_ty, _) => {
+            let mut ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, CanAccessStatics::No);
+            let imm = valtree_to_ref(&mut ecx, valtree, *inner_ty);
+            let imm = ImmTy::from_immediate(imm, tcx.layout_of(param_env_ty).unwrap());
+            op_to_const(&ecx, &imm.into())
+        }
+        ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
+            let layout = tcx.layout_of(param_env_ty).unwrap();
+            if layout.is_zst() {
+                // Fast path to avoid some allocations.
+                return mir::ConstValue::ZeroSized;
+            }
+            if layout.abi.is_scalar()
+                && (matches!(ty.kind(), ty::Tuple(_))
+                    || matches!(ty.kind(), ty::Adt(def, _) if def.is_struct()))
+            {
+                // A Scalar tuple/struct; we can avoid creating an allocation.
+                let branches = valtree.unwrap_branch();
+                // Find the non-ZST field. (There can be aligned ZST!)
+                for (i, &inner_valtree) in branches.iter().enumerate() {
+                    let field = layout.field(&LayoutCx { tcx, param_env }, i);
+                    if !field.is_zst() {
+                        return valtree_to_const_value(tcx, param_env.and(field.ty), inner_valtree);
+                    }
                 }
-                _ => {
-                    // Need to create a place for this valtree.
-                    create_pointee_place(&mut ecx, ty, valtree)
-                }
-            };
-            debug!(?place);
+                bug!("could not find non-ZST field during in {layout:#?}");
+            }
+
+            let mut ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, CanAccessStatics::No);
+
+            // Need to create a place for this valtree.
+            let place = create_valtree_place(&mut ecx, layout, valtree);
 
             valtree_into_mplace(&mut ecx, &place, valtree);
             dump_place(&ecx, &place);
             intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
 
-            match ty.kind() {
-                ty::Ref(_, _, _) => {
-                    let ref_place = place.to_ref(&tcx);
-                    let imm =
-                        ImmTy::from_immediate(ref_place, tcx.layout_of(param_env_ty).unwrap());
-
-                    op_to_const(&ecx, &imm.into())
-                }
-                _ => op_to_const(&ecx, &place.into()),
-            }
+            op_to_const(&ecx, &place.into())
         }
         ty::Never
         | ty::Error(_)
@@ -274,7 +280,6 @@
         | ty::Closure(..)
         | ty::Generator(..)
         | ty::GeneratorWitness(..)
-        | ty::GeneratorWitnessMIR(..)
         | ty::FnPtr(_)
         | ty::RawPtr(_)
         | ty::Str
@@ -283,6 +288,22 @@
     }
 }
 
+/// Put a valtree into memory and return a reference to that.
+fn valtree_to_ref<'tcx>(
+    ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+    valtree: ty::ValTree<'tcx>,
+    pointee_ty: Ty<'tcx>,
+) -> Immediate {
+    let pointee_place = create_valtree_place(ecx, ecx.layout_of(pointee_ty).unwrap(), valtree);
+    debug!(?pointee_place);
+
+    valtree_into_mplace(ecx, &pointee_place, valtree);
+    dump_place(ecx, &pointee_place);
+    intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap();
+
+    pointee_place.to_ref(&ecx.tcx)
+}
+
 #[instrument(skip(ecx), level = "debug")]
 fn valtree_into_mplace<'tcx>(
     ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
@@ -292,7 +313,6 @@
     // This will match on valtree and write the value(s) corresponding to the ValTree
     // inside the place recursively.
 
-    let tcx = ecx.tcx.tcx;
     let ty = place.layout.ty;
 
     match ty.kind() {
@@ -305,27 +325,8 @@
             ecx.write_immediate(Immediate::Scalar(scalar_int.into()), place).unwrap();
         }
         ty::Ref(_, inner_ty, _) => {
-            let pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
-            debug!(?pointee_place);
-
-            valtree_into_mplace(ecx, &pointee_place, valtree);
-            dump_place(ecx, &pointee_place);
-            intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap();
-
-            let imm = match inner_ty.kind() {
-                ty::Slice(_) | ty::Str => {
-                    let len = valtree.unwrap_branch().len();
-                    let len_scalar = Scalar::from_target_usize(len as u64, &tcx);
-
-                    Immediate::ScalarPair(
-                        Scalar::from_maybe_pointer((*pointee_place).ptr, &tcx),
-                        len_scalar,
-                    )
-                }
-                _ => pointee_place.to_ref(&tcx),
-            };
+            let imm = valtree_to_ref(ecx, valtree, *inner_ty);
             debug!(?imm);
-
             ecx.write_immediate(imm, place).unwrap();
         }
         ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => {
@@ -383,5 +384,5 @@
 }
 
 fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: &MPlaceTy<'tcx>) {
-    trace!("{:?}", ecx.dump_place(Place::Ptr(**place)));
+    trace!("{:?}", ecx.dump_place(&PlaceTy::from(place.clone())));
 }
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index 4362cae..b1599dd 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -239,13 +239,6 @@
     pub item_span: Span,
 }
 
-#[derive(Diagnostic)]
-#[diag(const_eval_erroneous_constant)]
-pub(crate) struct ErroneousConstUsed {
-    #[primary_span]
-    pub span: Span,
-}
-
 #[derive(Subdiagnostic)]
 #[note(const_eval_non_const_impl)]
 pub(crate) struct NonConstImplNote {
@@ -482,6 +475,9 @@
         use UndefinedBehaviorInfo::*;
         match self {
             Ub(msg) => msg.clone().into(),
+            Custom(x) => (x.msg)(),
+            ValidationError(e) => e.diagnostic_message(),
+
             Unreachable => const_eval_unreachable,
             BoundsCheckFailed { .. } => const_eval_bounds_check_failed,
             DivisionByZero => const_eval_division_by_zero,
@@ -513,8 +509,8 @@
             ScalarSizeMismatch(_) => const_eval_scalar_size_mismatch,
             UninhabitedEnumVariantWritten(_) => const_eval_uninhabited_enum_variant_written,
             UninhabitedEnumVariantRead(_) => const_eval_uninhabited_enum_variant_read,
-            ValidationError(e) => e.diagnostic_message(),
-            Custom(x) => (x.msg)(),
+            AbiMismatchArgument { .. } => const_eval_incompatible_types,
+            AbiMismatchReturn { .. } => const_eval_incompatible_return_types,
         }
     }
 
@@ -525,8 +521,15 @@
     ) {
         use UndefinedBehaviorInfo::*;
         match self {
-            Ub(_)
-            | Unreachable
+            Ub(_) => {}
+            Custom(custom) => {
+                (custom.add_args)(&mut |name, value| {
+                    builder.set_arg(name, value);
+                });
+            }
+            ValidationError(e) => e.add_args(handler, builder),
+
+            Unreachable
             | DivisionByZero
             | RemainderByZero
             | DivisionOverflow
@@ -593,11 +596,10 @@
                 builder.set_arg("target_size", info.target_size);
                 builder.set_arg("data_size", info.data_size);
             }
-            ValidationError(e) => e.add_args(handler, builder),
-            Custom(custom) => {
-                (custom.add_args)(&mut |name, value| {
-                    builder.set_arg(name, value);
-                });
+            AbiMismatchArgument { caller_ty, callee_ty }
+            | AbiMismatchReturn { caller_ty, callee_ty } => {
+                builder.set_arg("caller_ty", caller_ty.to_string());
+                builder.set_arg("callee_ty", callee_ty.to_string());
             }
         }
     }
@@ -795,6 +797,7 @@
         use crate::fluent_generated::*;
         match self {
             UnsupportedOpInfo::Unsupported(s) => s.clone().into(),
+            UnsupportedOpInfo::UnsizedLocal => const_eval_unsized_local,
             UnsupportedOpInfo::OverwritePartialPointer(_) => const_eval_partial_pointer_overwrite,
             UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_copy,
             UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int,
@@ -814,7 +817,7 @@
             // `ReadPointerAsInt(Some(info))` is never printed anyway, it only serves as an error to
             // be further processed by validity checking which then turns it into something nice to
             // print. So it's not worth the effort of having diagnostics that can print the `info`.
-            Unsupported(_) | ReadPointerAsInt(_) => {}
+            UnsizedLocal | Unsupported(_) | ReadPointerAsInt(_) => {}
             OverwritePartialPointer(ptr) | ReadPartialPointer(ptr) => {
                 builder.set_arg("ptr", ptr);
             }
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 98e853d..b9f88cf 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -24,41 +24,44 @@
         cast_ty: Ty<'tcx>,
         dest: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
+        // `cast_ty` will often be the same as `dest.ty`, but not always, since subtyping is still
+        // possible.
+        let cast_layout =
+            if cast_ty == dest.layout.ty { dest.layout } else { self.layout_of(cast_ty)? };
         // FIXME: In which cases should we trigger UB when the source is uninit?
         match cast_kind {
             CastKind::PointerCoercion(PointerCoercion::Unsize) => {
-                let cast_ty = self.layout_of(cast_ty)?;
-                self.unsize_into(src, cast_ty, dest)?;
+                self.unsize_into(src, cast_layout, dest)?;
             }
 
             CastKind::PointerExposeAddress => {
                 let src = self.read_immediate(src)?;
-                let res = self.pointer_expose_address_cast(&src, cast_ty)?;
-                self.write_immediate(res, dest)?;
+                let res = self.pointer_expose_address_cast(&src, cast_layout)?;
+                self.write_immediate(*res, dest)?;
             }
 
             CastKind::PointerFromExposedAddress => {
                 let src = self.read_immediate(src)?;
-                let res = self.pointer_from_exposed_address_cast(&src, cast_ty)?;
-                self.write_immediate(res, dest)?;
+                let res = self.pointer_from_exposed_address_cast(&src, cast_layout)?;
+                self.write_immediate(*res, dest)?;
             }
 
             CastKind::IntToInt | CastKind::IntToFloat => {
                 let src = self.read_immediate(src)?;
-                let res = self.int_to_int_or_float(&src, cast_ty)?;
-                self.write_immediate(res, dest)?;
+                let res = self.int_to_int_or_float(&src, cast_layout)?;
+                self.write_immediate(*res, dest)?;
             }
 
             CastKind::FloatToFloat | CastKind::FloatToInt => {
                 let src = self.read_immediate(src)?;
-                let res = self.float_to_float_or_int(&src, cast_ty)?;
-                self.write_immediate(res, dest)?;
+                let res = self.float_to_float_or_int(&src, cast_layout)?;
+                self.write_immediate(*res, dest)?;
             }
 
             CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
                 let src = self.read_immediate(src)?;
-                let res = self.ptr_to_ptr(&src, cast_ty)?;
-                self.write_immediate(res, dest)?;
+                let res = self.ptr_to_ptr(&src, cast_layout)?;
+                self.write_immediate(*res, dest)?;
             }
 
             CastKind::PointerCoercion(
@@ -84,10 +87,10 @@
                         )
                         .ok_or_else(|| err_inval!(TooGeneric))?;
 
-                        let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
+                        let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
                         self.write_pointer(fn_ptr, dest)?;
                     }
-                    _ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
+                    _ => span_bug!(self.cur_span(), "reify fn pointer on {}", src.layout.ty),
                 }
             }
 
@@ -98,7 +101,7 @@
                         // No change to value
                         self.write_immediate(*src, dest)?;
                     }
-                    _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {:?}", cast_ty),
+                    _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {}", cast_ty),
                 }
             }
 
@@ -116,10 +119,10 @@
                             ty::ClosureKind::FnOnce,
                         )
                         .ok_or_else(|| err_inval!(TooGeneric))?;
-                        let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
+                        let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
                         self.write_pointer(fn_ptr, dest)?;
                     }
-                    _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
+                    _ => span_bug!(self.cur_span(), "closure fn pointer on {}", src.layout.ty),
                 }
             }
 
@@ -140,6 +143,7 @@
             CastKind::Transmute => {
                 assert!(src.layout.is_sized());
                 assert!(dest.layout.is_sized());
+                assert_eq!(cast_ty, dest.layout.ty); // we otherwise ignore `cast_ty` entirely...
                 if src.layout.size != dest.layout.size {
                     let src_bytes = src.layout.size.bytes();
                     let dest_bytes = dest.layout.size.bytes();
@@ -164,62 +168,61 @@
     pub fn int_to_int_or_float(
         &self,
         src: &ImmTy<'tcx, M::Provenance>,
-        cast_ty: Ty<'tcx>,
-    ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+        cast_to: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool());
-        assert!(cast_ty.is_floating_point() || cast_ty.is_integral() || cast_ty.is_char());
+        assert!(cast_to.ty.is_floating_point() || cast_to.ty.is_integral() || cast_to.ty.is_char());
 
-        Ok(self.cast_from_int_like(src.to_scalar(), src.layout, cast_ty)?.into())
+        Ok(ImmTy::from_scalar(
+            self.cast_from_int_like(src.to_scalar(), src.layout, cast_to.ty)?,
+            cast_to,
+        ))
     }
 
     /// Handles 'FloatToFloat' and 'FloatToInt' casts.
     pub fn float_to_float_or_int(
         &self,
         src: &ImmTy<'tcx, M::Provenance>,
-        cast_ty: Ty<'tcx>,
-    ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+        cast_to: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         use rustc_type_ir::sty::TyKind::*;
 
-        match src.layout.ty.kind() {
+        let val = match src.layout.ty.kind() {
             // Floating point
-            Float(FloatTy::F32) => {
-                return Ok(self.cast_from_float(src.to_scalar().to_f32()?, cast_ty).into());
-            }
-            Float(FloatTy::F64) => {
-                return Ok(self.cast_from_float(src.to_scalar().to_f64()?, cast_ty).into());
-            }
+            Float(FloatTy::F32) => self.cast_from_float(src.to_scalar().to_f32()?, cast_to.ty),
+            Float(FloatTy::F64) => self.cast_from_float(src.to_scalar().to_f64()?, cast_to.ty),
             _ => {
-                bug!("Can't cast 'Float' type into {:?}", cast_ty);
+                bug!("Can't cast 'Float' type into {}", cast_to.ty);
             }
-        }
+        };
+        Ok(ImmTy::from_scalar(val, cast_to))
     }
 
     /// Handles 'FnPtrToPtr' and 'PtrToPtr' casts.
     pub fn ptr_to_ptr(
         &self,
         src: &ImmTy<'tcx, M::Provenance>,
-        cast_ty: Ty<'tcx>,
-    ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+        cast_to: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         assert!(src.layout.ty.is_any_ptr());
-        assert!(cast_ty.is_unsafe_ptr());
+        assert!(cast_to.ty.is_unsafe_ptr());
         // Handle casting any ptr to raw ptr (might be a fat ptr).
-        let dest_layout = self.layout_of(cast_ty)?;
-        if dest_layout.size == src.layout.size {
+        if cast_to.size == src.layout.size {
             // Thin or fat pointer that just has the ptr kind of the target type changed.
-            return Ok(**src);
+            return Ok(ImmTy::from_immediate(**src, cast_to));
         } else {
             // Casting the metadata away from a fat ptr.
             assert_eq!(src.layout.size, 2 * self.pointer_size());
-            assert_eq!(dest_layout.size, self.pointer_size());
+            assert_eq!(cast_to.size, self.pointer_size());
             assert!(src.layout.ty.is_unsafe_ptr());
             return match **src {
-                Immediate::ScalarPair(data, _) => Ok(data.into()),
+                Immediate::ScalarPair(data, _) => Ok(ImmTy::from_scalar(data, cast_to)),
                 Immediate::Scalar(..) => span_bug!(
                     self.cur_span(),
-                    "{:?} input to a fat-to-thin cast ({:?} -> {:?})",
+                    "{:?} input to a fat-to-thin cast ({} -> {})",
                     *src,
                     src.layout.ty,
-                    cast_ty
+                    cast_to.ty
                 ),
                 Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
             };
@@ -229,10 +232,10 @@
     pub fn pointer_expose_address_cast(
         &mut self,
         src: &ImmTy<'tcx, M::Provenance>,
-        cast_ty: Ty<'tcx>,
-    ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+        cast_to: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
-        assert!(cast_ty.is_integral());
+        assert!(cast_to.ty.is_integral());
 
         let scalar = src.to_scalar();
         let ptr = scalar.to_pointer(self)?;
@@ -240,16 +243,16 @@
             Ok(ptr) => M::expose_ptr(self, ptr)?,
             Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
         };
-        Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into())
+        Ok(ImmTy::from_scalar(self.cast_from_int_like(scalar, src.layout, cast_to.ty)?, cast_to))
     }
 
     pub fn pointer_from_exposed_address_cast(
         &self,
         src: &ImmTy<'tcx, M::Provenance>,
-        cast_ty: Ty<'tcx>,
-    ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+        cast_to: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         assert!(src.layout.ty.is_integral());
-        assert_matches!(cast_ty.kind(), ty::RawPtr(_));
+        assert_matches!(cast_to.ty.kind(), ty::RawPtr(_));
 
         // First cast to usize.
         let scalar = src.to_scalar();
@@ -258,12 +261,12 @@
 
         // Then turn address into pointer.
         let ptr = M::ptr_from_addr_cast(&self, addr)?;
-        Ok(Scalar::from_maybe_pointer(ptr, self).into())
+        Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), cast_to))
     }
 
     /// Low-level cast helper function. This works directly on scalars and can take 'int-like' input
     /// type (basically everything with a scalar layout) to int/float/char types.
-    pub fn cast_from_int_like(
+    fn cast_from_int_like(
         &self,
         scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
         src_layout: TyAndLayout<'tcx>,
@@ -298,7 +301,7 @@
             }
 
             // Casts to bool are not permitted by rustc, no need to handle them here.
-            _ => span_bug!(self.cur_span(), "invalid int to {:?} cast", cast_ty),
+            _ => span_bug!(self.cur_span(), "invalid int to {} cast", cast_ty),
         })
     }
 
@@ -331,7 +334,7 @@
             // float -> f64
             Float(FloatTy::F64) => Scalar::from_f64(f.convert(&mut false).value),
             // That's it.
-            _ => span_bug!(self.cur_span(), "invalid float to {:?} cast", dest_ty),
+            _ => span_bug!(self.cur_span(), "invalid float to {} cast", dest_ty),
         }
     }
 
@@ -351,7 +354,7 @@
 
         match (&src_pointee_ty.kind(), &dest_pointee_ty.kind()) {
             (&ty::Array(_, length), &ty::Slice(_)) => {
-                let ptr = self.read_scalar(src)?;
+                let ptr = self.read_pointer(src)?;
                 // u64 cast is from usize to u64, which is always good
                 let val = Immediate::new_slice(
                     ptr,
@@ -367,6 +370,7 @@
                     return self.write_immediate(*val, dest);
                 }
                 let (old_data, old_vptr) = val.to_scalar_pair();
+                let old_data = old_data.to_pointer(self)?;
                 let old_vptr = old_vptr.to_pointer(self)?;
                 let (ty, old_trait) = self.get_ptr_vtable(old_vptr)?;
                 if old_trait != data_a.principal() {
@@ -378,7 +382,7 @@
             (_, &ty::Dynamic(data, _, ty::Dyn)) => {
                 // Initial cast from sized to dyn trait
                 let vtable = self.get_vtable_ptr(src_pointee_ty, data.principal())?;
-                let ptr = self.read_scalar(src)?;
+                let ptr = self.read_pointer(src)?;
                 let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
                 self.write_immediate(val, dest)
             }
@@ -389,7 +393,7 @@
 
                 span_bug!(
                     self.cur_span(),
-                    "invalid pointer unsizing {:?} -> {:?}",
+                    "invalid pointer unsizing {} -> {}",
                     src.layout.ty,
                     cast_ty
                 )
@@ -403,28 +407,32 @@
         cast_ty: TyAndLayout<'tcx>,
         dest: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
+        trace!("Unsizing {:?} of type {} into {}", *src, src.layout.ty, cast_ty.ty);
         match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
             (&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(TypeAndMut { ty: c, .. }))
             | (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: c, .. })) => {
                 self.unsize_into_ptr(src, dest, *s, *c)
             }
             (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
-                assert_eq!(def_a, def_b);
+                assert_eq!(def_a, def_b); // implies same number of fields
 
-                // unsizing of generic struct with pointer fields
-                // Example: `Arc<T>` -> `Arc<Trait>`
-                // here we need to increase the size of every &T thin ptr field to a fat ptr
+                // Unsizing of generic struct with pointer fields, like `Arc<T>` -> `Arc<Trait>`.
+                // There can be extra fields as long as they don't change their type or are 1-ZST.
+                // There might also be no field that actually needs unsizing.
+                let mut found_cast_field = false;
                 for i in 0..src.layout.fields.count() {
                     let cast_ty_field = cast_ty.field(self, i);
-                    if cast_ty_field.is_zst() {
-                        continue;
-                    }
                     let src_field = self.project_field(src, i)?;
                     let dst_field = self.project_field(dest, i)?;
-                    if src_field.layout.ty == cast_ty_field.ty {
+                    if src_field.layout.is_1zst() && cast_ty_field.is_1zst() {
+                        // Skip 1-ZST fields.
+                    } else if src_field.layout.ty == cast_ty_field.ty {
                         self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
                     } else {
+                        if found_cast_field {
+                            span_bug!(self.cur_span(), "unsize_into: more than one field to cast");
+                        }
+                        found_cast_field = true;
                         self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
                     }
                 }
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index 6c35fb0..49e0172 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -76,7 +76,7 @@
                     let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                     let variant_index_relative_val =
                         ImmTy::from_uint(variant_index_relative, tag_layout);
-                    let tag_val = self.binary_op(
+                    let tag_val = self.wrapping_binary_op(
                         mir::BinOp::Add,
                         &variant_index_relative_val,
                         &niche_start_val,
@@ -153,19 +153,18 @@
         // Figure out which discriminant and variant this corresponds to.
         let index = match *tag_encoding {
             TagEncoding::Direct => {
-                let scalar = tag_val.to_scalar();
                 // Generate a specific error if `tag_val` is not an integer.
                 // (`tag_bits` itself is only used for error messages below.)
-                let tag_bits = scalar
+                let tag_bits = tag_val
+                    .to_scalar()
                     .try_to_int()
                     .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
                     .assert_bits(tag_layout.size);
                 // Cast bits from tag layout to discriminant layout.
                 // After the checks we did above, this cannot fail, as
                 // discriminants are int-like.
-                let discr_val =
-                    self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
-                let discr_bits = discr_val.assert_bits(discr_layout.size);
+                let discr_val = self.int_to_int_or_float(&tag_val, discr_layout).unwrap();
+                let discr_bits = discr_val.to_scalar().assert_bits(discr_layout.size);
                 // Convert discriminant to variant index, and catch invalid discriminants.
                 let index = match *ty.kind() {
                     ty::Adt(adt, _) => {
@@ -208,7 +207,7 @@
                         let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
                         let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                         let variant_index_relative_val =
-                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
+                            self.wrapping_binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
                         let variant_index_relative =
                             variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
                         // Check if this is in the range that indicates an actual discriminant.
@@ -247,9 +246,9 @@
         &self,
         layout: TyAndLayout<'tcx>,
         variant: VariantIdx,
-    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         let discr_layout = self.layout_of(layout.ty.discriminant_ty(*self.tcx))?;
-        Ok(match layout.ty.discriminant_for_variant(*self.tcx, variant) {
+        let discr_value = match layout.ty.discriminant_for_variant(*self.tcx, variant) {
             Some(discr) => {
                 // This type actually has discriminants.
                 assert_eq!(discr.ty, discr_layout.ty);
@@ -260,6 +259,7 @@
                 assert_eq!(variant.as_u32(), 0);
                 Scalar::from_uint(variant.as_u32(), discr_layout.size)
             }
-        })
+        };
+        Ok(ImmTy::from_scalar(discr_value, discr_layout))
     }
 }
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 3ac6f07..af7dfbe 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -7,13 +7,13 @@
 use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
 use rustc_index::IndexVec;
 use rustc_middle::mir;
-use rustc_middle::mir::interpret::{ErrorHandled, InterpError, InvalidMetaKind, ReportedErrorInfo};
+use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo};
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::{
     self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
     TyAndLayout,
 };
-use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, Variance};
 use rustc_mir_dataflow::storage::always_storage_live_locals;
 use rustc_session::Limit;
 use rustc_span::Span;
@@ -21,12 +21,12 @@
 
 use super::{
     AllocId, GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace,
-    MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, PointerArithmetic, Provenance,
-    Scalar, StackPopJump,
+    MemPlaceMeta, Memory, MemoryKind, OpTy, Operand, Place, PlaceTy, Pointer, PointerArithmetic,
+    Projectable, Provenance, Scalar, StackPopJump,
 };
-use crate::errors::{self, ErroneousConstUsed};
-use crate::fluent_generated as fluent;
+use crate::errors;
 use crate::util;
+use crate::{fluent_generated as fluent, ReportErrorExt};
 
 pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
     /// Stores the `Machine` instance.
@@ -155,16 +155,26 @@
 }
 
 /// State of a local variable including a memoized layout
-#[derive(Clone, Debug)]
+#[derive(Clone)]
 pub struct LocalState<'tcx, Prov: Provenance = AllocId> {
-    pub value: LocalValue<Prov>,
-    /// Don't modify if `Some`, this is only used to prevent computing the layout twice
-    pub layout: Cell<Option<TyAndLayout<'tcx>>>,
+    value: LocalValue<Prov>,
+    /// Don't modify if `Some`, this is only used to prevent computing the layout twice.
+    /// Avoids computing the layout of locals that are never actually initialized.
+    layout: Cell<Option<TyAndLayout<'tcx>>>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("LocalState")
+            .field("value", &self.value)
+            .field("ty", &self.layout.get().map(|l| l.ty))
+            .finish()
+    }
 }
 
 /// Current value of a local variable
 #[derive(Copy, Clone, Debug)] // Miri debug-prints these
-pub enum LocalValue<Prov: Provenance = AllocId> {
+pub(super) enum LocalValue<Prov: Provenance = AllocId> {
     /// This local is not currently alive, and cannot be used at all.
     Dead,
     /// A normal, live local.
@@ -175,10 +185,27 @@
     Live(Operand<Prov>),
 }
 
-impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
+    pub fn make_live_uninit(&mut self) {
+        self.value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+    }
+
+    /// This is a hack because Miri needs a way to visit all the provenance in a `LocalState`
+    /// without having a layout or `TyCtxt` available, and we want to keep the `Operand` type
+    /// private.
+    pub fn as_mplace_or_imm(
+        &self,
+    ) -> Option<Either<(Pointer<Option<Prov>>, MemPlaceMeta<Prov>), Immediate<Prov>>> {
+        match self.value {
+            LocalValue::Dead => None,
+            LocalValue::Live(Operand::Indirect(mplace)) => Some(Left((mplace.ptr, mplace.meta))),
+            LocalValue::Live(Operand::Immediate(imm)) => Some(Right(imm)),
+        }
+    }
+
     /// Read the local's value or error if the local is not yet live or not live anymore.
-    #[inline]
-    pub fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
+    #[inline(always)]
+    pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
         match &self.value {
             LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
             LocalValue::Live(val) => Ok(val),
@@ -188,10 +215,10 @@
     /// Overwrite the local. If the local can be overwritten in place, return a reference
     /// to do so; otherwise return the `MemPlace` to consult instead.
     ///
-    /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
-    /// anywhere else. You may be invalidating machine invariants if you do!
-    #[inline]
-    pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
+    /// Note: Before calling this, call the `before_access_local_mut` machine hook! You may be
+    /// invalidating machine invariants otherwise!
+    #[inline(always)]
+    pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
         match &mut self.value {
             LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
             LocalValue::Live(val) => Ok(val),
@@ -357,7 +384,7 @@
     // all normal lifetimes are erased, higher-ranked types with their
     // late-bound lifetimes are still around and can lead to type
     // differences.
-    if util::is_subtype(tcx, param_env, src.ty, dest.ty) {
+    if util::relate_types(tcx, param_env, Variance::Covariant, src.ty, dest.ty) {
         // Make sure the layout is equal, too -- just to be safe. Miri really
         // needs layout equality. For performance reason we skip this check when
         // the types are equal. Equal types *can* have different layouts when
@@ -389,7 +416,7 @@
                 if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
                     span_bug!(
                         tcx.span,
-                        "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
+                        "expected type differs from actual type.\nexpected: {}\nactual: {}",
                         known_layout.ty,
                         check_layout.ty,
                     );
@@ -432,6 +459,27 @@
             .map_or(CRATE_HIR_ID, |def_id| self.tcx.hir().local_def_id_to_hir_id(def_id))
     }
 
+    /// Turn the given error into a human-readable string. Expects the string to be printed, so if
+    /// `RUSTC_CTFE_BACKTRACE` is set this will show a backtrace of the rustc internals that
+    /// triggered the error.
+    ///
+    /// This is NOT the preferred way to render an error; use `report` from `const_eval` instead.
+    /// However, this is useful when error messages appear in ICEs.
+    pub fn format_error(&self, e: InterpErrorInfo<'tcx>) -> String {
+        let (e, backtrace) = e.into_parts();
+        backtrace.print_backtrace();
+        // FIXME(fee1-dead), HACK: we want to use the error as the title, therefore we can just extract the
+        // label and arguments from the InterpError.
+        let handler = &self.tcx.sess.parse_sess.span_diagnostic;
+        #[allow(rustc::untranslatable_diagnostic)]
+        let mut diag = self.tcx.sess.struct_allow("");
+        let msg = e.diagnostic_message();
+        e.add_args(handler, &mut diag);
+        let s = handler.eagerly_translate_to_string(msg, diag.args());
+        diag.cancel();
+        s
+    }
+
     #[inline(always)]
     pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] {
         M::stack(self)
@@ -462,7 +510,7 @@
     }
 
     #[inline(always)]
-    pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
+    pub fn body(&self) -> &'mir mir::Body<'tcx> {
         self.frame().body
     }
 
@@ -508,7 +556,7 @@
     >(
         &self,
         value: T,
-    ) -> Result<T, InterpError<'tcx>> {
+    ) -> Result<T, ErrorHandled> {
         self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value)
     }
 
@@ -518,15 +566,15 @@
         &self,
         frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
         value: T,
-    ) -> Result<T, InterpError<'tcx>> {
+    ) -> Result<T, ErrorHandled> {
         frame
             .instance
-            .try_subst_mir_and_normalize_erasing_regions(
+            .try_instantiate_mir_and_normalize_erasing_regions(
                 *self.tcx,
                 self.param_env,
                 ty::EarlyBinder::bind(value),
             )
-            .map_err(|_| err_inval!(TooGeneric))
+            .map_err(|_| ErrorHandled::TooGeneric(self.cur_span()))
     }
 
     /// The `args` are assumed to already be in our interpreter "universe" (param_env).
@@ -664,7 +712,7 @@
 
             ty::Foreign(_) => Ok(None),
 
-            _ => span_bug!(self.cur_span(), "size_and_align_of::<{:?}> not supported", layout.ty),
+            _ => span_bug!(self.cur_span(), "size_and_align_of::<{}> not supported", layout.ty),
         }
     }
     #[inline]
@@ -672,7 +720,7 @@
         &self,
         mplace: &MPlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Option<(Size, Align)>> {
-        self.size_and_align_of(&mplace.meta, &mplace.layout)
+        self.size_and_align_of(&mplace.meta(), &mplace.layout)
     }
 
     #[instrument(skip(self, body, return_place, return_to_block), level = "debug")]
@@ -684,15 +732,15 @@
         return_to_block: StackPopCleanup,
     ) -> InterpResult<'tcx> {
         trace!("body: {:#?}", body);
+        let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
+        let locals = IndexVec::from_elem(dead_local, &body.local_decls);
         // First push a stack frame so we have access to the local args
         let pre_frame = Frame {
             body,
             loc: Right(body.span), // Span used for errors caused during preamble.
             return_to_block,
             return_place: return_place.clone(),
-            // empty local array, we fill it in below, after we are inside the stack frame and
-            // all methods actually know about the frame
-            locals: IndexVec::new(),
+            locals,
             instance,
             tracing_span: SpanGuard::new(),
             extra: (),
@@ -701,25 +749,16 @@
         self.stack_mut().push(frame);
 
         // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
-        for ct in &body.required_consts {
-            let span = ct.span;
-            let ct = self.subst_from_current_frame_and_normalize_erasing_regions(ct.literal)?;
-            self.eval_mir_constant(&ct, Some(span), None)?;
+        if M::POST_MONO_CHECKS {
+            // `ctfe_query` does some error message decoration that we want to be in effect here.
+            self.ctfe_query(None, |tcx| {
+                body.post_mono_checks(*tcx, self.param_env, |c| {
+                    self.subst_from_current_frame_and_normalize_erasing_regions(c)
+                })
+            })?;
         }
 
-        // Most locals are initially dead.
-        let dummy = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
-        let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
-
-        // Now mark those locals as live that have no `Storage*` annotations.
-        let always_live = always_storage_live_locals(self.body());
-        for local in locals.indices() {
-            if always_live.contains(local) {
-                locals[local].value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
-            }
-        }
         // done
-        self.frame_mut().locals = locals;
         M::after_stack_push(self)?;
         self.frame_mut().loc = Left(mir::Location::START);
 
@@ -756,6 +795,7 @@
     ///
     /// If `target` is `UnwindAction::Unreachable`, that indicates the function does not allow
     /// unwinding, and doing so is UB.
+    #[cold] // usually we have normal returns, not unwinding
     pub fn unwind_to_block(&mut self, target: mir::UnwindAction) -> InterpResult<'tcx> {
         self.frame_mut().loc = match target {
             mir::UnwindAction::Cleanup(block) => Left(mir::Location { block, statement_index: 0 }),
@@ -763,9 +803,12 @@
             mir::UnwindAction::Unreachable => {
                 throw_ub_custom!(fluent::const_eval_unreachable_unwind);
             }
-            mir::UnwindAction::Terminate => {
+            mir::UnwindAction::Terminate(reason) => {
                 self.frame_mut().loc = Right(self.frame_mut().body.span);
-                M::abort(self, "panic in a function that cannot unwind".to_owned())?;
+                M::unwind_terminate(self, reason)?;
+                // This might have pushed a new stack frame, or it terminated execution.
+                // Either way, `loc` will not be updated.
+                return Ok(());
             }
         };
         Ok(())
@@ -812,7 +855,7 @@
                 .expect("return place should always be live");
             let dest = self.frame().return_place.clone();
             let err = self.copy_op(&op, &dest, /*allow_transmute*/ true);
-            trace!("return value: {:?}", self.dump_place(*dest));
+            trace!("return value: {:?}", self.dump_place(&dest));
             // We delay actually short-circuiting on this error until *after* the stack frame is
             // popped, since we want this error to be attributed to the caller, whose type defines
             // this transmute.
@@ -865,6 +908,7 @@
                     panic!("encountered StackPopCleanup::Root when unwinding!")
                 }
             };
+            // This must be the very last thing that happens, since it can in fact push a new stack frame.
             self.unwind_to_block(unwind)
         } else {
             // Follow the normal return edge.
@@ -881,12 +925,95 @@
         }
     }
 
-    /// Mark a storage as live, killing the previous content.
-    pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
-        assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
+    /// In the current stack frame, mark all locals as live that are not arguments and don't have
+    /// `Storage*` annotations (this includes the return place).
+    pub fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> {
+        self.storage_live(mir::RETURN_PLACE)?;
+
+        let body = self.body();
+        let always_live = always_storage_live_locals(body);
+        for local in body.vars_and_temps_iter() {
+            if always_live.contains(local) {
+                self.storage_live(local)?;
+            }
+        }
+        Ok(())
+    }
+
+    pub fn storage_live_dyn(
+        &mut self,
+        local: mir::Local,
+        meta: MemPlaceMeta<M::Provenance>,
+    ) -> InterpResult<'tcx> {
         trace!("{:?} is now live", local);
 
-        let local_val = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+        // We avoid `ty.is_trivially_sized` since that (a) cannot assume WF, so it recurses through
+        // all fields of a tuple, and (b) does something expensive for ADTs.
+        fn is_very_trivially_sized(ty: Ty<'_>) -> bool {
+            match ty.kind() {
+                ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+                | ty::Uint(_)
+                | ty::Int(_)
+                | ty::Bool
+                | ty::Float(_)
+                | ty::FnDef(..)
+                | ty::FnPtr(_)
+                | ty::RawPtr(..)
+                | ty::Char
+                | ty::Ref(..)
+                | ty::Generator(..)
+                | ty::GeneratorWitness(..)
+                | ty::Array(..)
+                | ty::Closure(..)
+                | ty::Never
+                | ty::Error(_) => true,
+
+                ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => false,
+
+                ty::Tuple(tys) => tys.last().iter().all(|ty| is_very_trivially_sized(**ty)),
+
+                // We don't want to do any queries, so there is not much we can do with ADTs.
+                ty::Adt(..) => false,
+
+                ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,
+
+                ty::Infer(ty::TyVar(_)) => false,
+
+                ty::Bound(..)
+                | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+                    bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty)
+                }
+            }
+        }
+
+        // This is a hot function, we avoid computing the layout when possible.
+        // `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
+        let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
+            None
+        } else {
+            // We need the layout.
+            let layout = self.layout_of_local(self.frame(), local, None)?;
+            if layout.is_sized() { None } else { Some(layout) }
+        };
+
+        let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
+            if !meta.has_meta() {
+                throw_unsup!(UnsizedLocal);
+            }
+            // Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
+            let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
+            Operand::Indirect(*dest_place.mplace())
+        } else {
+            assert!(!meta.has_meta()); // we're dropping the metadata
+            // Just make this an efficient immediate.
+            // Note that not calling `layout_of` here does have one real consequence:
+            // if the type is too big, we'll only notice this when the local is actually initialized,
+            // which is a bit too late -- we should ideally notice this already here, when the memory
+            // is conceptually allocated. But given how rare that error is and that this is a hot function,
+            // we accept this downside for now.
+            Operand::Immediate(Immediate::Uninit)
+        });
+
         // StorageLive expects the local to be dead, and marks it live.
         let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
         if !matches!(old, LocalValue::Dead) {
@@ -895,6 +1022,12 @@
         Ok(())
     }
 
+    /// Mark a storage as live, killing the previous content.
+    #[inline(always)]
+    pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+        self.storage_live_dyn(local, MemPlaceMeta::None)
+    }
+
     pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
         assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
         trace!("{:?} is now dead", local);
@@ -926,28 +1059,19 @@
         &self,
         span: Option<Span>,
         query: impl FnOnce(TyCtxtAt<'tcx>) -> Result<T, ErrorHandled>,
-    ) -> InterpResult<'tcx, T> {
+    ) -> Result<T, ErrorHandled> {
         // Use a precise span for better cycle errors.
         query(self.tcx.at(span.unwrap_or_else(|| self.cur_span()))).map_err(|err| {
-            match err {
-                ErrorHandled::Reported(err) => {
-                    if !err.is_tainted_by_errors() && let Some(span) = span {
-                        // To make it easier to figure out where this error comes from, also add a note at the current location.
-                        self.tcx.sess.emit_note(ErroneousConstUsed { span });
-                    }
-                    err_inval!(AlreadyReported(err))
-                }
-                ErrorHandled::TooGeneric => err_inval!(TooGeneric),
-            }
-            .into()
+            err.emit_note(*self.tcx);
+            err
         })
     }
 
     pub fn eval_global(
         &self,
-        gid: GlobalId<'tcx>,
-        span: Option<Span>,
+        instance: ty::Instance<'tcx>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        let gid = GlobalId { instance, promoted: None };
         // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
         // and thus don't care about the parameter environment. While we could just use
         // `self.param_env`, that would mean we invoke the query to evaluate the static
@@ -958,13 +1082,26 @@
         } else {
             self.param_env
         };
-        let val = self.ctfe_query(span, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
+        let val = self.ctfe_query(None, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
         self.raw_const_to_mplace(val)
     }
 
+    pub fn eval_mir_constant(
+        &self,
+        val: &mir::Const<'tcx>,
+        span: Option<Span>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        let const_val = self.ctfe_query(span, |tcx| val.eval(*tcx, self.param_env, span))?;
+        self.const_val_to_op(const_val, val.ty(), layout)
+    }
+
     #[must_use]
-    pub fn dump_place(&self, place: Place<M::Provenance>) -> PlacePrinter<'_, 'mir, 'tcx, M> {
-        PlacePrinter { ecx: self, place }
+    pub fn dump_place(
+        &self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+    ) -> PlacePrinter<'_, 'mir, 'tcx, M> {
+        PlacePrinter { ecx: self, place: *place.place() }
     }
 
     #[must_use]
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 910c3ca..8c0009c 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -24,7 +24,7 @@
 use rustc_ast::Mutability;
 
 use super::{
-    AllocId, Allocation, ConstAllocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy,
+    AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, Projectable,
     ValueVisitor,
 };
 use crate::const_eval;
@@ -177,7 +177,7 @@
             if let ty::Dynamic(_, _, ty::Dyn) =
                 tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
             {
-                let ptr = mplace.meta.unwrap_meta().to_pointer(&tcx)?;
+                let ptr = mplace.meta().unwrap_meta().to_pointer(&tcx)?;
                 if let Some(alloc_id) = ptr.provenance {
                     // Explicitly choose const mode here, since vtables are immutable, even
                     // if the reference of the fat pointer is mutable.
@@ -191,7 +191,7 @@
             }
             // Check if we have encountered this pointer+layout combination before.
             // Only recurse for allocation-backed pointers.
-            if let Some(alloc_id) = mplace.ptr.provenance {
+            if let Some(alloc_id) = mplace.ptr().provenance {
                 // Compute the mode with which we intern this. Our goal here is to make as many
                 // statics as we can immutable so they can be placed in read-only memory by LLVM.
                 let ref_mode = match self.mode {
@@ -267,7 +267,7 @@
 
                     // If there is no provenance in this allocation, it does not contain references
                     // that point to another allocation, and we can avoid the interning walk.
-                    if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
+                    if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr(), size, align)? {
                         if !alloc.has_provenance() {
                             return Ok(false);
                         }
@@ -353,7 +353,7 @@
         leftover_allocations,
         // The outermost allocation must exist, because we allocated it with
         // `Memory::allocate`.
-        ret.ptr.provenance.unwrap(),
+        ret.ptr().provenance.unwrap(),
         base_intern_mode,
         Some(ret.layout.ty),
     );
@@ -378,7 +378,8 @@
                 ecx.tcx.sess.delay_span_bug(
                     ecx.tcx.span,
                     format!(
-                        "error during interning should later cause validation failure: {error:?}"
+                        "error during interning should later cause validation failure: {}",
+                        ecx.format_error(error),
                     ),
                 );
             }
@@ -454,7 +455,7 @@
 {
     /// A helper function that allocates memory for the layout given and gives you access to mutate
     /// it. Once your own mutation code is done, the backing `Allocation` is removed from the
-    /// current `Memory` and returned.
+    /// current `Memory` and interned as read-only into the global memory.
     pub fn intern_with_temp_alloc(
         &mut self,
         layout: TyAndLayout<'tcx>,
@@ -462,11 +463,15 @@
             &mut InterpCx<'mir, 'tcx, M>,
             &PlaceTy<'tcx, M::Provenance>,
         ) -> InterpResult<'tcx, ()>,
-    ) -> InterpResult<'tcx, ConstAllocation<'tcx>> {
+    ) -> InterpResult<'tcx, AllocId> {
+        // `allocate` picks a fresh AllocId that we will associate with its data below.
         let dest = self.allocate(layout, MemoryKind::Stack)?;
         f(self, &dest.clone().into())?;
-        let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
+        let mut alloc = self.memory.alloc_map.remove(&dest.ptr().provenance.unwrap()).unwrap().1;
         alloc.mutability = Mutability::Not;
-        Ok(self.tcx.mk_const_alloc(alloc))
+        let alloc = self.tcx.mk_const_alloc(alloc);
+        let alloc_id = dest.ptr().provenance.unwrap(); // this was just allocated, it must have provenance
+        self.tcx.set_alloc_id_memory(alloc_id, alloc);
+        Ok(alloc_id)
     }
 }
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index f22cd91..2c0ba9b 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -5,10 +5,8 @@
 use rustc_hir::def_id::DefId;
 use rustc_middle::mir::{
     self,
-    interpret::{
-        Allocation, ConstAllocation, ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar,
-    },
-    BinOp, NonDivergingIntrinsic,
+    interpret::{Allocation, ConstAllocation, GlobalId, InterpResult, PointerArithmetic, Scalar},
+    BinOp, ConstValue, NonDivergingIntrinsic,
 };
 use rustc_middle::ty;
 use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
@@ -64,7 +62,7 @@
         sym::type_name => {
             ensure_monomorphic_enough(tcx, tp_ty)?;
             let alloc = alloc_type_name(tcx, tp_ty);
-            ConstValue::Slice { data: alloc, start: 0, end: alloc.inner().len() }
+            ConstValue::Slice { data: alloc, meta: alloc.inner().size().bytes() }
         }
         sym::needs_drop => {
             ensure_monomorphic_enough(tcx, tp_ty)?;
@@ -102,8 +100,7 @@
             | ty::Dynamic(_, _, _)
             | ty::Closure(_, _)
             | ty::Generator(_, _, _)
-            | ty::GeneratorWitness(_)
-            | ty::GeneratorWitnessMIR(_, _)
+            | ty::GeneratorWitness(..)
             | ty::Never
             | ty::Tuple(_)
             | ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
@@ -125,15 +122,9 @@
     ) -> InterpResult<'tcx, bool> {
         let instance_args = instance.args;
         let intrinsic_name = self.tcx.item_name(instance.def_id());
-
-        // First handle intrinsics without return place.
-        let ret = match ret {
-            None => match intrinsic_name {
-                sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
-                // Unsupported diverging intrinsic.
-                _ => return Ok(false),
-            },
-            Some(p) => p,
+        let Some(ret) = ret else {
+            // We don't support any intrinsic without return place.
+            return Ok(false);
         };
 
         match intrinsic_name {
@@ -228,7 +219,7 @@
                 let place = self.deref_pointer(&args[0])?;
                 let variant = self.read_discriminant(&place)?;
                 let discr = self.discriminant_for_variant(place.layout, variant)?;
-                self.write_scalar(discr, dest)?;
+                self.write_immediate(*discr, dest)?;
             }
             sym::exact_div => {
                 let l = self.read_immediate(&args[0])?;
@@ -315,7 +306,7 @@
                 let dist = {
                     // Addresses are unsigned, so this is a `usize` computation. We have to do the
                     // overflow check separately anyway.
-                    let (val, overflowed, _ty) = {
+                    let (val, overflowed) = {
                         let a_offset = ImmTy::from_uint(a_offset, usize_layout);
                         let b_offset = ImmTy::from_uint(b_offset, usize_layout);
                         self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
@@ -332,7 +323,7 @@
                         // The signed form of the intrinsic allows this. If we interpret the
                         // difference as isize, we'll get the proper signed difference. If that
                         // seems *positive*, they were more than isize::MAX apart.
-                        let dist = val.to_target_isize(self)?;
+                        let dist = val.to_scalar().to_target_isize(self)?;
                         if dist >= 0 {
                             throw_ub_custom!(
                                 fluent::const_eval_offset_from_underflow,
@@ -342,7 +333,7 @@
                         dist
                     } else {
                         // b >= a
-                        let dist = val.to_target_isize(self)?;
+                        let dist = val.to_scalar().to_target_isize(self)?;
                         // If converting to isize produced a *negative* result, we had an overflow
                         // because they were more than isize::MAX apart.
                         if dist < 0 {
@@ -410,7 +401,9 @@
                         ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
                     };
 
-                    M::abort(self, msg)?;
+                    M::panic_nounwind(self, &msg)?;
+                    // Skip the `go_to_block` at the end.
+                    return Ok(true);
                 }
             }
             sym::simd_insert => {
@@ -470,7 +463,7 @@
             _ => return Ok(false),
         }
 
-        trace!("{:?}", self.dump_place(**dest));
+        trace!("{:?}", self.dump_place(dest));
         self.go_to_block(ret);
         Ok(true)
     }
@@ -510,9 +503,9 @@
         // Performs an exact division, resulting in undefined behavior where
         // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
         // First, check x % y != 0 (or if that computation overflows).
-        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
+        let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
         assert!(!overflow); // All overflow is UB, so this should never return on overflow.
-        if res.assert_bits(a.layout.size) != 0 {
+        if res.to_scalar().assert_bits(a.layout.size) != 0 {
             throw_ub_custom!(
                 fluent::const_eval_exact_div_has_remainder,
                 a = format!("{a}"),
@@ -530,7 +523,7 @@
         r: &ImmTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
         assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
-        let (val, overflowed, _ty) = self.overflowing_binary_op(mir_op, l, r)?;
+        let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?;
         Ok(if overflowed {
             let size = l.layout.size;
             let num_bits = size.bits();
@@ -562,7 +555,7 @@
                 }
             }
         } else {
-            val
+            val.to_scalar()
         })
     }
 
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index e101785..aaa674a 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -9,7 +9,7 @@
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_middle::mir;
 use rustc_middle::ty::layout::TyAndLayout;
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, TyCtxt};
 use rustc_span::def_id::DefId;
 use rustc_target::abi::{Align, Size};
 use rustc_target::spec::abi::Abi as CallAbi;
@@ -18,7 +18,7 @@
 
 use super::{
     AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
-    InterpResult, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar,
+    InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance,
 };
 
 /// Data returned by Machine::stack_pop,
@@ -130,6 +130,9 @@
     /// Should the machine panic on allocation failures?
     const PANIC_ON_ALLOC_FAIL: bool;
 
+    /// Should post-monomorphization checks be run when a stack frame is pushed?
+    const POST_MONO_CHECKS: bool = true;
+
     /// Whether memory accesses should be alignment-checked.
     fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment;
 
@@ -218,10 +221,14 @@
         unwind: mir::UnwindAction,
     ) -> InterpResult<'tcx>;
 
-    /// Called to abort evaluation.
-    fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: String) -> InterpResult<'tcx, !> {
-        throw_unsup_format!("aborting execution is not supported")
-    }
+    /// Called to trigger a non-unwinding panic.
+    fn panic_nounwind(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: &str) -> InterpResult<'tcx>;
+
+    /// Called when unwinding reached a state where execution should be terminated.
+    fn unwind_terminate(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        reason: mir::UnwindTerminateReason,
+    ) -> InterpResult<'tcx>;
 
     /// Called for all binary operations where the LHS has pointer type.
     ///
@@ -231,24 +238,24 @@
         bin_op: mir::BinOp,
         left: &ImmTy<'tcx, Self::Provenance>,
         right: &ImmTy<'tcx, Self::Provenance>,
-    ) -> InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)>;
+    ) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>;
 
-    /// Called to write the specified `local` from the `frame`.
+    /// Called before writing the specified `local` of the `frame`.
     /// Since writing a ZST is not actually accessing memory or locals, this is never invoked
     /// for ZST reads.
     ///
     /// Due to borrow checker trouble, we indicate the `frame` as an index rather than an `&mut
     /// Frame`.
-    #[inline]
-    fn access_local_mut<'a>(
-        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
-        frame: usize,
-        local: mir::Local,
-    ) -> InterpResult<'tcx, &'a mut Operand<Self::Provenance>>
+    #[inline(always)]
+    fn before_access_local_mut<'a>(
+        _ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+        _frame: usize,
+        _local: mir::Local,
+    ) -> InterpResult<'tcx>
     where
         'tcx: 'mir,
     {
-        ecx.stack_mut()[frame].locals[local].access_mut()
+        Ok(())
     }
 
     /// Called before a basic block terminator is executed.
@@ -461,6 +468,7 @@
 
     /// Called immediately after a stack frame got popped, but before jumping back to the caller.
     /// The `locals` have already been destroyed!
+    #[inline(always)]
     fn after_stack_pop(
         _ecx: &mut InterpCx<'mir, 'tcx, Self>,
         _frame: Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
@@ -470,6 +478,18 @@
         assert!(!unwinding);
         Ok(StackPopJump::Normal)
     }
+
+    /// Called immediately after actual memory was allocated for a local
+    /// but before the local's stack frame is updated to point to that memory.
+    #[inline(always)]
+    fn after_local_allocated(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _frame: usize,
+        _local: mir::Local,
+        _mplace: &MPlaceTy<'tcx, Self::Provenance>,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
 }
 
 /// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
@@ -500,6 +520,14 @@
     }
 
     #[inline(always)]
+    fn unwind_terminate(
+        _ecx: &mut InterpCx<$mir, $tcx, Self>,
+        _reason: mir::UnwindTerminateReason,
+    ) -> InterpResult<$tcx> {
+        unreachable!("unwinding cannot happen during compile-time evaluation")
+    }
+
+    #[inline(always)]
     fn call_extra_fn(
         _ecx: &mut InterpCx<$mir, $tcx, Self>,
         fn_val: !,
@@ -527,7 +555,7 @@
         def_id: DefId,
     ) -> InterpResult<$tcx, Pointer> {
         // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
-        Ok(Pointer::new(ecx.tcx.create_static_alloc(def_id), Size::ZERO))
+        Ok(Pointer::new(ecx.tcx.reserve_and_set_static_alloc(def_id), Size::ZERO))
     }
 
     #[inline(always)]
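The `Machine` changes above replace `abort` with a required `panic_nounwind` hook, add `unwind_terminate`, and introduce defaulted members (`POST_MONO_CHECKS`, `before_access_local_mut`, `after_local_allocated`) so existing machines keep compiling unchanged. A rough, self-contained sketch of that defaulted-hook pattern follows; `ToyMachine`, `CtfeMachine`, and `Event` are invented names, not rustc APIs.

// New capabilities ship with defaults; only genuinely new required behavior
// (here: reporting a non-unwinding panic) must be implemented by each machine.
#[derive(Debug)]
enum Event {
    NonUnwindingPanic(String),
}

trait ToyMachine {
    /// Opt-out switch with a default, mirroring `POST_MONO_CHECKS: bool = true`.
    const POST_MONO_CHECKS: bool = true;

    /// Required hook: every machine decides how a non-unwinding panic is reported.
    fn panic_nounwind(&mut self, msg: &str) -> Result<(), Event>;

    /// Optional hook with a no-op default, mirroring `before_access_local_mut`.
    fn before_access_local_mut(&mut self, _local: usize) -> Result<(), Event> {
        Ok(())
    }
}

struct CtfeMachine;

impl ToyMachine for CtfeMachine {
    // POST_MONO_CHECKS and before_access_local_mut fall back to the defaults.
    fn panic_nounwind(&mut self, msg: &str) -> Result<(), Event> {
        Err(Event::NonUnwindingPanic(msg.to_string()))
    }
}

fn main() {
    let mut m = CtfeMachine;
    assert!(CtfeMachine::POST_MONO_CHECKS);
    assert!(m.before_access_local_mut(0).is_ok());
    assert!(m.panic_nounwind("assert_zero_valid failed").is_err());
}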
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 11bffed..436c4d5 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -176,12 +176,9 @@
         M::adjust_alloc_base_pointer(self, ptr)
     }
 
-    pub fn create_fn_alloc_ptr(
-        &mut self,
-        fn_val: FnVal<'tcx, M::ExtraFnVal>,
-    ) -> Pointer<M::Provenance> {
+    pub fn fn_ptr(&mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>) -> Pointer<M::Provenance> {
         let id = match fn_val {
-            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
+            FnVal::Instance(instance) => self.tcx.reserve_and_set_fn_alloc(instance),
             FnVal::Other(extra) => {
                 // FIXME(RalfJung): Should we have a cache here?
                 let id = self.tcx.reserve_alloc_id();
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index b0b553c..69eb220 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -20,16 +20,21 @@
 
 pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
 
-pub use self::eval_context::{Frame, FrameInfo, InterpCx, LocalState, LocalValue, StackPopCleanup};
+pub use self::eval_context::{Frame, FrameInfo, InterpCx, StackPopCleanup};
 pub use self::intern::{intern_const_alloc_recursive, InternKind};
 pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
 pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
-pub use self::operand::{ImmTy, Immediate, OpTy, Operand, Readable};
-pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy, Writeable};
+pub use self::operand::{ImmTy, Immediate, OpTy, Readable};
+pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
 pub use self::projection::Projectable;
 pub use self::terminator::FnArg;
 pub use self::validity::{CtfeValidationMode, RefTracking};
 pub use self::visitor::ValueVisitor;
 
+use self::{
+    operand::Operand,
+    place::{MemPlace, Place},
+};
+
 pub(crate) use self::intrinsics::eval_nullary_intrinsic;
 use eval_context::{from_known_layout, mir_assign_valid_types};
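The `mod.rs` hunk above narrows the public surface: `Operand`, `MemPlace`, and `Place` are no longer re-exported and become internal to the `interpret` module, while the typed wrappers stay public. A tiny sketch of the same re-export pattern, using invented names (`inner`, `Widget`, `RawWidget`):

// Keep implementation details module-internal while re-exporting the stable surface.
mod inner {
    pub struct Widget(pub u32);    // analogous to OpTy/PlaceTy: stays public
    pub struct RawWidget(pub u32); // analogous to Operand/Place: internal detail
}

pub use inner::Widget; // still re-exported to downstream users
use inner::RawWidget;  // visible only within this crate/module, like the `use self::{..}` above

fn wrap(raw: RawWidget) -> Widget {
    Widget(raw.0)
}

fn main() {
    let w = wrap(inner::RawWidget(7)); // `inner` is still reachable from its parent module
    assert_eq!(w.0, 7);
}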
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 6e57a56..a32ea20 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -8,15 +8,13 @@
 use rustc_hir::def::Namespace;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
-use rustc_middle::ty::{ConstInt, Ty, ValTree};
+use rustc_middle::ty::{ConstInt, Ty, TyCtxt};
 use rustc_middle::{mir, ty};
-use rustc_span::Span;
 use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
 
 use super::{
-    alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
-    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer,
-    Projectable, Provenance, Scalar,
+    alloc_range, from_known_layout, mir_assign_valid_types, AllocId, Frame, InterpCx, InterpResult,
+    MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer, Projectable, Provenance, Scalar,
 };
 
 /// An `Immediate` represents a single immediate self-contained Rust value.
@@ -33,7 +31,7 @@
     /// A pair of two scalar value (must have `ScalarPair` ABI where both fields are
     /// `Scalar::Initialized`).
     ScalarPair(Scalar<Prov>, Scalar<Prov>),
-    /// A value of fully uninitialized memory. Can have arbitrary size and layout.
+    /// A value of fully uninitialized memory. Can have arbitrary size and layout, but must be sized.
     Uninit,
 }
 
@@ -45,24 +43,30 @@
 }
 
 impl<Prov: Provenance> Immediate<Prov> {
-    pub fn from_pointer(p: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
-        Immediate::Scalar(Scalar::from_pointer(p, cx))
+    pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
+        Immediate::Scalar(Scalar::from_pointer(ptr, cx))
     }
 
-    pub fn from_maybe_pointer(p: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
-        Immediate::Scalar(Scalar::from_maybe_pointer(p, cx))
+    pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
+        Immediate::Scalar(Scalar::from_maybe_pointer(ptr, cx))
     }
 
-    pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
-        Immediate::ScalarPair(val, Scalar::from_target_usize(len, cx))
+    pub fn new_slice(ptr: Pointer<Option<Prov>>, len: u64, cx: &impl HasDataLayout) -> Self {
+        Immediate::ScalarPair(
+            Scalar::from_maybe_pointer(ptr, cx),
+            Scalar::from_target_usize(len, cx),
+        )
     }
 
     pub fn new_dyn_trait(
-        val: Scalar<Prov>,
+        val: Pointer<Option<Prov>>,
         vtable: Pointer<Option<Prov>>,
         cx: &impl HasDataLayout,
     ) -> Self {
-        Immediate::ScalarPair(val, Scalar::from_maybe_pointer(vtable, cx))
+        Immediate::ScalarPair(
+            Scalar::from_maybe_pointer(val, cx),
+            Scalar::from_maybe_pointer(vtable, cx),
+        )
     }
 
     #[inline]
@@ -88,7 +92,7 @@
 
 // ScalarPair needs a type to interpret, so we often have an immediate and a type together
 // as input for binary and cast operations.
-#[derive(Clone, Debug)]
+#[derive(Clone)]
 pub struct ImmTy<'tcx, Prov: Provenance = AllocId> {
     imm: Immediate<Prov>,
     pub layout: TyAndLayout<'tcx>,
@@ -134,6 +138,16 @@
     }
 }
 
+impl<Prov: Provenance> std::fmt::Debug for ImmTy<'_, Prov> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Printing `layout` results in too much noise; just print a nice version of the type.
+        f.debug_struct("ImmTy")
+            .field("imm", &self.imm)
+            .field("ty", &format_args!("{}", self.layout.ty))
+            .finish()
+    }
+}
+
 impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
     type Target = Immediate<Prov>;
     #[inline(always)]
@@ -142,64 +156,30 @@
     }
 }
 
-/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
-/// or still in memory. The latter is an optimization, to delay reading that chunk of
-/// memory and to avoid having to store arbitrary-sized data here.
-#[derive(Copy, Clone, Debug)]
-pub enum Operand<Prov: Provenance = AllocId> {
-    Immediate(Immediate<Prov>),
-    Indirect(MemPlace<Prov>),
-}
-
-#[derive(Clone, Debug)]
-pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
-    op: Operand<Prov>, // Keep this private; it helps enforce invariants.
-    pub layout: TyAndLayout<'tcx>,
-    /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
-    /// it needs to have a different alignment than the field type would usually have.
-    /// So we represent this here with a separate field that "overwrites" `layout.align`.
-    /// This means `layout.align` should never be used for an `OpTy`!
-    /// `None` means "alignment does not matter since this is a by-value operand"
-    /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
-    /// Also CTFE ignores alignment anyway, so this is for Miri only.
-    pub align: Option<Align>,
-}
-
-impl<'tcx, Prov: Provenance> std::ops::Deref for OpTy<'tcx, Prov> {
-    type Target = Operand<Prov>;
-    #[inline(always)]
-    fn deref(&self) -> &Operand<Prov> {
-        &self.op
-    }
-}
-
-impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
-    #[inline(always)]
-    fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
-        OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout, align: Some(mplace.align) }
-    }
-}
-
-impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
-    #[inline(always)]
-    fn from(val: ImmTy<'tcx, Prov>) -> Self {
-        OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
-    }
-}
-
 impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     #[inline]
     pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+        debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
         ImmTy { imm: val.into(), layout }
     }
 
-    #[inline]
+    #[inline(always)]
     pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+        debug_assert!(
+            match (imm, layout.abi) {
+                (Immediate::Scalar(..), Abi::Scalar(..)) => true,
+                (Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
+                (Immediate::Uninit, _) if layout.is_sized() => true,
+                _ => false,
+            },
+            "immediate {imm:?} does not fit to layout {layout:?}",
+        );
         ImmTy { imm, layout }
     }
 
     #[inline]
     pub fn uninit(layout: TyAndLayout<'tcx>) -> Self {
+        debug_assert!(layout.is_sized(), "immediates must be sized");
         ImmTy { imm: Immediate::Uninit, layout }
     }
 
@@ -223,6 +203,12 @@
     }
 
     #[inline]
+    pub fn from_bool(b: bool, tcx: TyCtxt<'tcx>) -> Self {
+        let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(tcx.types.bool)).unwrap();
+        Self::from_scalar(Scalar::from_bool(b), layout)
+    }
+
+    #[inline]
     pub fn to_const_int(self) -> ConstInt {
         assert!(self.layout.ty.is_integral());
         let int = self.to_scalar().assert_int();
@@ -239,6 +225,7 @@
             // if the entire value is uninit, then so is the field (can happen in ConstProp)
             (Immediate::Uninit, _) => Immediate::Uninit,
             // the field contains no information, can be left uninit
+            // (Scalar/ScalarPair can contain even aligned ZST, not just 1-ZST)
             _ if layout.is_zst() => Immediate::Uninit,
             // some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
             // to detect those here and also give them no data
@@ -290,23 +277,21 @@
         self.layout
     }
 
-    fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
-        &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
-        assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this here
-        Ok(MemPlaceMeta::None)
+    #[inline(always)]
+    fn meta(&self) -> MemPlaceMeta<Prov> {
+        debug_assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never get here
+        MemPlaceMeta::None
     }
 
-    fn offset_with_meta(
+    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
         &self,
         offset: Size,
         meta: MemPlaceMeta<Prov>,
         layout: TyAndLayout<'tcx>,
-        cx: &impl HasDataLayout,
+        ecx: &InterpCx<'mir, 'tcx, M>,
     ) -> InterpResult<'tcx, Self> {
         assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
-        Ok(self.offset_(offset, layout, cx))
+        Ok(self.offset_(offset, layout, ecx))
     }
 
     fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
@@ -317,49 +302,95 @@
     }
 }
 
-impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
-    // Provided as inherent method since it doesn't need the `ecx` of `Projectable::meta`.
-    pub fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> {
-        Ok(if self.layout.is_unsized() {
-            if matches!(self.op, Operand::Immediate(_)) {
-                // Unsized immediate OpTy cannot occur. We create a MemPlace for all unsized locals during argument passing.
-                // However, ConstProp doesn't do that, so we can run into this nonsense situation.
-                throw_inval!(ConstPropNonsense);
-            }
-            // There are no unsized immediates.
-            self.assert_mem_place().meta
-        } else {
-            MemPlaceMeta::None
-        })
+/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
+/// or still in memory. The latter is an optimization, to delay reading that chunk of
+/// memory and to avoid having to store arbitrary-sized data here.
+#[derive(Copy, Clone, Debug)]
+pub(super) enum Operand<Prov: Provenance = AllocId> {
+    Immediate(Immediate<Prov>),
+    Indirect(MemPlace<Prov>),
+}
+
+#[derive(Clone)]
+pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
+    op: Operand<Prov>, // Keep this private; it helps enforce invariants.
+    pub layout: TyAndLayout<'tcx>,
+    /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+    /// it needs to have a different alignment than the field type would usually have.
+    /// So we represent this here with a separate field that "overwrites" `layout.align`.
+    /// This means `layout.align` should never be used for an `OpTy`!
+    /// `None` means "alignment does not matter since this is a by-value operand"
+    /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
+    /// Also CTFE ignores alignment anyway, so this is for Miri only.
+    pub align: Option<Align>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for OpTy<'_, Prov> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Printing `layout` results in too much noise; just print a nice version of the type.
+        f.debug_struct("OpTy")
+            .field("op", &self.op)
+            .field("ty", &format_args!("{}", self.layout.ty))
+            .finish()
     }
 }
 
-impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(val: ImmTy<'tcx, Prov>) -> Self {
+        OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
+    }
+}
+
+impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
+        OpTy {
+            op: Operand::Indirect(*mplace.mplace()),
+            layout: mplace.layout,
+            align: Some(mplace.align),
+        }
+    }
+}
+
+impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
+    #[inline(always)]
+    pub(super) fn op(&self) -> &Operand<Prov> {
+        &self.op
+    }
+}
+
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
     #[inline(always)]
     fn layout(&self) -> TyAndLayout<'tcx> {
         self.layout
     }
 
-    fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
-        &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
-        self.meta()
+    #[inline]
+    fn meta(&self) -> MemPlaceMeta<Prov> {
+        match self.as_mplace_or_imm() {
+            Left(mplace) => mplace.meta(),
+            Right(_) => {
+                debug_assert!(self.layout.is_sized(), "unsized immediates are not a thing");
+                MemPlaceMeta::None
+            }
+        }
     }
 
-    fn offset_with_meta(
+    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
         &self,
         offset: Size,
         meta: MemPlaceMeta<Prov>,
         layout: TyAndLayout<'tcx>,
-        cx: &impl HasDataLayout,
+        ecx: &InterpCx<'mir, 'tcx, M>,
     ) -> InterpResult<'tcx, Self> {
         match self.as_mplace_or_imm() {
-            Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
+            Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, ecx)?.into()),
             Right(imm) => {
-                assert!(!meta.has_meta()); // no place to store metadata here
+                debug_assert!(layout.is_sized(), "unsized immediates are not a thing");
+                assert_matches!(meta, MemPlaceMeta::None); // no place to store metadata here
                 // Every part of an uninit is uninit.
-                Ok(imm.offset(offset, layout, cx)?.into())
+                Ok(imm.offset_(offset, layout, ecx).into())
             }
         }
     }
@@ -372,18 +403,19 @@
     }
 }
 
+/// The `Readable` trait describes interpreter values that one can read from.
 pub trait Readable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
     fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>>;
 }
 
-impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
     #[inline(always)]
     fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
         self.as_mplace_or_imm()
     }
 }
 
-impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
     #[inline(always)]
     fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
         Left(self.clone())
@@ -430,7 +462,7 @@
                     alloc_range(Size::ZERO, size),
                     /*read_provenance*/ matches!(s, abi::Pointer(_)),
                 )?;
-                Some(ImmTy { imm: scalar.into(), layout: mplace.layout })
+                Some(ImmTy::from_scalar(scalar, mplace.layout))
             }
             Abi::ScalarPair(
                 abi::Scalar::Initialized { value: a, .. },
@@ -450,7 +482,7 @@
                     alloc_range(b_offset, b_size),
                     /*read_provenance*/ matches!(b, abi::Pointer(_)),
                 )?;
-                Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout })
+                Some(ImmTy::from_immediate(Immediate::ScalarPair(a_val, b_val), mplace.layout))
             }
             _ => {
                 // Neither a scalar nor scalar pair.
@@ -496,11 +528,7 @@
             Abi::Scalar(abi::Scalar::Initialized { .. })
                 | Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
         ) {
-            span_bug!(
-                self.cur_span(),
-                "primitive read not possible for type: {:?}",
-                op.layout().ty
-            );
+            span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
         }
         let imm = self.read_immediate_raw(op)?.right().unwrap();
         if matches!(*imm, Immediate::Uninit) {
@@ -545,7 +573,7 @@
     /// Turn the wide MPlace into a string (must already be dereferenced!)
     pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
         let len = mplace.len(self)?;
-        let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len))?;
+        let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr(), Size::from_bytes(len))?;
         let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
         Ok(str)
     }
@@ -587,6 +615,13 @@
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
         let layout = self.layout_of_local(frame, local, layout)?;
         let op = *frame.locals[local].access()?;
+        if matches!(op, Operand::Immediate(_)) {
+            if layout.is_unsized() {
+                // ConstProp marks *all* locals as `Immediate::Uninit` since it cannot
+                // efficiently check whether they are sized. We have to catch that case here.
+                throw_inval!(ConstPropNonsense);
+            }
+        }
         Ok(OpTy { op, layout, align: Some(layout.align.abi) })
     }
 
@@ -600,16 +635,15 @@
         match place.as_mplace_or_local() {
             Left(mplace) => Ok(mplace.into()),
             Right((frame, local, offset)) => {
+                debug_assert!(place.layout.is_sized()); // only sized locals can ever be `Place::Local`.
                 let base = self.local_to_op(&self.stack()[frame], local, None)?;
-                let mut field = if let Some(offset) = offset {
-                    // This got offset. We can be sure that the field is sized.
-                    base.offset(offset, place.layout, self)?
-                } else {
-                    assert_eq!(place.layout, base.layout);
-                    // Unsized cases are possible here since an unsized local will be a
-                    // `Place::Local` until the first projection calls `place_to_op` to extract the
-                    // underlying mplace.
-                    base
+                let mut field = match offset {
+                    Some(offset) => base.offset(offset, place.layout, self)?,
+                    None => {
+                        // In the common case this hasn't been projected.
+                        debug_assert_eq!(place.layout, base.layout);
+                        base
+                    }
                 };
                 field.align = Some(place.align);
                 Ok(field)
@@ -634,7 +668,7 @@
             op = self.project(&op, elem)?
         }
 
-        trace!("eval_place_to_op: got {:?}", *op);
+        trace!("eval_place_to_op: got {:?}", op);
         // Sanity-check the type we ended up with.
         debug_assert!(
             mir_assign_valid_types(
@@ -645,7 +679,7 @@
                 )?)?,
                 op.layout,
             ),
-            "eval_place of a MIR place with type {:?} produced an interpreter operand with type {:?}",
+            "eval_place of a MIR place with type {:?} produced an interpreter operand with type {}",
             mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
             op.layout.ty,
         );
@@ -668,7 +702,7 @@
 
             Constant(constant) => {
                 let c =
-                    self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;
+                    self.subst_from_current_frame_and_normalize_erasing_regions(constant.const_)?;
 
                 // This can still fail:
                 // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
@@ -677,61 +711,13 @@
                 self.eval_mir_constant(&c, Some(constant.span), layout)?
             }
         };
-        trace!("{:?}: {:?}", mir_op, *op);
+        trace!("{:?}: {:?}", mir_op, op);
         Ok(op)
     }
 
-    fn eval_ty_constant(
-        &self,
-        val: ty::Const<'tcx>,
-        span: Option<Span>,
-    ) -> InterpResult<'tcx, ValTree<'tcx>> {
-        Ok(match val.kind() {
-            ty::ConstKind::Param(_) | ty::ConstKind::Placeholder(..) => {
-                throw_inval!(TooGeneric)
-            }
-            // FIXME(generic_const_exprs): `ConstKind::Expr` should be able to be evaluated
-            ty::ConstKind::Expr(_) => throw_inval!(TooGeneric),
-            ty::ConstKind::Error(reported) => {
-                throw_inval!(AlreadyReported(reported.into()))
-            }
-            ty::ConstKind::Unevaluated(uv) => {
-                let instance = self.resolve(uv.def, uv.args)?;
-                let cid = GlobalId { instance, promoted: None };
-                self.ctfe_query(span, |tcx| tcx.eval_to_valtree(self.param_env.and(cid)))?
-                    .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
-            }
-            ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
-                span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {val:?}")
-            }
-            ty::ConstKind::Value(valtree) => valtree,
-        })
-    }
-
-    pub fn eval_mir_constant(
-        &self,
-        val: &mir::ConstantKind<'tcx>,
-        span: Option<Span>,
-        layout: Option<TyAndLayout<'tcx>>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        match *val {
-            mir::ConstantKind::Ty(ct) => {
-                let ty = ct.ty();
-                let valtree = self.eval_ty_constant(ct, span)?;
-                let const_val = self.tcx.valtree_to_const_val((ty, valtree));
-                self.const_val_to_op(const_val, ty, layout)
-            }
-            mir::ConstantKind::Val(val, ty) => self.const_val_to_op(val, ty, layout),
-            mir::ConstantKind::Unevaluated(uv, _) => {
-                let instance = self.resolve(uv.def, uv.args)?;
-                Ok(self.eval_global(GlobalId { instance, promoted: uv.promoted }, span)?.into())
-            }
-        }
-    }
-
     pub(crate) fn const_val_to_op(
         &self,
-        val_val: ConstValue<'tcx>,
+        val_val: mir::ConstValue<'tcx>,
         ty: Ty<'tcx>,
         layout: Option<TyAndLayout<'tcx>>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
@@ -744,25 +730,21 @@
         };
         let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
         let op = match val_val {
-            ConstValue::ByRef { alloc, offset } => {
-                let id = self.tcx.create_memory_alloc(alloc);
+            mir::ConstValue::Indirect { alloc_id, offset } => {
                 // We rely on mutability being set correctly in that allocation to prevent writes
                 // where none should happen.
-                let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
+                let ptr = self.global_base_pointer(Pointer::new(alloc_id, offset))?;
                 Operand::Indirect(MemPlace::from_ptr(ptr.into()))
             }
-            ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
-            ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
-            ConstValue::Slice { data, start, end } => {
+            mir::ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
+            mir::ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
+            mir::ConstValue::Slice { data, meta } => {
                 // We rely on mutability being set correctly in `data` to prevent writes
                 // where none should happen.
-                let ptr = Pointer::new(
-                    self.tcx.create_memory_alloc(data),
-                    Size::from_bytes(start), // offset: `start`
-                );
+                let ptr = Pointer::new(self.tcx.reserve_and_set_memory_alloc(data), Size::ZERO);
                 Operand::Immediate(Immediate::new_slice(
-                    Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
-                    u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
+                    self.global_base_pointer(ptr)?.into(),
+                    meta,
                     self,
                 ))
             }
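The `operand.rs` hunks above hand-roll `Debug` for `ImmTy` and `OpTy` so that only a `Display`-rendered type is printed instead of the full (noisy) layout. Below is a small self-contained sketch of that `debug_struct` + `format_args!` technique, with invented names (`ToyImm`, `ToyTy`):

use std::fmt;

// Printing the whole layout would be noisy; show a concise type instead.
struct ToyTy(&'static str);

impl fmt::Display for ToyTy {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.0)
    }
}

struct ToyImm {
    imm: u64,
    ty: ToyTy,
}

impl fmt::Debug for ToyImm {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // format_args! lets a Display value be embedded in Debug output unquoted.
        f.debug_struct("ToyImm")
            .field("imm", &self.imm)
            .field("ty", &format_args!("{}", self.ty))
            .finish()
    }
}

fn main() {
    let v = ToyImm { imm: 42, ty: ToyTy("i32") };
    assert_eq!(format!("{v:?}"), "ToyImm { imm: 42, ty: i32 }");
}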
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index eb06457..b084864 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -1,7 +1,7 @@
 use rustc_apfloat::Float;
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
-use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::{self, FloatTy, Ty};
 use rustc_span::symbol::sym;
 use rustc_target::abi::Abi;
@@ -20,9 +20,9 @@
         right: &ImmTy<'tcx, M::Provenance>,
         dest: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
+        let (val, overflowed) = self.overflowing_binary_op(op, &left, &right)?;
         debug_assert_eq!(
-            Ty::new_tup(self.tcx.tcx, &[ty, self.tcx.types.bool]),
+            Ty::new_tup(self.tcx.tcx, &[val.layout.ty, self.tcx.types.bool]),
             dest.layout.ty,
             "type mismatch for result of {op:?}",
         );
@@ -30,7 +30,7 @@
         if let Abi::ScalarPair(..) = dest.layout.abi {
             // We can use the optimized path and avoid `place_field` (which might do
             // `force_allocation`).
-            let pair = Immediate::ScalarPair(val, Scalar::from_bool(overflowed));
+            let pair = Immediate::ScalarPair(val.to_scalar(), Scalar::from_bool(overflowed));
             self.write_immediate(pair, dest)?;
         } else {
             assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
@@ -38,7 +38,7 @@
             // do a component-wise write here. This code path is slower than the above because
             // `place_field` will have to `force_allocate` locals here.
             let val_field = self.project_field(dest, 0)?;
-            self.write_scalar(val, &val_field)?;
+            self.write_scalar(val.to_scalar(), &val_field)?;
             let overflowed_field = self.project_field(dest, 1)?;
             self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
         }
@@ -54,9 +54,9 @@
         right: &ImmTy<'tcx, M::Provenance>,
         dest: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
-        assert_eq!(ty, dest.layout.ty, "type mismatch for result of {op:?}");
-        self.write_scalar(val, dest)
+        let val = self.wrapping_binary_op(op, left, right)?;
+        assert_eq!(val.layout.ty, dest.layout.ty, "type mismatch for result of {op:?}");
+        self.write_immediate(*val, dest)
     }
 }
 
@@ -66,7 +66,7 @@
         bin_op: mir::BinOp,
         l: char,
         r: char,
-    ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
         use rustc_middle::mir::BinOp::*;
 
         let res = match bin_op {
@@ -78,7 +78,7 @@
             Ge => l >= r,
             _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
         };
-        (Scalar::from_bool(res), false, self.tcx.types.bool)
+        (ImmTy::from_bool(res, *self.tcx), false)
     }
 
     fn binary_bool_op(
@@ -86,7 +86,7 @@
         bin_op: mir::BinOp,
         l: bool,
         r: bool,
-    ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
         use rustc_middle::mir::BinOp::*;
 
         let res = match bin_op {
@@ -101,33 +101,33 @@
             BitXor => l ^ r,
             _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
         };
-        (Scalar::from_bool(res), false, self.tcx.types.bool)
+        (ImmTy::from_bool(res, *self.tcx), false)
     }
 
     fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
         &self,
         bin_op: mir::BinOp,
-        ty: Ty<'tcx>,
+        layout: TyAndLayout<'tcx>,
         l: F,
         r: F,
-    ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
         use rustc_middle::mir::BinOp::*;
 
-        let (val, ty) = match bin_op {
-            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
-            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
-            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
-            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
-            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
-            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
-            Add => ((l + r).value.into(), ty),
-            Sub => ((l - r).value.into(), ty),
-            Mul => ((l * r).value.into(), ty),
-            Div => ((l / r).value.into(), ty),
-            Rem => ((l % r).value.into(), ty),
+        let val = match bin_op {
+            Eq => ImmTy::from_bool(l == r, *self.tcx),
+            Ne => ImmTy::from_bool(l != r, *self.tcx),
+            Lt => ImmTy::from_bool(l < r, *self.tcx),
+            Le => ImmTy::from_bool(l <= r, *self.tcx),
+            Gt => ImmTy::from_bool(l > r, *self.tcx),
+            Ge => ImmTy::from_bool(l >= r, *self.tcx),
+            Add => ImmTy::from_scalar((l + r).value.into(), layout),
+            Sub => ImmTy::from_scalar((l - r).value.into(), layout),
+            Mul => ImmTy::from_scalar((l * r).value.into(), layout),
+            Div => ImmTy::from_scalar((l / r).value.into(), layout),
+            Rem => ImmTy::from_scalar((l % r).value.into(), layout),
             _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
         };
-        (val, false, ty)
+        (val, false)
     }
 
     fn binary_int_op(
@@ -138,7 +138,7 @@
         left_layout: TyAndLayout<'tcx>,
         r: u128,
         right_layout: TyAndLayout<'tcx>,
-    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
         use rustc_middle::mir::BinOp::*;
 
         let throw_ub_on_overflow = match bin_op {
@@ -200,19 +200,16 @@
                 );
             }
 
-            return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
+            return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
         }
 
         // For the remaining ops, the types must be the same on both sides
         if left_layout.ty != right_layout.ty {
             span_bug!(
                 self.cur_span(),
-                "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
-                bin_op,
-                l,
-                left_layout.ty,
-                r,
-                right_layout.ty,
+                "invalid asymmetric binary op {bin_op:?}: {l:?} ({l_ty}), {r:?} ({r_ty})",
+                l_ty = left_layout.ty,
+                r_ty = right_layout.ty,
             )
         }
 
@@ -230,7 +227,7 @@
             if let Some(op) = op {
                 let l = self.sign_extend(l, left_layout) as i128;
                 let r = self.sign_extend(r, right_layout) as i128;
-                return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
+                return Ok((ImmTy::from_bool(op(&l, &r), *self.tcx), false));
             }
             let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
                 Div if r == 0 => throw_ub!(DivisionByZero),
@@ -267,22 +264,22 @@
                 if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
                     throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
                 }
-                return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
+                return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
             }
         }
 
-        let (val, ty) = match bin_op {
-            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
-            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
+        let val = match bin_op {
+            Eq => ImmTy::from_bool(l == r, *self.tcx),
+            Ne => ImmTy::from_bool(l != r, *self.tcx),
 
-            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
-            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
-            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
-            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
+            Lt => ImmTy::from_bool(l < r, *self.tcx),
+            Le => ImmTy::from_bool(l <= r, *self.tcx),
+            Gt => ImmTy::from_bool(l > r, *self.tcx),
+            Ge => ImmTy::from_bool(l >= r, *self.tcx),
 
-            BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
-            BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
-            BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
+            BitOr => ImmTy::from_uint(l | r, left_layout),
+            BitAnd => ImmTy::from_uint(l & r, left_layout),
+            BitXor => ImmTy::from_uint(l ^ r, left_layout),
 
             Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
                 assert!(!left_layout.abi.is_signed());
@@ -304,12 +301,12 @@
                 if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
                     throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
                 }
-                return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
+                return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
             }
 
             _ => span_bug!(
                 self.cur_span(),
-                "invalid binary op {:?}: {:?}, {:?} (both {:?})",
+                "invalid binary op {:?}: {:?}, {:?} (both {})",
                 bin_op,
                 l,
                 r,
@@ -317,7 +314,7 @@
             ),
         };
 
-        Ok((val, false, ty))
+        Ok((val, false))
     }
 
     fn binary_ptr_op(
@@ -325,7 +322,7 @@
         bin_op: mir::BinOp,
         left: &ImmTy<'tcx, M::Provenance>,
         right: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
         use rustc_middle::mir::BinOp::*;
 
         match bin_op {
@@ -336,7 +333,10 @@
                 let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
 
                 let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
-                Ok((Scalar::from_maybe_pointer(offset_ptr, self), false, left.layout.ty))
+                Ok((
+                    ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout),
+                    false,
+                ))
             }
 
             // Fall back to machine hook so Miri can support more pointer ops.
@@ -344,16 +344,15 @@
         }
     }
 
-    /// Returns the result of the specified operation, whether it overflowed, and
-    /// the result type.
+    /// Returns the result of the specified operation, and whether it overflowed.